summaryrefslogtreecommitdiff
path: root/chromium/third_party/libjingle/source/talk/session
diff options
context:
space:
mode:
authorZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
committerZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
commit679147eead574d186ebf3069647b4c23e8ccace6 (patch)
treefc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/third_party/libjingle/source/talk/session
downloadqtwebengine-chromium-679147eead574d186ebf3069647b4c23e8ccace6.tar.gz
Initial import.
Diffstat (limited to 'chromium/third_party/libjingle/source/talk/session')
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/audiomonitor.cc121
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/audiomonitor.h75
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/call.cc1097
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/call.h284
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channel.cc2736
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channel.h689
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channel_unittest.cc2905
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channelmanager.cc928
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channelmanager.h311
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/channelmanager_unittest.cc596
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.cc208
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.h100
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor_unittest.cc232
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediamessages.cc394
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediamessages.h169
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediamessages_unittest.cc363
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediamonitor.cc108
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediamonitor.h98
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediarecorder.cc224
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediarecorder.h119
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediarecorder_unittest.cc358
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasession.cc1657
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasession.h497
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasession_unittest.cc1905
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.cc1148
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.h175
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasessionclient_unittest.cc3404
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/mediasink.h48
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.cc132
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.h86
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter_unittest.cc212
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/soundclip.cc82
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/soundclip.h70
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/srtpfilter.cc825
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/srtpfilter.h308
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/srtpfilter_unittest.cc863
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.cc93
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.h67
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter_unittest.cc184
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/typewrapping.h.pump297
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/typingmonitor.cc123
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/typingmonitor.h84
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/typingmonitor_unittest.cc92
-rw-r--r--chromium/third_party/libjingle/source/talk/session/media/voicechannel.h33
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.cc600
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.h140
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.cc387
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.h165
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.cc432
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.h182
-rw-r--r--chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient_unittest.cc226
51 files changed, 26632 insertions, 0 deletions
diff --git a/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.cc b/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.cc
new file mode 100644
index 00000000000..385702f75f7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.cc
@@ -0,0 +1,121 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/audiomonitor.h"
+#include "talk/session/media/voicechannel.h"
+#include <cassert>
+
+namespace cricket {
+
+const uint32 MSG_MONITOR_POLL = 1;
+const uint32 MSG_MONITOR_START = 2;
+const uint32 MSG_MONITOR_STOP = 3;
+const uint32 MSG_MONITOR_SIGNAL = 4;
+
+AudioMonitor::AudioMonitor(VoiceChannel *voice_channel,
+ talk_base::Thread *monitor_thread) {
+ voice_channel_ = voice_channel;
+ monitoring_thread_ = monitor_thread;
+ monitoring_ = false;
+}
+
AudioMonitor::~AudioMonitor() {
  // Drop any pending poll/signal messages queued on either thread so no
  // callback can fire into a destroyed object.
  voice_channel_->worker_thread()->Clear(this);
  monitoring_thread_->Clear(this);
}
+
+void AudioMonitor::Start(int milliseconds) {
+ rate_ = milliseconds;
+ if (rate_ < 100)
+ rate_ = 100;
+ voice_channel_->worker_thread()->Post(this, MSG_MONITOR_START);
+}
+
// Stops polling. Asynchronous: the stop is processed on the worker thread,
// and a final SignalUpdate may still arrive afterwards.
void AudioMonitor::Stop() {
  voice_channel_->worker_thread()->Post(this, MSG_MONITOR_STOP);
}
+
// Dispatches the monitor's control messages. START/STOP/POLL arrive on the
// voice channel's worker thread; SIGNAL arrives on the monitoring thread.
void AudioMonitor::OnMessage(talk_base::Message *message) {
  talk_base::CritScope cs(&crit_);

  switch (message->message_id) {
    case MSG_MONITOR_START:
      assert(talk_base::Thread::Current() == voice_channel_->worker_thread());
      if (!monitoring_) {
        monitoring_ = true;
        PollVoiceChannel();
      }
      break;

    case MSG_MONITOR_STOP:
      assert(talk_base::Thread::Current() == voice_channel_->worker_thread());
      if (monitoring_) {
        monitoring_ = false;
        // Flush any queued poll messages so polling stops immediately.
        voice_channel_->worker_thread()->Clear(this);
      }
      break;

    case MSG_MONITOR_POLL:
      assert(talk_base::Thread::Current() == voice_channel_->worker_thread());
      PollVoiceChannel();
      break;

    case MSG_MONITOR_SIGNAL:
      {
        assert(talk_base::Thread::Current() == monitoring_thread_);
        // Copy the shared snapshot, then release the lock while firing the
        // signal so slots can call back into this object without
        // deadlocking; re-acquire before CritScope's destructor runs.
        AudioInfo info = audio_info_;
        crit_.Leave();
        SignalUpdate(this, info);
        crit_.Enter();
      }
      break;
  }
}
+
// Samples the voice channel's current audio state into audio_info_, then
// notifies the monitoring thread and schedules the next poll. Runs on the
// worker thread only (see assert).
void AudioMonitor::PollVoiceChannel() {
  talk_base::CritScope cs(&crit_);
  assert(talk_base::Thread::Current() == voice_channel_->worker_thread());

  // Gather connection infos
  audio_info_.input_level = voice_channel_->GetInputLevel_w();
  audio_info_.output_level = voice_channel_->GetOutputLevel_w();
  voice_channel_->GetActiveStreams_w(&audio_info_.active_streams);

  // Signal the monitoring thread, start another poll timer
  monitoring_thread_->Post(this, MSG_MONITOR_SIGNAL);
  voice_channel_->worker_thread()->PostDelayed(rate_, this, MSG_MONITOR_POLL);
}
+
// Returns the monitored channel (not owned).
VoiceChannel *AudioMonitor::voice_channel() {
  return voice_channel_;
}
+
// Returns the thread on which SignalUpdate is fired (not owned).
talk_base::Thread *AudioMonitor::monitor_thread() {
  return monitoring_thread_;
}
+
+}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.h b/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.h
new file mode 100644
index 00000000000..5aff8fd1e66
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/audiomonitor.h
@@ -0,0 +1,75 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_AUDIOMONITOR_H_
+#define TALK_SESSION_MEDIA_AUDIOMONITOR_H_
+
+#include "talk/base/sigslot.h"
+#include "talk/base/thread.h"
+#include "talk/p2p/base/port.h"
+#include <vector>
+
+namespace cricket {
+
+class VoiceChannel;
+
// Snapshot of a voice channel's audio activity, produced by AudioMonitor.
struct AudioInfo {
  int input_level;   // current input (microphone) level
  int output_level;  // current output (speaker) level
  typedef std::vector<std::pair<uint32, int> > StreamList;
  StreamList active_streams;  // ssrcs contributing to output_level
};
+
// Periodically polls a VoiceChannel's audio levels on the channel's worker
// thread and reports AudioInfo snapshots on a designated monitor thread via
// SignalUpdate.
class AudioMonitor : public talk_base::MessageHandler,
    public sigslot::has_slots<> {
 public:
  // Neither pointer is owned; both must outlive the monitor.
  AudioMonitor(VoiceChannel* voice_channel, talk_base::Thread *monitor_thread);
  ~AudioMonitor();

  // Starts polling every |cms| milliseconds (the implementation enforces a
  // minimum period); Stop() cancels. Both marshal onto the worker thread.
  void Start(int cms);
  void Stop();

  VoiceChannel* voice_channel();
  talk_base::Thread *monitor_thread();

  // Fired on the monitor thread with the latest AudioInfo snapshot.
  sigslot::signal2<AudioMonitor*, const AudioInfo&> SignalUpdate;

 protected:
  void OnMessage(talk_base::Message *message);
  void PollVoiceChannel();

  AudioInfo audio_info_;                  // latest sample, guarded by crit_
  VoiceChannel* voice_channel_;           // monitored channel (not owned)
  talk_base::Thread* monitoring_thread_;  // receives MSG_MONITOR_SIGNAL
  talk_base::CriticalSection crit_;
  uint32 rate_;                           // poll period in milliseconds
  bool monitoring_;                       // true while polling is active
};
+
+}
+
+#endif // TALK_SESSION_MEDIA_AUDIOMONITOR_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/call.cc b/chromium/third_party/libjingle/source/talk/session/media/call.cc
new file mode 100644
index 00000000000..c963c36ec36
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/call.cc
@@ -0,0 +1,1097 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+#include "talk/base/window.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/screencastid.h"
+#include "talk/p2p/base/parsing.h"
+#include "talk/session/media/call.h"
+#include "talk/session/media/mediasessionclient.h"
+
+namespace cricket {
+
+const uint32 MSG_CHECKAUTODESTROY = 1;
+const uint32 MSG_TERMINATECALL = 2;
+const uint32 MSG_PLAYDTMF = 3;
+
+namespace {
+const int kDTMFDelay = 300; // msec
+const size_t kMaxDTMFDigits = 30;
+const int kSendToVoicemailTimeout = 1000*20;
+const int kNoVoicemailTimeout = 1000*180;
+const int kMediaMonitorInterval = 1000*15;
+// In order to be the same as the server-side switching, this must be 100.
+const int kAudioMonitorPollPeriodMillis = 100;
+
// Looks up |key| in |map| and returns the mapped value, or NULL when the key
// is absent. V must be a pointer type so NULL is a valid "not found" result.
template<class K, class V>
V FindOrNull(const std::map<K, V>& map,
             const K& key) {
  typename std::map<K, V>::const_iterator entry = map.find(key);
  if (entry == map.end()) {
    return NULL;
  }
  return entry->second;
}
+
+
// Returns false only when |content| is a media description whose crypto list
// is empty (or whose description is missing).
// NOTE(review): a NULL |content| returns true — i.e. "no content" is treated
// as vacuously containing crypto. Callers appear to depend on this; confirm
// before tightening.
bool ContentContainsCrypto(const cricket::ContentInfo* content) {
  if (content != NULL) {
    const cricket::MediaContentDescription* desc =
        static_cast<const cricket::MediaContentDescription*>(
            content->description);
    if (!desc || desc->cryptos().empty()) {
      return false;
    }
  }
  return true;
}
+
+}
+
// Creates a call with a random id, owned by |session_client| (not owned
// here). Sessions are attached later via AddSession/IncomingSession.
Call::Call(MediaSessionClient* session_client)
    : id_(talk_base::CreateRandomId()),
      session_client_(session_client),
      local_renderer_(NULL),
      has_video_(false),
      has_data_(false),
      muted_(false),
      video_muted_(false),
      send_to_voicemail_(true),
      playing_dtmf_(false) {
}
+
Call::~Call() {
  // Tear down every remaining session; RemoveSession erases the map entry,
  // so loop until the map drains rather than iterating it directly.
  while (media_session_map_.begin() != media_session_map_.end()) {
    Session* session = media_session_map_.begin()->second.session;
    RemoveSession(session);
    session_client_->session_manager()->DestroySession(session);
  }
  // Discard any pending self-posted messages (e.g. MSG_CHECKAUTODESTROY).
  talk_base::Thread::Current()->Clear(this);
}
+
// Initiates an outgoing session to |to| with an auto-generated session id;
// |initiator| is stringified into the initiator name.
Session* Call::InitiateSession(const buzz::Jid& to,
                               const buzz::Jid& initiator,
                               const CallOptions& options) {
  std::string id;
  std::string initiator_name = initiator.Str();
  return InternalInitiateSession(id, to, initiator_name, options);
}
+
// Overload taking an explicit session |id|; the initiator name is left empty.
Session *Call::InitiateSession(const std::string& id,
                               const buzz::Jid& to,
                               const CallOptions& options) {
  std::string initiator_name;
  return InternalInitiateSession(id, to, initiator_name, options);
}
+
// Attaches an incoming |session| (with its remote |offer|) to this call and
// notifies listeners that an initiate was received.
void Call::IncomingSession(Session* session, const SessionDescription* offer) {
  AddSession(session, offer);

  // Make sure the session knows about the incoming ssrcs. This needs to be done
  // prior to the SignalSessionState call, because that may trigger handling of
  // these new SSRCs, so they need to be registered before then.
  UpdateRemoteMediaStreams(session, offer->contents(), false);

  // Missed the first state, the initiate, which is needed by
  // call_client.
  SignalSessionState(this, session, Session::STATE_RECEIVEDINITIATE);
}
+
// Accepts an incoming |session| by generating an answer to its remote
// description. Silently ignored if the session is not part of this call.
void Call::AcceptSession(Session* session,
                         const cricket::CallOptions& options) {
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it != media_session_map_.end()) {
    const SessionDescription* answer = session_client_->CreateAnswer(
        session->remote_description(), options);
    it->second.session->Accept(answer);
  }
}
+
// Declines an incoming |session| with a polite "decline" reason. Ignored if
// the session is not part of this call.
void Call::RejectSession(Session* session) {
  // Assume polite decline.
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it != media_session_map_.end())
    it->second.session->Reject(STR_TERMINATE_DECLINE);
}
+
// Terminates a single |session| belonging to this call; no-op otherwise.
void Call::TerminateSession(Session* session) {
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it != media_session_map_.end()) {
    // Assume polite terminations.
    it->second.session->Terminate();
  }
}
+
// Terminates every session in the call.
void Call::Terminate() {
  // Copy the list so that we can iterate over it in a stable way
  // (terminating a session can mutate media_session_map_).
  std::vector<Session*> sessions = this->sessions();

  // There may be more than one session to terminate
  std::vector<Session*>::iterator it;
  for (it = sessions.begin(); it != sessions.end(); ++it) {
    TerminateSession(*it);
  }
}
+
// Sends a Jingle view request for |session| after validating that every
// requested static view refers to a known incoming video stream. Returns
// false on validation or serialization failure.
bool Call::SendViewRequest(Session* session,
                           const ViewRequest& view_request) {
  StaticVideoViews::const_iterator it;
  for (it = view_request.static_video_views.begin();
       it != view_request.static_video_views.end(); ++it) {
    StreamParams found_stream;
    bool found = false;
    // NOTE(review): this validates against the *received* media streams for
    // the session, although the log text below says "local streams".
    MediaStreams* recv_streams = GetMediaStreams(session);
    if (recv_streams)
      found = recv_streams->GetVideoStream(it->selector, &found_stream);
    if (!found) {
      LOG(LS_WARNING) << "Trying to send view request for ("
                      << it->selector.ssrc << ", '"
                      << it->selector.groupid << "', '"
                      << it->selector.streamid << "'"
                      << ") is not in the local streams.";
      return false;
    }
  }

  XmlElements elems;
  WriteError error;
  if (!WriteJingleViewRequest(CN_VIDEO, view_request, &elems, &error)) {
    LOG(LS_ERROR) << "Couldn't write out view request: " << error.text;
    return false;
  }

  return session->SendInfoMessage(elems);
}
+
// Remembers the local (preview) renderer; applies it immediately only when
// this call currently has focus.
void Call::SetLocalRenderer(VideoRenderer* renderer) {
  local_renderer_ = renderer;
  if (session_client_->GetFocus() == this) {
    session_client_->channel_manager()->SetLocalRenderer(renderer);
  }
}
+
// Routes the remote video stream identified by |ssrc| on |session| to
// |renderer|. Logs (but does not report) failure when the session has no
// video channel.
void Call::SetVideoRenderer(Session* session, uint32 ssrc,
                            VideoRenderer* renderer) {
  VideoChannel* video_channel = GetVideoChannel(session);
  if (video_channel) {
    video_channel->SetRenderer(ssrc, renderer);
    LOG(LS_INFO) << "Set renderer of ssrc " << ssrc
                 << " to " << renderer << ".";
  } else {
    LOG(LS_INFO) << "Failed to set renderer of ssrc " << ssrc << ".";
  }
}
+
+void Call::OnMessage(talk_base::Message* message) {
+ switch (message->message_id) {
+ case MSG_CHECKAUTODESTROY:
+ // If no more sessions for this call, delete it
+ if (media_session_map_.empty())
+ session_client_->DestroyCall(this);
+ break;
+ case MSG_TERMINATECALL:
+ // Signal to the user that a timeout has happened and the call should
+ // be sent to voicemail.
+ if (send_to_voicemail_) {
+ SignalSetupToCallVoicemail();
+ }
+
+ // Callee didn't answer - terminate call
+ Terminate();
+ break;
+ case MSG_PLAYDTMF:
+ ContinuePlayDTMF();
+ }
+}
+
+std::vector<Session*> Call::sessions() {
+ std::vector<Session*> sessions;
+ MediaSessionMap::iterator it;
+ for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it)
+ sessions.push_back(it->second.session);
+
+ return sessions;
+}
+
// Creates the voice (required), video and data (offer-dependent) channels
// for |session|, wires up signals, and registers the session with this call.
// Returns false if any required channel could not be created; in that case
// any channels already created are NOT torn down here (they are cleaned up
// when the session is destroyed).
bool Call::AddSession(Session* session, const SessionDescription* offer) {
  bool succeeded = true;
  MediaSession media_session;
  media_session.session = session;
  media_session.voice_channel = NULL;
  media_session.video_channel = NULL;
  media_session.data_channel = NULL;
  media_session.recv_streams = NULL;

  const ContentInfo* audio_offer = GetFirstAudioContent(offer);
  const ContentInfo* video_offer = GetFirstVideoContent(offer);
  const ContentInfo* data_offer = GetFirstDataContent(offer);
  has_video_ = (video_offer != NULL);
  has_data_ = (data_offer != NULL);

  ASSERT(audio_offer != NULL);
  // Create voice channel and start a media monitor.
  media_session.voice_channel =
      session_client_->channel_manager()->CreateVoiceChannel(
          session, audio_offer->name, has_video_);
  // voice_channel can be NULL in case of NullVoiceEngine.
  if (media_session.voice_channel) {
    media_session.voice_channel->SignalMediaMonitor.connect(
        this, &Call::OnMediaMonitor);
    media_session.voice_channel->StartMediaMonitor(kMediaMonitorInterval);
  } else {
    succeeded = false;
  }

  // If desired, create video channel and start a media monitor.
  if (has_video_ && succeeded) {
    media_session.video_channel =
        session_client_->channel_manager()->CreateVideoChannel(
            session, video_offer->name, true, media_session.voice_channel);
    // video_channel can be NULL in case of NullVideoEngine.
    if (media_session.video_channel) {
      media_session.video_channel->SignalMediaMonitor.connect(
          this, &Call::OnMediaMonitor);
      media_session.video_channel->StartMediaMonitor(kMediaMonitorInterval);
    } else {
      succeeded = false;
    }
  }

  // If desired, create data channel.
  if (has_data_ && succeeded) {
    const DataContentDescription* data = GetFirstDataContentDescription(offer);
    if (data == NULL) {
      succeeded = false;
    } else {
      // SCTP-based protocols get an SCTP channel; everything else uses RTP.
      DataChannelType data_channel_type = DCT_RTP;
      if ((data->protocol() == kMediaProtocolSctp) ||
          (data->protocol() == kMediaProtocolDtlsSctp)) {
        data_channel_type = DCT_SCTP;
      }

      bool rtcp = false;
      media_session.data_channel =
          session_client_->channel_manager()->CreateDataChannel(
              session, data_offer->name, rtcp, data_channel_type);
      if (media_session.data_channel) {
        media_session.data_channel->SignalDataReceived.connect(
            this, &Call::OnDataReceived);
      } else {
        succeeded = false;
      }
    }
  }

  if (succeeded) {
    // Add session to list, create channels for this session.
    media_session.recv_streams = new MediaStreams;
    media_session_map_[session->id()] = media_session;
    session->SignalState.connect(this, &Call::OnSessionState);
    session->SignalError.connect(this, &Call::OnSessionError);
    session->SignalInfoMessage.connect(
        this, &Call::OnSessionInfoMessage);
    session->SignalRemoteDescriptionUpdate.connect(
        this, &Call::OnRemoteDescriptionUpdate);
    session->SignalReceivedTerminateReason
        .connect(this, &Call::OnReceivedTerminateReason);

    // If this call has the focus, enable this session's channels.
    if (session_client_->GetFocus() == this) {
      EnableSessionChannels(session, true);
    }

    // Signal client.
    SignalAddSession(this, session);
  }

  return succeeded;
}
+
// Detaches |session| from this call: stops its screencasts, destroys its
// media channels, releases its stream bookkeeping, and schedules the call's
// auto-destroy check. No-op if the session is unknown.
void Call::RemoveSession(Session* session) {
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it == media_session_map_.end())
    return;

  // Remove all the screencasts, if they haven't been already.
  while (!it->second.started_screencasts.empty()) {
    uint32 ssrc = it->second.started_screencasts.begin()->first;
    if (!StopScreencastWithoutSendingUpdate(it->second.session, ssrc)) {
      LOG(LS_ERROR) << "Unable to stop screencast with ssrc " << ssrc;
      ASSERT(false);
    }
  }

  // Destroy video channel
  VideoChannel* video_channel = it->second.video_channel;
  if (video_channel != NULL)
    session_client_->channel_manager()->DestroyVideoChannel(video_channel);

  // Destroy voice channel
  VoiceChannel* voice_channel = it->second.voice_channel;
  if (voice_channel != NULL)
    session_client_->channel_manager()->DestroyVoiceChannel(voice_channel);

  // Destroy data channel
  DataChannel* data_channel = it->second.data_channel;
  if (data_channel != NULL)
    session_client_->channel_manager()->DestroyDataChannel(data_channel);

  delete it->second.recv_streams;
  media_session_map_.erase(it);

  // Destroy speaker monitor
  StopSpeakerMonitor(session);

  // Signal client
  SignalRemoveSession(this, session);

  // The call auto destroys when the last session is removed
  talk_base::Thread::Current()->Post(this, MSG_CHECKAUTODESTROY);
}
+
// Returns the voice channel for |session|, or NULL if the session is not
// part of this call.
VoiceChannel* Call::GetVoiceChannel(Session* session) const {
  MediaSessionMap::const_iterator it = media_session_map_.find(session->id());
  return (it != media_session_map_.end()) ? it->second.voice_channel : NULL;
}
+
// Returns the video channel for |session|, or NULL if unknown/absent.
VideoChannel* Call::GetVideoChannel(Session* session) const {
  MediaSessionMap::const_iterator it = media_session_map_.find(session->id());
  return (it != media_session_map_.end()) ? it->second.video_channel : NULL;
}
+
// Returns the data channel for |session|, or NULL if unknown/absent.
DataChannel* Call::GetDataChannel(Session* session) const {
  MediaSessionMap::const_iterator it = media_session_map_.find(session->id());
  return (it != media_session_map_.end()) ? it->second.data_channel : NULL;
}
+
// Returns the received-streams bookkeeping for |session|, or NULL if the
// session is not part of this call. The returned object is owned by Call.
MediaStreams* Call::GetMediaStreams(Session* session) const {
  MediaSessionMap::const_iterator it = media_session_map_.find(session->id());
  return (it != media_session_map_.end()) ? it->second.recv_streams : NULL;
}
+
// Enables or disables media flow on every session's channels, and attaches
// or detaches the local preview renderer accordingly. Called when this call
// gains or loses focus.
void Call::EnableChannels(bool enable) {
  MediaSessionMap::iterator it;
  for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
    EnableSessionChannels(it->second.session, enable);
  }
  session_client_->channel_manager()->SetLocalRenderer(
      (enable) ? local_renderer_ : NULL);
}
+
// Enables/disables whichever of |session|'s voice/video/data channels exist.
void Call::EnableSessionChannels(Session* session, bool enable) {
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it == media_session_map_.end())
    return;

  VoiceChannel* voice_channel = it->second.voice_channel;
  VideoChannel* video_channel = it->second.video_channel;
  DataChannel* data_channel = it->second.data_channel;
  if (voice_channel != NULL)
    voice_channel->Enable(enable);
  if (video_channel != NULL)
    video_channel->Enable(enable);
  if (data_channel != NULL)
    data_channel->Enable(enable);
}
+
// Mutes/unmutes the default send audio stream (ssrc 0) on every session.
void Call::Mute(bool mute) {
  muted_ = mute;
  MediaSessionMap::iterator it;
  for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
    if (it->second.voice_channel != NULL)
      it->second.voice_channel->MuteStream(0, mute);
  }
}
+
// Mutes/unmutes the default send video stream (ssrc 0) on every session.
void Call::MuteVideo(bool mute) {
  video_muted_ = mute;
  MediaSessionMap::iterator it;
  for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
    if (it->second.video_channel != NULL)
      it->second.video_channel->MuteStream(0, mute);
  }
}
+
// Sends |payload| over |session|'s data channel. Returns false (with a
// warning) when the session has no data channel.
bool Call::SendData(Session* session,
                    const SendDataParams& params,
                    const talk_base::Buffer& payload,
                    SendDataResult* result) {
  DataChannel* data_channel = GetDataChannel(session);
  if (!data_channel) {
    LOG(LS_WARNING) << "Could not send data: no data channel.";
    return false;
  }

  return data_channel->SendData(params, payload, result);
}
+
// Queues DTMF digit |event| for playout; digits beyond kMaxDTMFDigits are
// silently dropped. Playback is kicked off immediately when idle, otherwise
// the queue drains via ContinuePlayDTMF's delayed messages.
void Call::PressDTMF(int event) {
  // Queue up this digit
  if (queued_dtmf_.size() < kMaxDTMFDigits) {
    LOG(LS_INFO) << "Call::PressDTMF(" << event << ")";

    queued_dtmf_.push_back(event);

    if (!playing_dtmf_) {
      ContinuePlayDTMF();
    }
  }
}
+
// Builds a nominal capture format for a screencast at |fps|.
cricket::VideoFormat ScreencastFormatFromFps(int fps) {
  // The capturer pretty much ignore this, but just in case we give it
  // a resolution big enough to cover any expected desktop. In any
  // case, it can't be 0x0, or the CaptureManager will fail to use it.
  return cricket::VideoFormat(
      1, 1,
      cricket::VideoFormat::FpsToInterval(fps), cricket::FOURCC_ANY);
}
+
+bool Call::StartScreencast(Session* session,
+ const std::string& streamid, uint32 ssrc,
+ const ScreencastId& screencastid, int fps) {
+ MediaSessionMap::iterator it = media_session_map_.find(session->id());
+ if (it == media_session_map_.end()) {
+ return false;
+ }
+
+ VideoChannel *video_channel = GetVideoChannel(session);
+ if (!video_channel) {
+ LOG(LS_WARNING) << "Cannot add screencast"
+ << " because there is no video channel.";
+ return false;
+ }
+
+ VideoCapturer *capturer = video_channel->AddScreencast(ssrc, screencastid);
+ if (capturer == NULL) {
+ LOG(LS_WARNING) << "Could not create screencast capturer.";
+ return false;
+ }
+
+ VideoFormat format = ScreencastFormatFromFps(fps);
+ if (!session_client_->channel_manager()->StartVideoCapture(
+ capturer, format)) {
+ LOG(LS_WARNING) << "Could not start video capture.";
+ video_channel->RemoveScreencast(ssrc);
+ return false;
+ }
+
+ if (!video_channel->SetCapturer(ssrc, capturer)) {
+ LOG(LS_WARNING) << "Could not start sending screencast.";
+ session_client_->channel_manager()->StopVideoCapture(
+ capturer, ScreencastFormatFromFps(fps));
+ video_channel->RemoveScreencast(ssrc);
+ }
+
+ // TODO(pthatcher): Once the CaptureManager has a nicer interface
+ // for removing captures (such as having StartCapture return a
+ // handle), remove this StartedCapture stuff.
+ it->second.started_screencasts.insert(
+ std::make_pair(ssrc, StartedCapture(capturer, format)));
+
+ // TODO(pthatcher): Verify we aren't re-using an existing id or
+ // ssrc.
+ StreamParams stream;
+ stream.id = streamid;
+ stream.ssrcs.push_back(ssrc);
+ VideoContentDescription* video = CreateVideoStreamUpdate(stream);
+
+ // TODO(pthatcher): Wait until view request before sending video.
+ video_channel->SetLocalContent(video, CA_UPDATE);
+ SendVideoStreamUpdate(session, video);
+ return true;
+}
+
+bool Call::StopScreencast(Session* session,
+ const std::string& streamid, uint32 ssrc) {
+ if (!StopScreencastWithoutSendingUpdate(session, ssrc)) {
+ return false;
+ }
+
+ VideoChannel *video_channel = GetVideoChannel(session);
+ if (!video_channel) {
+ LOG(LS_WARNING) << "Cannot add screencast"
+ << " because there is no video channel.";
+ return false;
+ }
+
+ StreamParams stream;
+ stream.id = streamid;
+ // No ssrcs
+ VideoContentDescription* video = CreateVideoStreamUpdate(stream);
+
+ video_channel->SetLocalContent(video, CA_UPDATE);
+ SendVideoStreamUpdate(session, video);
+ return true;
+}
+
// Tears down the capture side of a screencast (detach capturer, stop
// capture, remove from channel and bookkeeping) without notifying the
// remote side. Returns false if the session, channel, or capturer is
// missing, or if capture could not be stopped.
bool Call::StopScreencastWithoutSendingUpdate(
    Session* session, uint32 ssrc) {
  MediaSessionMap::iterator it = media_session_map_.find(session->id());
  if (it == media_session_map_.end()) {
    return false;
  }

  VideoChannel *video_channel = GetVideoChannel(session);
  if (!video_channel) {
    LOG(LS_WARNING) << "Cannot remove screencast"
                    << " because there is no video channel.";
    return false;
  }

  StartedScreencastMap::const_iterator screencast_iter =
      it->second.started_screencasts.find(ssrc);
  if (screencast_iter == it->second.started_screencasts.end()) {
    LOG(LS_WARNING) << "Could not stop screencast " << ssrc
                    << " because there is no capturer.";
    return false;
  }

  VideoCapturer* capturer = screencast_iter->second.capturer;
  VideoFormat format = screencast_iter->second.format;
  // Detach before stopping so no frames are delivered mid-teardown.
  video_channel->SetCapturer(ssrc, NULL);
  if (!session_client_->channel_manager()->StopVideoCapture(
          capturer, format)) {
    LOG(LS_WARNING) << "Could not stop screencast " << ssrc
                    << " because could not stop capture.";
    return false;
  }
  video_channel->RemoveScreencast(ssrc);
  it->second.started_screencasts.erase(ssrc);
  return true;
}
+
+VideoContentDescription* Call::CreateVideoStreamUpdate(
+    const StreamParams& stream) {
+  // Builds a partial, multistream video description carrying exactly one
+  // stream entry.  The caller owns the returned description.
+  VideoContentDescription* desc = new VideoContentDescription();
+  desc->set_partial(true);
+  desc->set_multistream(true);
+  desc->AddStream(stream);
+  return desc;
+}
+
+void Call::SendVideoStreamUpdate(
+    Session* session, VideoContentDescription* video) {
+  // Sends |video| to the remote side as a description-info message attached
+  // to the session's existing video content.  No-op (with a warning) if the
+  // local description has no video content.
+  // Takes the ownership of |video|.
+  talk_base::scoped_ptr<VideoContentDescription> description(video);
+  const ContentInfo* video_info =
+      GetFirstVideoContent(session->local_description());
+  if (video_info == NULL) {
+    LOG(LS_WARNING) << "Cannot send stream update for video.";
+    return;
+  }
+
+  std::vector<ContentInfo> contents;
+  contents.push_back(
+      ContentInfo(video_info->name, video_info->type, description.get()));
+
+  session->SendDescriptionInfoMessage(contents);
+}
+
+void Call::ContinuePlayDTMF() {
+  // Invoked when the previous tone has finished: plays the next queued tone
+  // (if any) on every session's voice channel and reschedules itself via a
+  // delayed MSG_PLAYDTMF message.
+  playing_dtmf_ = false;
+
+  // Check to see if we have a queued tone
+  if (!queued_dtmf_.empty()) {  // idiomatic emptiness test (was size() > 0)
+    playing_dtmf_ = true;
+
+    int tone = queued_dtmf_.front();
+    queued_dtmf_.pop_front();
+
+    LOG(LS_INFO) << "Call::ContinuePlayDTMF(" << tone << ")";
+    for (MediaSessionMap::iterator it = media_session_map_.begin();
+         it != media_session_map_.end(); ++it) {
+      if (it->second.voice_channel != NULL) {
+        it->second.voice_channel->PressDTMF(tone, true);
+      }
+    }
+
+    // Post a message to play the next tone or at least clear the playing_dtmf_
+    // bit.
+    talk_base::Thread::Current()->PostDelayed(kDTMFDelay, this, MSG_PLAYDTMF);
+  }
+}
+
+void Call::Join(Call* call, bool enable) {
+  // Absorbs all of |call|'s media sessions into this call, rewiring their
+  // session signals to this object's handlers; |call| is left with no
+  // sessions.
+  for (MediaSessionMap::iterator it = call->media_session_map_.begin();
+       it != call->media_session_map_.end(); ++it) {
+    // Shouldn't already exist.
+    ASSERT(media_session_map_.find(it->first) == media_session_map_.end());
+    media_session_map_[it->first] = it->second;
+
+    // Reconnect the session's signals to this call's handlers.  (sigslot
+    // connections to the old call die with it.)
+    it->second.session->SignalState.connect(this, &Call::OnSessionState);
+    it->second.session->SignalError.connect(this, &Call::OnSessionError);
+    it->second.session->SignalReceivedTerminateReason
+        .connect(this, &Call::OnReceivedTerminateReason);
+
+    EnableSessionChannels(it->second.session, enable);
+  }
+
+  // Moved all the sessions over, so the other call should no longer have any.
+  call->media_session_map_.clear();
+}
+
+void Call::StartConnectionMonitor(Session* session, int cms) {
+  // Begins polling connection statistics every |cms| ms on the session's
+  // voice and video channels (whichever exist), forwarding results through
+  // OnConnectionMonitor.
+  VoiceChannel* voice = GetVoiceChannel(session);
+  if (voice != NULL) {
+    voice->SignalConnectionMonitor.connect(this, &Call::OnConnectionMonitor);
+    voice->StartConnectionMonitor(cms);
+  }
+
+  VideoChannel* video = GetVideoChannel(session);
+  if (video != NULL) {
+    video->SignalConnectionMonitor.connect(this, &Call::OnConnectionMonitor);
+    video->StartConnectionMonitor(cms);
+  }
+}
+
+void Call::StopConnectionMonitor(Session* session) {
+  // Halts connection polling on both channels and detaches our handlers
+  // from their signals.
+  VoiceChannel* voice = GetVoiceChannel(session);
+  if (voice != NULL) {
+    voice->StopConnectionMonitor();
+    voice->SignalConnectionMonitor.disconnect(this);
+  }
+
+  VideoChannel* video = GetVideoChannel(session);
+  if (video != NULL) {
+    video->StopConnectionMonitor();
+    video->SignalConnectionMonitor.disconnect(this);
+  }
+}
+
+void Call::StartAudioMonitor(Session* session, int cms) {
+  // Begins polling audio levels every |cms| ms on the session's voice
+  // channel, if it has one, forwarding results through OnAudioMonitor.
+  VoiceChannel* voice = GetVoiceChannel(session);
+  if (voice != NULL) {
+    voice->SignalAudioMonitor.connect(this, &Call::OnAudioMonitor);
+    voice->StartAudioMonitor(cms);
+  }
+}
+
+void Call::StopAudioMonitor(Session* session) {
+  // Halts audio-level polling and detaches our handler from the signal.
+  VoiceChannel* voice = GetVoiceChannel(session);
+  if (voice != NULL) {
+    voice->StopAudioMonitor();
+    voice->SignalAudioMonitor.disconnect(this);
+  }
+}
+
+bool Call::IsAudioMonitorRunning(Session* session) {
+  // True iff the session has a voice channel whose audio monitor is active.
+  VoiceChannel* voice = GetVoiceChannel(session);
+  return voice != NULL && voice->IsAudioMonitorRunning();
+}
+
+void Call::StartSpeakerMonitor(Session* session) {
+  // Creates and starts a CurrentSpeakerMonitor for |session|, implicitly
+  // starting the audio monitor it depends on.  No-op (with a warning) if a
+  // speaker monitor is already running for this session.
+  if (speaker_monitor_map_.find(session->id()) == speaker_monitor_map_.end()) {
+    if (!IsAudioMonitorRunning(session)) {
+      StartAudioMonitor(session, kAudioMonitorPollPeriodMillis);
+    }
+    CurrentSpeakerMonitor* speaker_monitor =
+        new cricket::CurrentSpeakerMonitor(this, session);
+    speaker_monitor->SignalUpdate.connect(this, &Call::OnSpeakerMonitor);
+    speaker_monitor->Start();
+    // Ownership is retained in the map until StopSpeakerMonitor.
+    speaker_monitor_map_[session->id()] = speaker_monitor;
+  } else {
+    LOG(LS_WARNING) << "Already started speaker monitor for session "
+                    << session->id() << ".";
+  }
+}
+
+void Call::StopSpeakerMonitor(Session* session) {
+  // Stops and destroys the speaker monitor for |session|, if one exists.
+  // Use the find() iterator throughout so the map is searched once (the
+  // original code performed three separate lookups: find, operator[], and
+  // erase-by-key).
+  std::map<std::string, CurrentSpeakerMonitor*>::iterator it =
+      speaker_monitor_map_.find(session->id());
+  if (it == speaker_monitor_map_.end()) {
+    LOG(LS_WARNING) << "Speaker monitor for session "
+                    << session->id() << " already stopped.";
+  } else {
+    CurrentSpeakerMonitor* monitor = it->second;
+    monitor->Stop();
+    speaker_monitor_map_.erase(it);
+    delete monitor;
+  }
+}
+
+void Call::OnConnectionMonitor(VoiceChannel* channel,
+                               const std::vector<ConnectionInfo> &infos) {
+  // Forwards voice-channel connection statistics to the call's observers.
+  SignalConnectionMonitor(this, infos);
+}
+
+void Call::OnMediaMonitor(VoiceChannel* channel, const VoiceMediaInfo& info) {
+  // Caches the most recent voice statistics (see last_voice_media_info())
+  // before forwarding them to observers.
+  last_voice_media_info_ = info;
+  SignalMediaMonitor(this, info);
+}
+
+void Call::OnAudioMonitor(VoiceChannel* channel, const AudioInfo& info) {
+  // Forwards audio level information to the call's observers.
+  SignalAudioMonitor(this, info);
+}
+
+void Call::OnSpeakerMonitor(CurrentSpeakerMonitor* monitor, uint32 ssrc) {
+  // Translates the dominant-speaker |ssrc| into its audio StreamParams and
+  // notifies observers.  If no stream matches, |stream| stays empty, which
+  // per the signal's contract means "no current speaker".
+  Session* session = static_cast<Session*>(monitor->session());
+  MediaStreams* recv_streams = GetMediaStreams(session);
+  if (recv_streams) {
+    StreamParams stream;
+    recv_streams->GetAudioStream(StreamSelector(ssrc), &stream);
+    SignalSpeakerMonitor(this, session, stream);
+  }
+}
+
+void Call::OnConnectionMonitor(VideoChannel* channel,
+                               const std::vector<ConnectionInfo> &infos) {
+  // Forwards video-channel connection statistics to the call's observers.
+  SignalVideoConnectionMonitor(this, infos);
+}
+
+void Call::OnMediaMonitor(VideoChannel* channel, const VideoMediaInfo& info) {
+  // Forwards video media statistics to the call's observers.
+  SignalVideoMediaMonitor(this, info);
+}
+
+void Call::OnDataReceived(DataChannel* channel,
+                          const ReceiveDataParams& params,
+                          const talk_base::Buffer& payload) {
+  // Forwards an incoming data-channel payload to the call's observers.
+  SignalDataReceived(this, params, payload);
+}
+
+uint32 Call::id() {
+  // Returns the call's unique id (assigned at construction; see id_).
+  return id_;
+}
+
+void Call::OnSessionState(BaseSession* base_session, BaseSession::State state) {
+  // Reacts to session state transitions.  Any definitive answer from the
+  // remote side (accept, reject, or terminate) cancels the pending
+  // no-answer auto-termination timer scheduled at initiation.
+  Session* session = static_cast<Session*>(base_session);
+  switch (state) {
+    case Session::STATE_RECEIVEDACCEPT:
+      // On accept, sync our stream bookkeeping with the remote description
+      // (channels themselves are not updated here: update_channels=false).
+      UpdateRemoteMediaStreams(session,
+          session->remote_description()->contents(), false);
+      session_client_->session_manager()->signaling_thread()->Clear(this,
+          MSG_TERMINATECALL);
+      break;
+    case Session::STATE_RECEIVEDREJECT:
+    case Session::STATE_RECEIVEDTERMINATE:
+      session_client_->session_manager()->signaling_thread()->Clear(this,
+          MSG_TERMINATECALL);
+      break;
+    default:
+      break;
+  }
+  SignalSessionState(this, session, state);
+}
+
+void Call::OnSessionError(BaseSession* base_session, Session::Error error) {
+  // A session error also cancels the pending auto-termination timer before
+  // notifying observers.
+  session_client_->session_manager()->signaling_thread()->Clear(this,
+      MSG_TERMINATECALL);
+  SignalSessionError(this, static_cast<Session*>(base_session), error);
+}
+
+void Call::OnSessionInfoMessage(Session* session,
+                                const buzz::XmlElement* action_elem) {
+  // Handles in-session info messages.  Only Jingle view requests are
+  // recognized here; anything else is silently ignored.
+  if (!IsJingleViewRequest(action_elem)) {
+    return;
+  }
+
+  ViewRequest view_request;
+  ParseError error;
+  if (!ParseJingleViewRequest(action_elem, &view_request, &error)) {
+    LOG(LS_WARNING) << "Failed to parse view request: " << error.text;
+    return;
+  }
+
+  VideoChannel* video_channel = GetVideoChannel(session);
+  if (video_channel == NULL) {
+    LOG(LS_WARNING) << "Ignore view request since we have no video channel.";
+    return;
+  }
+
+  if (!video_channel->ApplyViewRequest(view_request)) {
+    LOG(LS_WARNING) << "Failed to ApplyViewRequest.";
+  }
+}
+
+void Call::OnRemoteDescriptionUpdate(BaseSession* base_session,
+                                     const ContentInfos& updated_contents) {
+  // Applies a remote content update: codec changes are pushed to the
+  // matching channels first, then stream additions/removals are processed
+  // (with update_channels=true so channels learn about new recv streams).
+  Session* session = static_cast<Session*>(base_session);
+
+  const ContentInfo* audio_content = GetFirstAudioContent(updated_contents);
+  if (audio_content) {
+    const AudioContentDescription* audio_update =
+        static_cast<const AudioContentDescription*>(audio_content->description);
+    // An empty codec list means the update carries no codec changes.
+    if (!audio_update->codecs().empty()) {
+      UpdateVoiceChannelRemoteContent(session, audio_update);
+    }
+  }
+
+  const ContentInfo* video_content = GetFirstVideoContent(updated_contents);
+  if (video_content) {
+    const VideoContentDescription* video_update =
+        static_cast<const VideoContentDescription*>(video_content->description);
+    if (!video_update->codecs().empty()) {
+      UpdateVideoChannelRemoteContent(session, video_update);
+    }
+  }
+
+  const ContentInfo* data_content = GetFirstDataContent(updated_contents);
+  if (data_content) {
+    const DataContentDescription* data_update =
+        static_cast<const DataContentDescription*>(data_content->description);
+    if (!data_update->codecs().empty()) {
+      UpdateDataChannelRemoteContent(session, data_update);
+    }
+  }
+
+  UpdateRemoteMediaStreams(session, updated_contents, true);
+}
+
+bool Call::UpdateVoiceChannelRemoteContent(
+    Session* session, const AudioContentDescription* audio) {
+  // Applies a remote audio-content update to the session's voice channel.
+  // On SetRemoteContent failure, the session is put into the content-error
+  // state.
+  VoiceChannel* voice_channel = GetVoiceChannel(session);
+  if (!voice_channel) {
+    // Guard against an update arriving for a session with no voice channel;
+    // every other GetVoiceChannel() caller in this file checks for NULL.
+    LOG(LS_WARNING) << "Ignoring audio update:"
+                    << " session has no voice channel.";
+    return false;
+  }
+  if (!voice_channel->SetRemoteContent(audio, CA_UPDATE)) {
+    LOG(LS_ERROR) << "Failure in audio SetRemoteContent with CA_UPDATE";
+    session->SetError(BaseSession::ERROR_CONTENT);
+    return false;
+  }
+  return true;
+}
+
+bool Call::UpdateVideoChannelRemoteContent(
+    Session* session, const VideoContentDescription* video) {
+  // Applies a remote video-content update to the session's video channel.
+  // On SetRemoteContent failure, the session is put into the content-error
+  // state.
+  VideoChannel* video_channel = GetVideoChannel(session);
+  if (!video_channel) {
+    // Guard against an update arriving for a session with no video channel;
+    // every other GetVideoChannel() caller in this file checks for NULL.
+    LOG(LS_WARNING) << "Ignoring video update:"
+                    << " session has no video channel.";
+    return false;
+  }
+  if (!video_channel->SetRemoteContent(video, CA_UPDATE)) {
+    LOG(LS_ERROR) << "Failure in video SetRemoteContent with CA_UPDATE";
+    session->SetError(BaseSession::ERROR_CONTENT);
+    return false;
+  }
+  return true;
+}
+
+bool Call::UpdateDataChannelRemoteContent(
+    Session* session, const DataContentDescription* data) {
+  // Applies a remote data-content update to the session's data channel.
+  // On SetRemoteContent failure, the session is put into the content-error
+  // state.
+  DataChannel* data_channel = GetDataChannel(session);
+  if (!data_channel) {
+    // Guard against an update arriving for a session with no data channel;
+    // every other GetDataChannel() caller in this file checks for NULL.
+    LOG(LS_WARNING) << "Ignoring data update:"
+                    << " session has no data channel.";
+    return false;
+  }
+  if (!data_channel->SetRemoteContent(data, CA_UPDATE)) {
+    LOG(LS_ERROR) << "Failure in data SetRemoteContent with CA_UPDATE";
+    session->SetError(BaseSession::ERROR_CONTENT);
+    return false;
+  }
+  return true;
+}
+
+void Call::UpdateRemoteMediaStreams(Session* session,
+                                    const ContentInfos& updated_contents,
+                                    bool update_channels) {
+  // Reconciles our receive-stream bookkeeping (and, when |update_channels|
+  // is true, the media channels themselves) with the streams announced in
+  // |updated_contents|, then signals the net additions and removals.
+  MediaStreams* recv_streams = GetMediaStreams(session);
+  if (!recv_streams)
+    return;
+
+  cricket::MediaStreams added_streams;
+  cricket::MediaStreams removed_streams;
+
+  const ContentInfo* audio_content = GetFirstAudioContent(updated_contents);
+  if (audio_content) {
+    const AudioContentDescription* audio_update =
+        static_cast<const AudioContentDescription*>(audio_content->description);
+    UpdateRecvStreams(audio_update->streams(),
+                      update_channels ? GetVoiceChannel(session) : NULL,
+                      recv_streams->mutable_audio(),
+                      added_streams.mutable_audio(),
+                      removed_streams.mutable_audio());
+  }
+
+  const ContentInfo* video_content = GetFirstVideoContent(updated_contents);
+  if (video_content) {
+    const VideoContentDescription* video_update =
+        static_cast<const VideoContentDescription*>(video_content->description);
+    UpdateRecvStreams(video_update->streams(),
+                      update_channels ? GetVideoChannel(session) : NULL,
+                      recv_streams->mutable_video(),
+                      added_streams.mutable_video(),
+                      removed_streams.mutable_video());
+  }
+
+  const ContentInfo* data_content = GetFirstDataContent(updated_contents);
+  if (data_content) {
+    const DataContentDescription* data_update =
+        static_cast<const DataContentDescription*>(data_content->description);
+    UpdateRecvStreams(data_update->streams(),
+                      update_channels ? GetDataChannel(session) : NULL,
+                      recv_streams->mutable_data(),
+                      added_streams.mutable_data(),
+                      removed_streams.mutable_data());
+  }
+
+  // Only notify observers when the update actually changed something.
+  if (!added_streams.empty() || !removed_streams.empty()) {
+    SignalMediaStreamsUpdate(this, session, added_streams, removed_streams);
+  }
+}
+
+// Computes the difference between the currently-known |streams| and the
+// |updates| from a partial content description.  A known stream whose
+// update carries no ssrcs is a removal; an unknown stream that does carry
+// ssrcs is an addition.
+void FindStreamChanges(const std::vector<StreamParams>& streams,
+                       const std::vector<StreamParams>& updates,
+                       std::vector<StreamParams>* added_streams,
+                       std::vector<StreamParams>* removed_streams) {
+  for (std::vector<StreamParams>::const_iterator update = updates.begin();
+       update != updates.end(); ++update) {
+    StreamParams stream;
+    if (GetStreamByIds(streams, update->groupid, update->id, &stream)) {
+      if (!update->has_ssrcs()) {
+        removed_streams->push_back(stream);
+      }
+    } else {
+      // There's a bug on reflector that will send <stream>s even
+      // though there is not ssrc (which means there isn't really a
+      // stream). To work around it, we simply ignore new <stream>s
+      // that don't have any ssrcs.
+      if (update->has_ssrcs()) {
+        added_streams->push_back(*update);
+      }
+    }
+  }
+}
+
+void Call::UpdateRecvStreams(const std::vector<StreamParams>& update_streams,
+                             BaseChannel* channel,
+                             std::vector<StreamParams>* recv_streams,
+                             std::vector<StreamParams>* added_streams,
+                             std::vector<StreamParams>* removed_streams) {
+  // Diffs |update_streams| against |recv_streams|, then applies the
+  // resulting additions and removals to the channel and our bookkeeping.
+  FindStreamChanges(*recv_streams, update_streams,
+                    added_streams, removed_streams);
+  AddRecvStreams(*added_streams, channel, recv_streams);
+  RemoveRecvStreams(*removed_streams, channel, recv_streams);
+}
+
+void Call::AddRecvStreams(const std::vector<StreamParams>& added_streams,
+                          BaseChannel* channel,
+                          std::vector<StreamParams>* recv_streams) {
+  // Registers each newly-added stream; see AddRecvStream for details.
+  for (size_t i = 0; i < added_streams.size(); ++i) {
+    AddRecvStream(added_streams[i], channel, recv_streams);
+  }
+}
+
+void Call::AddRecvStream(const StreamParams& stream,
+                         BaseChannel* channel,
+                         std::vector<StreamParams>* recv_streams) {
+  // Tell the media channel about the stream (only meaningful when it
+  // actually carries ssrcs), then record it in our receive-stream list.
+  const bool notify_channel = (channel != NULL) && stream.has_ssrcs();
+  if (notify_channel) {
+    channel->AddRecvStream(stream);
+  }
+  recv_streams->push_back(stream);
+}
+
+void Call::RemoveRecvStreams(const std::vector<StreamParams>& removed_streams,
+                             BaseChannel* channel,
+                             std::vector<StreamParams>* recv_streams) {
+  // Unregisters each removed stream; see RemoveRecvStream for details.
+  for (size_t i = 0; i < removed_streams.size(); ++i) {
+    RemoveRecvStream(removed_streams[i], channel, recv_streams);
+  }
+}
+
+void Call::RemoveRecvStream(const StreamParams& stream,
+                            BaseChannel* channel,
+                            std::vector<StreamParams>* recv_streams) {
+  // Removes |stream| from the media channel (keyed by its first ssrc) and
+  // from our receive-stream bookkeeping.
+  if (channel && stream.has_ssrcs()) {
+    // TODO(pthatcher): Change RemoveRecvStream to take a stream argument.
+    channel->RemoveRecvStream(stream.first_ssrc());
+  }
+  RemoveStreamByIds(recv_streams, stream.groupid, stream.id);
+}
+
+void Call::OnReceivedTerminateReason(Session* session,
+                                     const std::string& reason) {
+  // A remote terminate cancels the pending auto-termination timer before
+  // the reason is forwarded to observers.
+  session_client_->session_manager()->signaling_thread()->Clear(this,
+      MSG_TERMINATECALL);
+  SignalReceivedTerminateReason(this, session, reason);
+}
+
+// TODO(mdodd): Get rid of this method since all Hangouts are using a secure
+// connection.
+bool Call::secure() const {
+  // A call is secure when security is enabled on the session client and
+  // every session's local AND remote descriptions contain crypto params.
+  if (session_client_->secure() == SEC_DISABLED) {
+    return false;
+  }
+
+  bool ret = true;
+  int i = 0;  // Session index, used for logging only.
+
+  MediaSessionMap::const_iterator it;
+  for (it = media_session_map_.begin(); it != media_session_map_.end(); ++it) {
+    LOG_F(LS_VERBOSE) << "session[" << i
+                      << "], check local and remote descriptions";
+    i++;
+
+    // One insecure session makes the whole call insecure.
+    if (!SessionDescriptionContainsCrypto(
+            it->second.session->local_description()) ||
+        !SessionDescriptionContainsCrypto(
+            it->second.session->remote_description())) {
+      ret = false;
+      break;
+    }
+  }
+
+  LOG_F(LS_VERBOSE) << "secure=" << ret;
+  return ret;
+}
+
+bool Call::SessionDescriptionContainsCrypto(
+    const SessionDescription* sdesc) const {
+  // A description counts as crypto-enabled only when both its audio and
+  // its video content carry crypto parameters.
+  if (sdesc == NULL) {
+    LOG_F(LS_VERBOSE) << "sessionDescription is NULL";
+    return false;
+  }
+
+  const ContentInfo* audio = sdesc->GetContentByName(CN_AUDIO);
+  const ContentInfo* video = sdesc->GetContentByName(CN_VIDEO);
+  return ContentContainsCrypto(audio) && ContentContainsCrypto(video);
+}
+
+Session* Call::InternalInitiateSession(const std::string& id,
+                                       const buzz::Jid& to,
+                                       const std::string& initiator_name,
+                                       const CallOptions& options) {
+  // Creates a session carrying our offer and sends the initiate to |to|.
+  // Also schedules automatic termination in case the callee never answers
+  // (the timer is cleared in OnSessionState when an answer arrives).
+  const SessionDescription* offer = session_client_->CreateOffer(options);
+
+  Session* session = session_client_->CreateSession(id, this);
+  session->set_initiator_name(initiator_name);
+
+  AddSession(session, offer);
+  session->Initiate(to.Str(), offer);
+
+  // After this timeout, terminate the call because the callee isn't
+  // answering
+  session_client_->session_manager()->signaling_thread()->Clear(this,
+      MSG_TERMINATECALL);
+  session_client_->session_manager()->signaling_thread()->PostDelayed(
+      send_to_voicemail_ ? kSendToVoicemailTimeout : kNoVoicemailTimeout,
+      this, MSG_TERMINATECALL);
+  return session;
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/call.h b/chromium/third_party/libjingle/source/talk/session/media/call.h
new file mode 100644
index 00000000000..9b0a6c9c4a1
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/call.h
@@ -0,0 +1,284 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_CALL_H_
+#define TALK_SESSION_MEDIA_CALL_H_
+
+#include <deque>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "talk/base/messagequeue.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/base/screencastid.h"
+#include "talk/media/base/streamparams.h"
+#include "talk/media/base/videocommon.h"
+#include "talk/p2p/base/session.h"
+#include "talk/p2p/client/socketmonitor.h"
+#include "talk/session/media/audiomonitor.h"
+#include "talk/session/media/currentspeakermonitor.h"
+#include "talk/session/media/mediamessages.h"
+#include "talk/session/media/mediasession.h"
+#include "talk/xmpp/jid.h"
+
+namespace cricket {
+
+class MediaSessionClient;
+class BaseChannel;
+class VoiceChannel;
+class VideoChannel;
+class DataChannel;
+
+// Can't typedef this easily since it's forward declared as struct elsewhere.
+// Currently adds nothing beyond MediaSessionOptions; it exists so callers
+// can use the Call-specific name.
+struct CallOptions : public MediaSessionOptions {
+};
+
+// A Call groups one or more media Sessions (each with optional voice,
+// video, and data channels plus receive-stream bookkeeping), drives their
+// connection/audio/speaker monitors and a queued DTMF player, and
+// re-broadcasts channel and session events through the sigslot signals
+// declared below.
+class Call : public talk_base::MessageHandler, public sigslot::has_slots<> {
+ public:
+  explicit Call(MediaSessionClient* session_client);
+  ~Call();
+
+  // |initiator| can be empty.
+  Session* InitiateSession(const buzz::Jid& to, const buzz::Jid& initiator,
+                           const CallOptions& options);
+  Session* InitiateSession(const std::string& id, const buzz::Jid& to,
+                           const CallOptions& options);
+  void AcceptSession(Session* session, const CallOptions& options);
+  void RejectSession(Session* session);
+  void TerminateSession(Session* session);
+  void Terminate();
+  bool SendViewRequest(Session* session,
+                       const ViewRequest& view_request);
+  void SetLocalRenderer(VideoRenderer* renderer);
+  void SetVideoRenderer(Session* session, uint32 ssrc,
+                        VideoRenderer* renderer);
+  // Monitor control; see the corresponding On*Monitor signals below.
+  void StartConnectionMonitor(Session* session, int cms);
+  void StopConnectionMonitor(Session* session);
+  void StartAudioMonitor(Session* session, int cms);
+  void StopAudioMonitor(Session* session);
+  bool IsAudioMonitorRunning(Session* session);
+  void StartSpeakerMonitor(Session* session);
+  void StopSpeakerMonitor(Session* session);
+  void Mute(bool mute);
+  void MuteVideo(bool mute);
+  bool SendData(Session* session,
+                const SendDataParams& params,
+                const talk_base::Buffer& payload,
+                SendDataResult* result);
+  void PressDTMF(int event);
+  bool StartScreencast(Session* session,
+                       const std::string& stream_name, uint32 ssrc,
+                       const ScreencastId& screencastid, int fps);
+  bool StopScreencast(Session* session,
+                      const std::string& stream_name, uint32 ssrc);
+
+  std::vector<Session*> sessions();
+  uint32 id();
+  bool has_video() const { return has_video_; }
+  bool has_data() const { return has_data_; }
+  bool muted() const { return muted_; }
+  bool video() const { return has_video_; }
+  bool secure() const;
+  bool video_muted() const { return video_muted_; }
+  // Receive-stream accessors; each returns NULL when the session is
+  // unknown to this call.
+  const std::vector<StreamParams>* GetDataRecvStreams(Session* session) const {
+    MediaStreams* recv_streams = GetMediaStreams(session);
+    return recv_streams ? &recv_streams->data() : NULL;
+  }
+  const std::vector<StreamParams>* GetVideoRecvStreams(Session* session) const {
+    MediaStreams* recv_streams = GetMediaStreams(session);
+    return recv_streams ? &recv_streams->video() : NULL;
+  }
+  const std::vector<StreamParams>* GetAudioRecvStreams(Session* session) const {
+    MediaStreams* recv_streams = GetMediaStreams(session);
+    return recv_streams ? &recv_streams->audio() : NULL;
+  }
+  // Public just for unit tests
+  VideoContentDescription* CreateVideoStreamUpdate(const StreamParams& stream);
+  // Takes ownership of video.
+  void SendVideoStreamUpdate(Session* session, VideoContentDescription* video);
+
+  // Setting this to false will cause the call to have a longer timeout and
+  // for the SignalSetupToCallVoicemail to never fire.
+  void set_send_to_voicemail(bool send_to_voicemail) {
+    send_to_voicemail_ = send_to_voicemail;
+  }
+  bool send_to_voicemail() { return send_to_voicemail_; }
+  const VoiceMediaInfo& last_voice_media_info() const {
+    return last_voice_media_info_;
+  }
+
+  // Sets a flag on the chatapp that will redirect the call to voicemail once
+  // the call has been terminated
+  sigslot::signal0<> SignalSetupToCallVoicemail;
+  sigslot::signal2<Call*, Session*> SignalAddSession;
+  sigslot::signal2<Call*, Session*> SignalRemoveSession;
+  sigslot::signal3<Call*, Session*, Session::State>
+      SignalSessionState;
+  sigslot::signal3<Call*, Session*, Session::Error>
+      SignalSessionError;
+  sigslot::signal3<Call*, Session*, const std::string &>
+      SignalReceivedTerminateReason;
+  sigslot::signal2<Call*, const std::vector<ConnectionInfo> &>
+      SignalConnectionMonitor;
+  sigslot::signal2<Call*, const VoiceMediaInfo&> SignalMediaMonitor;
+  sigslot::signal2<Call*, const AudioInfo&> SignalAudioMonitor;
+  // Empty nick on StreamParams means "unknown".
+  // No ssrcs in StreamParams means "no current speaker".
+  sigslot::signal3<Call*,
+                   Session*,
+                   const StreamParams&> SignalSpeakerMonitor;
+  sigslot::signal2<Call*, const std::vector<ConnectionInfo> &>
+      SignalVideoConnectionMonitor;
+  sigslot::signal2<Call*, const VideoMediaInfo&> SignalVideoMediaMonitor;
+  // Gives added streams and removed streams, in that order.
+  sigslot::signal4<Call*,
+                   Session*,
+                   const MediaStreams&,
+                   const MediaStreams&> SignalMediaStreamsUpdate;
+  sigslot::signal3<Call*,
+                   const ReceiveDataParams&,
+                   const talk_base::Buffer&> SignalDataReceived;
+
+ private:
+  void OnMessage(talk_base::Message* message);
+  void OnSessionState(BaseSession* base_session, BaseSession::State state);
+  void OnSessionError(BaseSession* base_session, Session::Error error);
+  void OnSessionInfoMessage(
+      Session* session, const buzz::XmlElement* action_elem);
+  void OnViewRequest(
+      Session* session, const ViewRequest& view_request);
+  void OnRemoteDescriptionUpdate(
+      BaseSession* base_session, const ContentInfos& updated_contents);
+  void OnReceivedTerminateReason(Session* session, const std::string &reason);
+  void IncomingSession(Session* session, const SessionDescription* offer);
+  // Returns true on success.
+  bool AddSession(Session* session, const SessionDescription* offer);
+  void RemoveSession(Session* session);
+  void EnableChannels(bool enable);
+  void EnableSessionChannels(Session* session, bool enable);
+  void Join(Call* call, bool enable);
+  // Channel-monitor callbacks; these re-broadcast through the public
+  // signals above.
+  void OnConnectionMonitor(VoiceChannel* channel,
+                           const std::vector<ConnectionInfo> &infos);
+  void OnMediaMonitor(VoiceChannel* channel, const VoiceMediaInfo& info);
+  void OnAudioMonitor(VoiceChannel* channel, const AudioInfo& info);
+  void OnSpeakerMonitor(CurrentSpeakerMonitor* monitor, uint32 ssrc);
+  void OnConnectionMonitor(VideoChannel* channel,
+                           const std::vector<ConnectionInfo> &infos);
+  void OnMediaMonitor(VideoChannel* channel, const VideoMediaInfo& info);
+  void OnDataReceived(DataChannel* channel,
+                      const ReceiveDataParams& params,
+                      const talk_base::Buffer& payload);
+  // Per-session lookups; each returns NULL when the session is unknown or
+  // lacks the requested channel.
+  VoiceChannel* GetVoiceChannel(Session* session) const;
+  VideoChannel* GetVideoChannel(Session* session) const;
+  DataChannel* GetDataChannel(Session* session) const;
+  MediaStreams* GetMediaStreams(Session* session) const;
+  void UpdateRemoteMediaStreams(Session* session,
+                                const ContentInfos& updated_contents,
+                                bool update_channels);
+  bool UpdateVoiceChannelRemoteContent(Session* session,
+                                       const AudioContentDescription* audio);
+  bool UpdateVideoChannelRemoteContent(Session* session,
+                                       const VideoContentDescription* video);
+  bool UpdateDataChannelRemoteContent(Session* session,
+                                      const DataContentDescription* data);
+  void UpdateRecvStreams(const std::vector<StreamParams>& update_streams,
+                         BaseChannel* channel,
+                         std::vector<StreamParams>* recv_streams,
+                         std::vector<StreamParams>* added_streams,
+                         std::vector<StreamParams>* removed_streams);
+  void AddRecvStreams(const std::vector<StreamParams>& added_streams,
+                      BaseChannel* channel,
+                      std::vector<StreamParams>* recv_streams);
+  void AddRecvStream(const StreamParams& stream,
+                     BaseChannel* channel,
+                     std::vector<StreamParams>* recv_streams);
+  void RemoveRecvStreams(const std::vector<StreamParams>& removed_streams,
+                         BaseChannel* channel,
+                         std::vector<StreamParams>* recv_streams);
+  void RemoveRecvStream(const StreamParams& stream,
+                        BaseChannel* channel,
+                        std::vector<StreamParams>* recv_streams);
+  void ContinuePlayDTMF();
+  bool StopScreencastWithoutSendingUpdate(Session* session, uint32 ssrc);
+  bool StopAllScreencastsWithoutSendingUpdate(Session* session);
+  bool SessionDescriptionContainsCrypto(const SessionDescription* sdesc) const;
+  Session* InternalInitiateSession(const std::string& id,
+                                   const buzz::Jid& to,
+                                   const std::string& initiator_name,
+                                   const CallOptions& options);
+
+  uint32 id_;
+  MediaSessionClient* session_client_;
+
+  // Records a running screencast capturer and the format it was started
+  // with, so it can be stopped with the same format later.
+  struct StartedCapture {
+    StartedCapture(cricket::VideoCapturer* capturer,
+                   const cricket::VideoFormat& format) :
+        capturer(capturer),
+        format(format) {
+    }
+    cricket::VideoCapturer* capturer;
+    cricket::VideoFormat format;
+  };
+  typedef std::map<uint32, StartedCapture> StartedScreencastMap;
+
+  // Everything this call tracks per session: the session itself, its media
+  // channels, received streams, and running screencasts.
+  struct MediaSession {
+    Session* session;
+    VoiceChannel* voice_channel;
+    VideoChannel* video_channel;
+    DataChannel* data_channel;
+    MediaStreams* recv_streams;
+    StartedScreencastMap started_screencasts;
+  };
+
+  // Create a map of media sessions, keyed off session->id().
+  typedef std::map<std::string, MediaSession> MediaSessionMap;
+  MediaSessionMap media_session_map_;
+
+  std::map<std::string, CurrentSpeakerMonitor*> speaker_monitor_map_;
+  VideoRenderer* local_renderer_;
+  bool has_video_;
+  bool has_data_;
+  bool muted_;
+  bool video_muted_;
+  bool send_to_voicemail_;
+
+  // DTMF tones have to be queued up so that we don't flood the call. We
+  // keep a deque (double-ended queue) of them around. While one is playing we
+  // set the playing_dtmf_ bit and schedule a message in XX msec to clear that
+  // bit or start the next tone playing.
+  std::deque<int> queued_dtmf_;
+  bool playing_dtmf_;
+
+  VoiceMediaInfo last_voice_media_info_;
+
+  friend class MediaSessionClient;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_CALL_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channel.cc b/chromium/third_party/libjingle/source/talk/session/media/channel.cc
new file mode 100644
index 00000000000..1bce2acdccb
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channel.cc
@@ -0,0 +1,2736 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/channel.h"
+
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/media/base/rtputils.h"
+#include "talk/p2p/base/transportchannel.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/mediamessages.h"
+#include "talk/session/media/rtcpmuxfilter.h"
+#include "talk/session/media/typingmonitor.h"
+
+
+namespace cricket {
+
+enum {
+ MSG_ENABLE = 1,
+ MSG_DISABLE,
+ MSG_MUTESTREAM,
+ MSG_ISSTREAMMUTED,
+ MSG_SETREMOTECONTENT,
+ MSG_SETLOCALCONTENT,
+ MSG_EARLYMEDIATIMEOUT,
+ MSG_CANINSERTDTMF,
+ MSG_INSERTDTMF,
+ MSG_GETSTATS,
+ MSG_SETRENDERER,
+ MSG_ADDRECVSTREAM,
+ MSG_REMOVERECVSTREAM,
+ MSG_SETRINGBACKTONE,
+ MSG_PLAYRINGBACKTONE,
+ MSG_SETMAXSENDBANDWIDTH,
+ MSG_ADDSCREENCAST,
+ MSG_REMOVESCREENCAST,
+ MSG_SENDINTRAFRAME,
+ MSG_REQUESTINTRAFRAME,
+ MSG_SCREENCASTWINDOWEVENT,
+ MSG_RTPPACKET,
+ MSG_RTCPPACKET,
+ MSG_CHANNEL_ERROR,
+ MSG_SETCHANNELOPTIONS,
+ MSG_SCALEVOLUME,
+ MSG_HANDLEVIEWREQUEST,
+ MSG_READYTOSENDDATA,
+ MSG_SENDDATA,
+ MSG_DATARECEIVED,
+ MSG_SETCAPTURER,
+ MSG_ISSCREENCASTING,
+ MSG_SCREENCASTFPS,
+ MSG_SETSCREENCASTFACTORY,
+ MSG_FIRSTPACKETRECEIVED,
+ MSG_SESSION_ERROR,
+};
+
+// Value specified in RFC 5764.
+static const char kDtlsSrtpExporterLabel[] = "EXTRACTOR-dtls_srtp";
+
+static const int kAgcMinus10db = -10;
+
+// TODO(hellner): use the device manager for creation of screen capturers when
+// the cl enabling it has landed.
+class NullScreenCapturerFactory : public VideoChannel::ScreenCapturerFactory {
+ public:
+ VideoCapturer* CreateScreenCapturer(const ScreencastId& window) {
+ return NULL;
+ }
+};
+
+
+VideoChannel::ScreenCapturerFactory* CreateScreenCapturerFactory() {
+ return new NullScreenCapturerFactory();
+}
+
+struct SetContentData : public talk_base::MessageData {
+ SetContentData(const MediaContentDescription* content, ContentAction action)
+ : content(content),
+ action(action),
+ result(false) {
+ }
+ const MediaContentDescription* content;
+ ContentAction action;
+ bool result;
+};
+
+struct SetBandwidthData : public talk_base::MessageData {
+ explicit SetBandwidthData(int value) : value(value), result(false) {}
+ int value;
+ bool result;
+};
+
+struct SetRingbackToneMessageData : public talk_base::MessageData {
+ SetRingbackToneMessageData(const void* b, int l)
+ : buf(b),
+ len(l),
+ result(false) {
+ }
+ const void* buf;
+ int len;
+ bool result;
+};
+
+struct PlayRingbackToneMessageData : public talk_base::MessageData {
+ PlayRingbackToneMessageData(uint32 s, bool p, bool l)
+ : ssrc(s),
+ play(p),
+ loop(l),
+ result(false) {
+ }
+ uint32 ssrc;
+ bool play;
+ bool loop;
+ bool result;
+};
+typedef talk_base::TypedMessageData<bool> BoolMessageData;
+struct DtmfMessageData : public talk_base::MessageData {
+ DtmfMessageData(uint32 ssrc, int event, int duration, int flags)
+ : ssrc(ssrc),
+ event(event),
+ duration(duration),
+ flags(flags),
+ result(false) {
+ }
+ uint32 ssrc;
+ int event;
+ int duration;
+ int flags;
+ bool result;
+};
+struct ScaleVolumeMessageData : public talk_base::MessageData {
+ ScaleVolumeMessageData(uint32 s, double l, double r)
+ : ssrc(s),
+ left(l),
+ right(r),
+ result(false) {
+ }
+ uint32 ssrc;
+ double left;
+ double right;
+ bool result;
+};
+
+struct VoiceStatsMessageData : public talk_base::MessageData {
+ explicit VoiceStatsMessageData(VoiceMediaInfo* stats)
+ : result(false),
+ stats(stats) {
+ }
+ bool result;
+ VoiceMediaInfo* stats;
+};
+
+struct VideoStatsMessageData : public talk_base::MessageData {
+ explicit VideoStatsMessageData(VideoMediaInfo* stats)
+ : result(false),
+ stats(stats) {
+ }
+ bool result;
+ VideoMediaInfo* stats;
+};
+
+struct PacketMessageData : public talk_base::MessageData {
+ talk_base::Buffer packet;
+};
+
+struct AudioRenderMessageData: public talk_base::MessageData {
+ AudioRenderMessageData(uint32 s, AudioRenderer* r, bool l)
+ : ssrc(s), renderer(r), is_local(l), result(false) {}
+ uint32 ssrc;
+ AudioRenderer* renderer;
+ bool is_local;
+ bool result;
+};
+
+struct VideoRenderMessageData : public talk_base::MessageData {
+ VideoRenderMessageData(uint32 s, VideoRenderer* r) : ssrc(s), renderer(r) {}
+ uint32 ssrc;
+ VideoRenderer* renderer;
+};
+
+struct AddScreencastMessageData : public talk_base::MessageData {
+ AddScreencastMessageData(uint32 s, const ScreencastId& id)
+ : ssrc(s),
+ window_id(id),
+ result(NULL) {
+ }
+ uint32 ssrc;
+ ScreencastId window_id;
+ VideoCapturer* result;
+};
+
+struct RemoveScreencastMessageData : public talk_base::MessageData {
+ explicit RemoveScreencastMessageData(uint32 s) : ssrc(s), result(false) {}
+ uint32 ssrc;
+ bool result;
+};
+
+struct ScreencastEventMessageData : public talk_base::MessageData {
+ ScreencastEventMessageData(uint32 s, talk_base::WindowEvent we)
+ : ssrc(s),
+ event(we) {
+ }
+ uint32 ssrc;
+ talk_base::WindowEvent event;
+};
+
+struct ViewRequestMessageData : public talk_base::MessageData {
+ explicit ViewRequestMessageData(const ViewRequest& r)
+ : request(r),
+ result(false) {
+ }
+ ViewRequest request;
+ bool result;
+};
+
+struct VoiceChannelErrorMessageData : public talk_base::MessageData {
+ VoiceChannelErrorMessageData(uint32 in_ssrc,
+ VoiceMediaChannel::Error in_error)
+ : ssrc(in_ssrc),
+ error(in_error) {
+ }
+ uint32 ssrc;
+ VoiceMediaChannel::Error error;
+};
+
+struct VideoChannelErrorMessageData : public talk_base::MessageData {
+ VideoChannelErrorMessageData(uint32 in_ssrc,
+ VideoMediaChannel::Error in_error)
+ : ssrc(in_ssrc),
+ error(in_error) {
+ }
+ uint32 ssrc;
+ VideoMediaChannel::Error error;
+};
+
+struct DataChannelErrorMessageData : public talk_base::MessageData {
+ DataChannelErrorMessageData(uint32 in_ssrc,
+ DataMediaChannel::Error in_error)
+ : ssrc(in_ssrc),
+ error(in_error) {}
+ uint32 ssrc;
+ DataMediaChannel::Error error;
+};
+
+struct SessionErrorMessageData : public talk_base::MessageData {
+ explicit SessionErrorMessageData(cricket::BaseSession::Error error)
+ : error_(error) {}
+
+ BaseSession::Error error_;
+};
+
+struct SsrcMessageData : public talk_base::MessageData {
+ explicit SsrcMessageData(uint32 ssrc) : ssrc(ssrc), result(false) {}
+ uint32 ssrc;
+ bool result;
+};
+
+struct StreamMessageData : public talk_base::MessageData {
+ explicit StreamMessageData(const StreamParams& in_sp)
+ : sp(in_sp),
+ result(false) {
+ }
+ StreamParams sp;
+ bool result;
+};
+
+struct MuteStreamData : public talk_base::MessageData {
+ MuteStreamData(uint32 ssrc, bool mute)
+ : ssrc(ssrc), mute(mute), result(false) {}
+ uint32 ssrc;
+ bool mute;
+ bool result;
+};
+
+struct AudioOptionsMessageData : public talk_base::MessageData {
+ explicit AudioOptionsMessageData(const AudioOptions& options)
+ : options(options),
+ result(false) {
+ }
+ AudioOptions options;
+ bool result;
+};
+
+struct VideoOptionsMessageData : public talk_base::MessageData {
+ explicit VideoOptionsMessageData(const VideoOptions& options)
+ : options(options),
+ result(false) {
+ }
+ VideoOptions options;
+ bool result;
+};
+
+struct SetCapturerMessageData : public talk_base::MessageData {
+ SetCapturerMessageData(uint32 s, VideoCapturer* c)
+ : ssrc(s),
+ capturer(c),
+ result(false) {
+ }
+ uint32 ssrc;
+ VideoCapturer* capturer;
+ bool result;
+};
+
+struct IsScreencastingMessageData : public talk_base::MessageData {
+ IsScreencastingMessageData()
+ : result(false) {
+ }
+ bool result;
+};
+
+struct ScreencastFpsMessageData : public talk_base::MessageData {
+ explicit ScreencastFpsMessageData(uint32 s)
+ : ssrc(s), result(0) {
+ }
+ uint32 ssrc;
+ int result;
+};
+
+struct SetScreenCaptureFactoryMessageData : public talk_base::MessageData {
+ explicit SetScreenCaptureFactoryMessageData(
+ VideoChannel::ScreenCapturerFactory* f)
+ : screencapture_factory(f) {
+ }
+ VideoChannel::ScreenCapturerFactory* screencapture_factory;
+};
+
+static const char* PacketType(bool rtcp) {
+ return (!rtcp) ? "RTP" : "RTCP";
+}
+
+static bool ValidPacket(bool rtcp, const talk_base::Buffer* packet) {
+ // Check the packet size. We could check the header too if needed.
+ return (packet &&
+ packet->length() >= (!rtcp ? kMinRtpPacketLen : kMinRtcpPacketLen) &&
+ packet->length() <= kMaxRtpPacketLen);
+}
+
+static bool IsReceiveContentDirection(MediaContentDirection direction) {
+ return direction == MD_SENDRECV || direction == MD_RECVONLY;
+}
+
+static bool IsSendContentDirection(MediaContentDirection direction) {
+ return direction == MD_SENDRECV || direction == MD_SENDONLY;
+}
+
+static const MediaContentDescription* GetContentDescription(
+ const ContentInfo* cinfo) {
+ if (cinfo == NULL)
+ return NULL;
+ return static_cast<const MediaContentDescription*>(cinfo->description);
+}
+
+BaseChannel::BaseChannel(talk_base::Thread* thread,
+ MediaEngineInterface* media_engine,
+ MediaChannel* media_channel, BaseSession* session,
+ const std::string& content_name, bool rtcp)
+ : worker_thread_(thread),
+ media_engine_(media_engine),
+ session_(session),
+ media_channel_(media_channel),
+ content_name_(content_name),
+ rtcp_(rtcp),
+ transport_channel_(NULL),
+ rtcp_transport_channel_(NULL),
+ enabled_(false),
+ writable_(false),
+ rtp_ready_to_send_(false),
+ rtcp_ready_to_send_(false),
+ optimistic_data_send_(false),
+ was_ever_writable_(false),
+ local_content_direction_(MD_INACTIVE),
+ remote_content_direction_(MD_INACTIVE),
+ has_received_packet_(false),
+ dtls_keyed_(false),
+ secure_required_(false) {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ LOG(LS_INFO) << "Created channel for " << content_name;
+}
+
+BaseChannel::~BaseChannel() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ StopConnectionMonitor();
+ FlushRtcpMessages(); // Send any outstanding RTCP packets.
+ Clear(); // eats any outstanding messages or packets
+ // We must destroy the media channel before the transport channel, otherwise
+ // the media channel may try to send on the dead transport channel. NULLing
+ // is not an effective strategy since the sends will come on another thread.
+ delete media_channel_;
+ set_rtcp_transport_channel(NULL);
+ if (transport_channel_ != NULL)
+ session_->DestroyChannel(content_name_, transport_channel_->component());
+ LOG(LS_INFO) << "Destroyed channel";
+}
+
+bool BaseChannel::Init(TransportChannel* transport_channel,
+ TransportChannel* rtcp_transport_channel) {
+ if (transport_channel == NULL) {
+ return false;
+ }
+ if (rtcp() && rtcp_transport_channel == NULL) {
+ return false;
+ }
+ transport_channel_ = transport_channel;
+
+ if (!SetDtlsSrtpCiphers(transport_channel_, false)) {
+ return false;
+ }
+
+ media_channel_->SetInterface(this);
+ transport_channel_->SignalWritableState.connect(
+ this, &BaseChannel::OnWritableState);
+ transport_channel_->SignalReadPacket.connect(
+ this, &BaseChannel::OnChannelRead);
+ transport_channel_->SignalReadyToSend.connect(
+ this, &BaseChannel::OnReadyToSend);
+
+ session_->SignalNewLocalDescription.connect(
+ this, &BaseChannel::OnNewLocalDescription);
+ session_->SignalNewRemoteDescription.connect(
+ this, &BaseChannel::OnNewRemoteDescription);
+
+ set_rtcp_transport_channel(rtcp_transport_channel);
+ return true;
+}
+
+// Can be called from thread other than worker thread
+bool BaseChannel::Enable(bool enable) {
+ Send(enable ? MSG_ENABLE : MSG_DISABLE);
+ return true;
+}
+
+// Can be called from thread other than worker thread
+bool BaseChannel::MuteStream(uint32 ssrc, bool mute) {
+ MuteStreamData data(ssrc, mute);
+ Send(MSG_MUTESTREAM, &data);
+ return data.result;
+}
+
+bool BaseChannel::IsStreamMuted(uint32 ssrc) {
+ SsrcMessageData data(ssrc);
+ Send(MSG_ISSTREAMMUTED, &data);
+ return data.result;
+}
+
+bool BaseChannel::AddRecvStream(const StreamParams& sp) {
+ StreamMessageData data(sp);
+ Send(MSG_ADDRECVSTREAM, &data);
+ return data.result;
+}
+
+bool BaseChannel::RemoveRecvStream(uint32 ssrc) {
+ SsrcMessageData data(ssrc);
+ Send(MSG_REMOVERECVSTREAM, &data);
+ return data.result;
+}
+
+bool BaseChannel::SetLocalContent(const MediaContentDescription* content,
+ ContentAction action) {
+ SetContentData data(content, action);
+ Send(MSG_SETLOCALCONTENT, &data);
+ return data.result;
+}
+
+bool BaseChannel::SetRemoteContent(const MediaContentDescription* content,
+ ContentAction action) {
+ SetContentData data(content, action);
+ Send(MSG_SETREMOTECONTENT, &data);
+ return data.result;
+}
+
+bool BaseChannel::SetMaxSendBandwidth(int max_bandwidth) {
+ SetBandwidthData data(max_bandwidth);
+ Send(MSG_SETMAXSENDBANDWIDTH, &data);
+ return data.result;
+}
+
+void BaseChannel::StartConnectionMonitor(int cms) {
+ socket_monitor_.reset(new SocketMonitor(transport_channel_,
+ worker_thread(),
+ talk_base::Thread::Current()));
+ socket_monitor_->SignalUpdate.connect(
+ this, &BaseChannel::OnConnectionMonitorUpdate);
+ socket_monitor_->Start(cms);
+}
+
+void BaseChannel::StopConnectionMonitor() {
+ if (socket_monitor_) {
+ socket_monitor_->Stop();
+ socket_monitor_.reset();
+ }
+}
+
+void BaseChannel::set_rtcp_transport_channel(TransportChannel* channel) {
+ if (rtcp_transport_channel_ != channel) {
+ if (rtcp_transport_channel_) {
+ session_->DestroyChannel(
+ content_name_, rtcp_transport_channel_->component());
+ }
+ rtcp_transport_channel_ = channel;
+ if (rtcp_transport_channel_) {
+ // TODO(juberti): Propagate this error code
+ VERIFY(SetDtlsSrtpCiphers(rtcp_transport_channel_, true));
+ rtcp_transport_channel_->SignalWritableState.connect(
+ this, &BaseChannel::OnWritableState);
+ rtcp_transport_channel_->SignalReadPacket.connect(
+ this, &BaseChannel::OnChannelRead);
+ rtcp_transport_channel_->SignalReadyToSend.connect(
+ this, &BaseChannel::OnReadyToSend);
+ }
+ }
+}
+
+bool BaseChannel::IsReadyToReceive() const {
+  // Receive data if we are enabled and have local content.
+ return enabled() && IsReceiveContentDirection(local_content_direction_);
+}
+
+bool BaseChannel::IsReadyToSend() const {
+ // Send outgoing data if we are enabled, have local and remote content,
+ // and we have had some form of connectivity.
+ return enabled() &&
+ IsReceiveContentDirection(remote_content_direction_) &&
+ IsSendContentDirection(local_content_direction_) &&
+ was_ever_writable();
+}
+
+bool BaseChannel::SendPacket(talk_base::Buffer* packet) {
+ return SendPacket(false, packet);
+}
+
+bool BaseChannel::SendRtcp(talk_base::Buffer* packet) {
+ return SendPacket(true, packet);
+}
+
+int BaseChannel::SetOption(SocketType type, talk_base::Socket::Option opt,
+ int value) {
+ switch (type) {
+ case ST_RTP: return transport_channel_->SetOption(opt, value);
+ case ST_RTCP: return rtcp_transport_channel_->SetOption(opt, value);
+ default: return -1;
+ }
+}
+
+void BaseChannel::OnWritableState(TransportChannel* channel) {
+ ASSERT(channel == transport_channel_ || channel == rtcp_transport_channel_);
+ if (transport_channel_->writable()
+ && (!rtcp_transport_channel_ || rtcp_transport_channel_->writable())) {
+ ChannelWritable_w();
+ } else {
+ ChannelNotWritable_w();
+ }
+}
+
+void BaseChannel::OnChannelRead(TransportChannel* channel,
+ const char* data, size_t len, int flags) {
+ // OnChannelRead gets called from P2PSocket; now pass data to MediaEngine
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+
+ // When using RTCP multiplexing we might get RTCP packets on the RTP
+ // transport. We feed RTP traffic into the demuxer to determine if it is RTCP.
+ bool rtcp = PacketIsRtcp(channel, data, len);
+ talk_base::Buffer packet(data, len);
+ HandlePacket(rtcp, &packet);
+}
+
+void BaseChannel::OnReadyToSend(TransportChannel* channel) {
+ SetReadyToSend(channel, true);
+}
+
+void BaseChannel::SetReadyToSend(TransportChannel* channel, bool ready) {
+ ASSERT(channel == transport_channel_ || channel == rtcp_transport_channel_);
+ if (channel == transport_channel_) {
+ rtp_ready_to_send_ = ready;
+ }
+ if (channel == rtcp_transport_channel_) {
+ rtcp_ready_to_send_ = ready;
+ }
+
+ if (!ready) {
+ // Notify the MediaChannel when either rtp or rtcp channel can't send.
+ media_channel_->OnReadyToSend(false);
+ } else if (rtp_ready_to_send_ &&
+ // In the case of rtcp mux |rtcp_transport_channel_| will be null.
+ (rtcp_ready_to_send_ || !rtcp_transport_channel_)) {
+ // Notify the MediaChannel when both rtp and rtcp channel can send.
+ media_channel_->OnReadyToSend(true);
+ }
+}
+
+bool BaseChannel::PacketIsRtcp(const TransportChannel* channel,
+ const char* data, size_t len) {
+ return (channel == rtcp_transport_channel_ ||
+ rtcp_mux_filter_.DemuxRtcp(data, static_cast<int>(len)));
+}
+
+bool BaseChannel::SendPacket(bool rtcp, talk_base::Buffer* packet) {
+ // Unless we're sending optimistically, we only allow packets through when we
+ // are completely writable.
+ if (!optimistic_data_send_ && !writable_) {
+ return false;
+ }
+
+ // SendPacket gets called from MediaEngine, typically on an encoder thread.
+ // If the thread is not our worker thread, we will post to our worker
+ // so that the real work happens on our worker. This avoids us having to
+ // synchronize access to all the pieces of the send path, including
+ // SRTP and the inner workings of the transport channels.
+ // The only downside is that we can't return a proper failure code if
+ // needed. Since UDP is unreliable anyway, this should be a non-issue.
+ if (talk_base::Thread::Current() != worker_thread_) {
+ // Avoid a copy by transferring the ownership of the packet data.
+ int message_id = (!rtcp) ? MSG_RTPPACKET : MSG_RTCPPACKET;
+ PacketMessageData* data = new PacketMessageData;
+ packet->TransferTo(&data->packet);
+ worker_thread_->Post(this, message_id, data);
+ return true;
+ }
+
+ // Now that we are on the correct thread, ensure we have a place to send this
+ // packet before doing anything. (We might get RTCP packets that we don't
+ // intend to send.) If we've negotiated RTCP mux, send RTCP over the RTP
+ // transport.
+ TransportChannel* channel = (!rtcp || rtcp_mux_filter_.IsActive()) ?
+ transport_channel_ : rtcp_transport_channel_;
+ if (!channel || (!optimistic_data_send_ && !channel->writable())) {
+ return false;
+ }
+
+ // Protect ourselves against crazy data.
+ if (!ValidPacket(rtcp, packet)) {
+ LOG(LS_ERROR) << "Dropping outgoing " << content_name_ << " "
+ << PacketType(rtcp) << " packet: wrong size="
+ << packet->length();
+ return false;
+ }
+
+ // Signal to the media sink before protecting the packet.
+ {
+ talk_base::CritScope cs(&signal_send_packet_cs_);
+ SignalSendPacketPreCrypto(packet->data(), packet->length(), rtcp);
+ }
+
+ // Protect if needed.
+ if (srtp_filter_.IsActive()) {
+ bool res;
+ char* data = packet->data();
+ int len = static_cast<int>(packet->length());
+ if (!rtcp) {
+ res = srtp_filter_.ProtectRtp(data, len,
+ static_cast<int>(packet->capacity()), &len);
+ if (!res) {
+ int seq_num = -1;
+ uint32 ssrc = 0;
+ GetRtpSeqNum(data, len, &seq_num);
+ GetRtpSsrc(data, len, &ssrc);
+ LOG(LS_ERROR) << "Failed to protect " << content_name_
+ << " RTP packet: size=" << len
+ << ", seqnum=" << seq_num << ", SSRC=" << ssrc;
+ return false;
+ }
+ } else {
+ res = srtp_filter_.ProtectRtcp(data, len,
+ static_cast<int>(packet->capacity()),
+ &len);
+ if (!res) {
+ int type = -1;
+ GetRtcpType(data, len, &type);
+ LOG(LS_ERROR) << "Failed to protect " << content_name_
+ << " RTCP packet: size=" << len << ", type=" << type;
+ return false;
+ }
+ }
+
+ // Update the length of the packet now that we've added the auth tag.
+ packet->SetLength(len);
+ } else if (secure_required_) {
+ // This is a double check for something that supposedly can't happen.
+ LOG(LS_ERROR) << "Can't send outgoing " << PacketType(rtcp)
+ << " packet when SRTP is inactive and crypto is required";
+
+ ASSERT(false);
+ return false;
+ }
+
+ // Signal to the media sink after protecting the packet.
+ {
+ talk_base::CritScope cs(&signal_send_packet_cs_);
+ SignalSendPacketPostCrypto(packet->data(), packet->length(), rtcp);
+ }
+
+ // Bon voyage.
+ int ret = channel->SendPacket(packet->data(), packet->length(),
+ (secure() && secure_dtls()) ? PF_SRTP_BYPASS : 0);
+ if (ret != static_cast<int>(packet->length())) {
+ if (channel->GetError() == EWOULDBLOCK) {
+ LOG(LS_WARNING) << "Got EWOULDBLOCK from socket.";
+ SetReadyToSend(channel, false);
+ }
+ return false;
+ }
+ return true;
+}
+
+bool BaseChannel::WantsPacket(bool rtcp, talk_base::Buffer* packet) {
+ // Protect ourselves against crazy data.
+ if (!ValidPacket(rtcp, packet)) {
+ LOG(LS_ERROR) << "Dropping incoming " << content_name_ << " "
+ << PacketType(rtcp) << " packet: wrong size="
+ << packet->length();
+ return false;
+ }
+  // Whether this channel is supposed to handle this RTP data is determined
+  // by checking against the SSRC filter. It is necessary to do this here to
+  // avoid double decryption.
+ if (ssrc_filter_.IsActive() &&
+ !ssrc_filter_.DemuxPacket(packet->data(), packet->length(), rtcp)) {
+ return false;
+ }
+
+ return true;
+}
+
+void BaseChannel::HandlePacket(bool rtcp, talk_base::Buffer* packet) {
+ if (!WantsPacket(rtcp, packet)) {
+ return;
+ }
+
+ if (!has_received_packet_) {
+ has_received_packet_ = true;
+ signaling_thread()->Post(this, MSG_FIRSTPACKETRECEIVED);
+ }
+
+ // Signal to the media sink before unprotecting the packet.
+ {
+ talk_base::CritScope cs(&signal_recv_packet_cs_);
+ SignalRecvPacketPostCrypto(packet->data(), packet->length(), rtcp);
+ }
+
+ // Unprotect the packet, if needed.
+ if (srtp_filter_.IsActive()) {
+ char* data = packet->data();
+ int len = static_cast<int>(packet->length());
+ bool res;
+ if (!rtcp) {
+ res = srtp_filter_.UnprotectRtp(data, len, &len);
+ if (!res) {
+ int seq_num = -1;
+ uint32 ssrc = 0;
+ GetRtpSeqNum(data, len, &seq_num);
+ GetRtpSsrc(data, len, &ssrc);
+ LOG(LS_ERROR) << "Failed to unprotect " << content_name_
+ << " RTP packet: size=" << len
+ << ", seqnum=" << seq_num << ", SSRC=" << ssrc;
+ return;
+ }
+ } else {
+ res = srtp_filter_.UnprotectRtcp(data, len, &len);
+ if (!res) {
+ int type = -1;
+ GetRtcpType(data, len, &type);
+ LOG(LS_ERROR) << "Failed to unprotect " << content_name_
+ << " RTCP packet: size=" << len << ", type=" << type;
+ return;
+ }
+ }
+
+ packet->SetLength(len);
+ } else if (secure_required_) {
+ // Our session description indicates that SRTP is required, but we got a
+ // packet before our SRTP filter is active. This means either that
+ // a) we got SRTP packets before we received the SDES keys, in which case
+ // we can't decrypt it anyway, or
+ // b) we got SRTP packets before DTLS completed on both the RTP and RTCP
+ // channels, so we haven't yet extracted keys, even if DTLS did complete
+ // on the channel that the packets are being sent on. It's really good
+ // practice to wait for both RTP and RTCP to be good to go before sending
+ // media, to prevent weird failure modes, so it's fine for us to just eat
+ // packets here. This is all sidestepped if RTCP mux is used anyway.
+ LOG(LS_WARNING) << "Can't process incoming " << PacketType(rtcp)
+ << " packet when SRTP is inactive and crypto is required";
+ return;
+ }
+
+ // Signal to the media sink after unprotecting the packet.
+ {
+ talk_base::CritScope cs(&signal_recv_packet_cs_);
+ SignalRecvPacketPreCrypto(packet->data(), packet->length(), rtcp);
+ }
+
+ // Push it down to the media channel.
+ if (!rtcp) {
+ media_channel_->OnPacketReceived(packet);
+ } else {
+ media_channel_->OnRtcpReceived(packet);
+ }
+}
+
+void BaseChannel::OnNewLocalDescription(
+ BaseSession* session, ContentAction action) {
+ const ContentInfo* content_info =
+ GetFirstContent(session->local_description());
+ const MediaContentDescription* content_desc =
+ GetContentDescription(content_info);
+ if (content_desc && content_info && !content_info->rejected &&
+ !SetLocalContent(content_desc, action)) {
+ LOG(LS_ERROR) << "Failure in SetLocalContent with action " << action;
+ session->SetError(BaseSession::ERROR_CONTENT);
+ }
+}
+
+void BaseChannel::OnNewRemoteDescription(
+ BaseSession* session, ContentAction action) {
+ const ContentInfo* content_info =
+ GetFirstContent(session->remote_description());
+ const MediaContentDescription* content_desc =
+ GetContentDescription(content_info);
+ if (content_desc && content_info && !content_info->rejected &&
+ !SetRemoteContent(content_desc, action)) {
+ LOG(LS_ERROR) << "Failure in SetRemoteContent with action " << action;
+ session->SetError(BaseSession::ERROR_CONTENT);
+ }
+}
+
+void BaseChannel::EnableMedia_w() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ if (enabled_)
+ return;
+
+ LOG(LS_INFO) << "Channel enabled";
+ enabled_ = true;
+ ChangeState();
+}
+
+void BaseChannel::DisableMedia_w() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ if (!enabled_)
+ return;
+
+ LOG(LS_INFO) << "Channel disabled";
+ enabled_ = false;
+ ChangeState();
+}
+
+bool BaseChannel::MuteStream_w(uint32 ssrc, bool mute) {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ bool ret = media_channel()->MuteStream(ssrc, mute);
+ if (ret) {
+ if (mute)
+ muted_streams_.insert(ssrc);
+ else
+ muted_streams_.erase(ssrc);
+ }
+ return ret;
+}
+
+bool BaseChannel::IsStreamMuted_w(uint32 ssrc) {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ return muted_streams_.find(ssrc) != muted_streams_.end();
+}
+
+void BaseChannel::ChannelWritable_w() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ if (writable_)
+ return;
+
+ LOG(LS_INFO) << "Channel socket writable ("
+ << transport_channel_->content_name() << ", "
+ << transport_channel_->component() << ")"
+ << (was_ever_writable_ ? "" : " for the first time");
+
+ std::vector<ConnectionInfo> infos;
+ transport_channel_->GetStats(&infos);
+ for (std::vector<ConnectionInfo>::const_iterator it = infos.begin();
+ it != infos.end(); ++it) {
+ if (it->best_connection) {
+ LOG(LS_INFO) << "Using " << it->local_candidate.ToSensitiveString()
+ << "->" << it->remote_candidate.ToSensitiveString();
+ break;
+ }
+ }
+
+ // If we're doing DTLS-SRTP, now is the time.
+ if (!was_ever_writable_ && ShouldSetupDtlsSrtp()) {
+ if (!SetupDtlsSrtp(false)) {
+ LOG(LS_ERROR) << "Couldn't finish DTLS-SRTP on RTP channel";
+ SessionErrorMessageData data(BaseSession::ERROR_TRANSPORT);
+ // Sent synchronously.
+ signaling_thread()->Send(this, MSG_SESSION_ERROR, &data);
+ return;
+ }
+
+ if (rtcp_transport_channel_) {
+ if (!SetupDtlsSrtp(true)) {
+ LOG(LS_ERROR) << "Couldn't finish DTLS-SRTP on RTCP channel";
+ SessionErrorMessageData data(BaseSession::ERROR_TRANSPORT);
+ // Sent synchronously.
+ signaling_thread()->Send(this, MSG_SESSION_ERROR, &data);
+ return;
+ }
+ }
+ }
+
+ was_ever_writable_ = true;
+ writable_ = true;
+ ChangeState();
+}
+
+bool BaseChannel::SetDtlsSrtpCiphers(TransportChannel *tc, bool rtcp) {
+ std::vector<std::string> ciphers;
+ // We always use the default SRTP ciphers for RTCP, but we may use different
+ // ciphers for RTP depending on the media type.
+ if (!rtcp) {
+ GetSrtpCiphers(&ciphers);
+ } else {
+ GetSupportedDefaultCryptoSuites(&ciphers);
+ }
+ return tc->SetSrtpCiphers(ciphers);
+}
+
+bool BaseChannel::ShouldSetupDtlsSrtp() const {
+ return true;
+}
+
+// This function returns true if either DTLS-SRTP is not in use
+// *or* DTLS-SRTP is successfully set up.
+bool BaseChannel::SetupDtlsSrtp(bool rtcp_channel) {
+ bool ret = false;
+
+ TransportChannel *channel = rtcp_channel ?
+ rtcp_transport_channel_ : transport_channel_;
+
+ // No DTLS
+ if (!channel->IsDtlsActive())
+ return true;
+
+ std::string selected_cipher;
+
+ if (!channel->GetSrtpCipher(&selected_cipher)) {
+ LOG(LS_ERROR) << "No DTLS-SRTP selected cipher";
+ return false;
+ }
+
+ LOG(LS_INFO) << "Installing keys from DTLS-SRTP on "
+ << content_name() << " "
+ << PacketType(rtcp_channel);
+
+ // OK, we're now doing DTLS (RFC 5764)
+ std::vector<unsigned char> dtls_buffer(SRTP_MASTER_KEY_KEY_LEN * 2 +
+ SRTP_MASTER_KEY_SALT_LEN * 2);
+
+ // RFC 5705 exporter using the RFC 5764 parameters
+ if (!channel->ExportKeyingMaterial(
+ kDtlsSrtpExporterLabel,
+ NULL, 0, false,
+ &dtls_buffer[0], dtls_buffer.size())) {
+ LOG(LS_WARNING) << "DTLS-SRTP key export failed";
+ ASSERT(false); // This should never happen
+ return false;
+ }
+
+ // Sync up the keys with the DTLS-SRTP interface
+ std::vector<unsigned char> client_write_key(SRTP_MASTER_KEY_KEY_LEN +
+ SRTP_MASTER_KEY_SALT_LEN);
+ std::vector<unsigned char> server_write_key(SRTP_MASTER_KEY_KEY_LEN +
+ SRTP_MASTER_KEY_SALT_LEN);
+ size_t offset = 0;
+ memcpy(&client_write_key[0], &dtls_buffer[offset],
+ SRTP_MASTER_KEY_KEY_LEN);
+ offset += SRTP_MASTER_KEY_KEY_LEN;
+ memcpy(&server_write_key[0], &dtls_buffer[offset],
+ SRTP_MASTER_KEY_KEY_LEN);
+ offset += SRTP_MASTER_KEY_KEY_LEN;
+ memcpy(&client_write_key[SRTP_MASTER_KEY_KEY_LEN],
+ &dtls_buffer[offset], SRTP_MASTER_KEY_SALT_LEN);
+ offset += SRTP_MASTER_KEY_SALT_LEN;
+ memcpy(&server_write_key[SRTP_MASTER_KEY_KEY_LEN],
+ &dtls_buffer[offset], SRTP_MASTER_KEY_SALT_LEN);
+
+ std::vector<unsigned char> *send_key, *recv_key;
+
+ if (channel->GetIceRole() == ICEROLE_CONTROLLING) {
+ send_key = &server_write_key;
+ recv_key = &client_write_key;
+ } else {
+ send_key = &client_write_key;
+ recv_key = &server_write_key;
+ }
+
+ if (rtcp_channel) {
+ ret = srtp_filter_.SetRtcpParams(
+ selected_cipher,
+ &(*send_key)[0],
+ static_cast<int>(send_key->size()),
+ selected_cipher,
+ &(*recv_key)[0],
+ static_cast<int>(recv_key->size()));
+ } else {
+ ret = srtp_filter_.SetRtpParams(
+ selected_cipher,
+ &(*send_key)[0],
+ static_cast<int>(send_key->size()),
+ selected_cipher,
+ &(*recv_key)[0],
+ static_cast<int>(recv_key->size()));
+ }
+
+ if (!ret)
+ LOG(LS_WARNING) << "DTLS-SRTP key installation failed";
+ else
+ dtls_keyed_ = true;
+
+ return ret;
+}
+
+void BaseChannel::ChannelNotWritable_w() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ if (!writable_)
+ return;
+
+ LOG(LS_INFO) << "Channel socket not writable ("
+ << transport_channel_->content_name() << ", "
+ << transport_channel_->component() << ")";
+ writable_ = false;
+ ChangeState();
+}
+
+// Sets the maximum video bandwidth for automatic bandwidth adjustment.
+bool BaseChannel::SetMaxSendBandwidth_w(int max_bandwidth) {
+ return media_channel()->SetSendBandwidth(true, max_bandwidth);
+}
+
+// Applies the SDES crypto parameters of a local or remote description to
+// |srtp_filter_|, dispatching on the content action (offer / provisional
+// answer / answer / update). When DTLS-SRTP is active, answers are ignored
+// so that keys negotiated via DTLS are not overwritten by SDES parameters.
+// Returns false if the filter rejects the parameters.
+bool BaseChannel::SetSrtp_w(const std::vector<CryptoParams>& cryptos,
+                            ContentAction action, ContentSource src) {
+  bool ret = false;
+  switch (action) {
+    case CA_OFFER:
+      ret = srtp_filter_.SetOffer(cryptos, src);
+      break;
+    case CA_PRANSWER:
+      // If we're doing DTLS-SRTP, we don't want to update the filter
+      // with an answer, because we already have SRTP parameters.
+      if (transport_channel_->IsDtlsActive()) {
+        LOG(LS_INFO) <<
+            "Ignoring SDES answer parameters because we are using DTLS-SRTP";
+        ret = true;
+      } else {
+        ret = srtp_filter_.SetProvisionalAnswer(cryptos, src);
+      }
+      break;
+    case CA_ANSWER:
+      // If we're doing DTLS-SRTP, we don't want to update the filter
+      // with an answer, because we already have SRTP parameters.
+      if (transport_channel_->IsDtlsActive()) {
+        LOG(LS_INFO) <<
+            "Ignoring SDES answer parameters because we are using DTLS-SRTP";
+        ret = true;
+      } else {
+        ret = srtp_filter_.SetAnswer(cryptos, src);
+      }
+      break;
+    case CA_UPDATE:
+      // no crypto params.
+      ret = true;
+      break;
+    default:
+      break;
+  }
+  return ret;
+}
+
+// Feeds the RTCP-mux portion of a local/remote description into
+// |rtcp_mux_filter_|. Once a *final* answer activates mux, the dedicated
+// RTCP transport channel is torn down and RTCP flows over the RTP channel.
+// Returns false if the filter rejects the parameters.
+bool BaseChannel::SetRtcpMux_w(bool enable, ContentAction action,
+                               ContentSource src) {
+  bool ret = false;
+  switch (action) {
+    case CA_OFFER:
+      ret = rtcp_mux_filter_.SetOffer(enable, src);
+      break;
+    case CA_PRANSWER:
+      // The filter may become active on a provisional answer, but we only
+      // tear down the RTCP transport channel on a final answer (CA_ANSWER).
+      ret = rtcp_mux_filter_.SetProvisionalAnswer(enable, src);
+      break;
+    case CA_ANSWER:
+      ret = rtcp_mux_filter_.SetAnswer(enable, src);
+      if (ret && rtcp_mux_filter_.IsActive()) {
+        // We activated RTCP mux, close down the RTCP transport.
+        set_rtcp_transport_channel(NULL);
+      }
+      break;
+    case CA_UPDATE:
+      // No RTCP mux info.
+      ret = true;
+      break;  // was an implicit fallthrough into default; make it explicit.
+    default:
+      break;
+  }
+  // |rtcp_mux_filter_| can be active after either a provisional or a final
+  // answer; in both cases, if the RTP transport is already writable, then so
+  // are we.
+  if (ret && rtcp_mux_filter_.IsActive()) {
+    if (transport_channel_->writable()) {
+      ChannelWritable_w();
+    }
+  }
+
+  return ret;
+}
+
+// Registers a receive stream with the media channel and, on success, with
+// |ssrc_filter_| so incoming packets for its SSRCs are accepted.
+// Worker thread only.
+bool BaseChannel::AddRecvStream_w(const StreamParams& sp) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  if (!media_channel()->AddRecvStream(sp))
+    return false;
+
+  return ssrc_filter_.AddStream(sp);
+}
+
+// Removes a receive stream from the SSRC filter and the media channel.
+// Note: the filter removal result is ignored; only the media channel's
+// result is returned. Worker thread only.
+bool BaseChannel::RemoveRecvStream_w(uint32 ssrc) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  ssrc_filter_.RemoveStream(ssrc);
+  return media_channel()->RemoveRecvStream(ssrc);
+}
+
+// Reconciles |local_streams_| (what we send) against the streams in a local
+// description. For CA_UPDATE, |streams| is a delta: entries with SSRCs are
+// added, entries without SSRCs remove the matching existing stream, and
+// anything else is ignored with a warning. For offer/answer actions,
+// |streams| is the complete desired set, and the diff against
+// |local_streams_| is applied. Returns false on the first failure in the
+// update path; in the full-set path it continues and returns the AND of all
+// operations.
+bool BaseChannel::UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
+                                       ContentAction action) {
+  if (!VERIFY(action == CA_OFFER || action == CA_ANSWER ||
+              action == CA_PRANSWER || action == CA_UPDATE))
+    return false;
+
+  // If this is an update, streams only contain streams that have changed.
+  if (action == CA_UPDATE) {
+    for (StreamParamsVec::const_iterator it = streams.begin();
+         it != streams.end(); ++it) {
+      StreamParams existing_stream;
+      bool stream_exist = GetStreamByIds(local_streams_, it->groupid,
+                                         it->id, &existing_stream);
+      if (!stream_exist && it->has_ssrcs()) {
+        if (media_channel()->AddSendStream(*it)) {
+          local_streams_.push_back(*it);
+          LOG(LS_INFO) << "Add send stream ssrc: " << it->first_ssrc();
+        } else {
+          LOG(LS_INFO) << "Failed to add send stream ssrc: "
+                       << it->first_ssrc();
+          return false;
+        }
+      } else if (stream_exist && !it->has_ssrcs()) {
+        if (!media_channel()->RemoveSendStream(existing_stream.first_ssrc())) {
+          LOG(LS_ERROR) << "Failed to remove send stream with ssrc "
+                        << it->first_ssrc() << ".";
+          return false;
+        }
+        RemoveStreamBySsrc(&local_streams_, existing_stream.first_ssrc());
+      } else {
+        LOG(LS_WARNING) << "Ignore unsupported stream update";
+      }
+    }
+    return true;
+  }
+  // Else streams are all the streams we want to send.
+
+  // Check for streams that have been removed.
+  bool ret = true;
+  for (StreamParamsVec::const_iterator it = local_streams_.begin();
+       it != local_streams_.end(); ++it) {
+    if (!GetStreamBySsrc(streams, it->first_ssrc(), NULL)) {
+      if (!media_channel()->RemoveSendStream(it->first_ssrc())) {
+        LOG(LS_ERROR) << "Failed to remove send stream with ssrc "
+                      << it->first_ssrc() << ".";
+        ret = false;
+      }
+    }
+  }
+  // Check for new streams.
+  for (StreamParamsVec::const_iterator it = streams.begin();
+       it != streams.end(); ++it) {
+    if (!GetStreamBySsrc(local_streams_, it->first_ssrc(), NULL)) {
+      if (media_channel()->AddSendStream(*it)) {
+        LOG(LS_INFO) << "Add send ssrc: " << it->ssrcs[0];
+      } else {
+        LOG(LS_INFO) << "Failed to add send stream ssrc: " << it->first_ssrc();
+        ret = false;
+      }
+    }
+  }
+  // |local_streams_| is replaced wholesale even if some add/remove failed.
+  local_streams_ = streams;
+  return ret;
+}
+
+// Receive-side counterpart of UpdateLocalStreams_w: reconciles
+// |remote_streams_| (what we receive) against the streams in a remote
+// description, adding/removing receive streams via AddRecvStream_w /
+// RemoveRecvStream_w. CA_UPDATE is treated as a delta; other actions
+// replace the full set.
+bool BaseChannel::UpdateRemoteStreams_w(
+    const std::vector<StreamParams>& streams,
+    ContentAction action) {
+  if (!VERIFY(action == CA_OFFER || action == CA_ANSWER ||
+              action == CA_PRANSWER || action == CA_UPDATE))
+    return false;
+
+  // If this is an update, streams only contain streams that have changed.
+  if (action == CA_UPDATE) {
+    for (StreamParamsVec::const_iterator it = streams.begin();
+         it != streams.end(); ++it) {
+      StreamParams existing_stream;
+      bool stream_exists = GetStreamByIds(remote_streams_, it->groupid,
+                                          it->id, &existing_stream);
+      if (!stream_exists && it->has_ssrcs()) {
+        if (AddRecvStream_w(*it)) {
+          remote_streams_.push_back(*it);
+          LOG(LS_INFO) << "Add remote stream ssrc: " << it->first_ssrc();
+        } else {
+          LOG(LS_INFO) << "Failed to add remote stream ssrc: "
+                       << it->first_ssrc();
+          return false;
+        }
+      } else if (stream_exists && !it->has_ssrcs()) {
+        if (!RemoveRecvStream_w(existing_stream.first_ssrc())) {
+          LOG(LS_ERROR) << "Failed to remove remote stream with ssrc "
+                        << it->first_ssrc() << ".";
+          return false;
+        }
+        RemoveStreamBySsrc(&remote_streams_, existing_stream.first_ssrc());
+      } else {
+        LOG(LS_WARNING) << "Ignore unsupported stream update."
+                        << " Stream exists? " << stream_exists
+                        << " existing stream = " << existing_stream.ToString()
+                        << " new stream = " << it->ToString();
+      }
+    }
+    return true;
+  }
+  // Else streams are all the streams we want to receive.
+
+  // Check for streams that have been removed.
+  bool ret = true;
+  for (StreamParamsVec::const_iterator it = remote_streams_.begin();
+       it != remote_streams_.end(); ++it) {
+    if (!GetStreamBySsrc(streams, it->first_ssrc(), NULL)) {
+      if (!RemoveRecvStream_w(it->first_ssrc())) {
+        LOG(LS_ERROR) << "Failed to remove remote stream with ssrc "
+                      << it->first_ssrc() << ".";
+        ret = false;
+      }
+    }
+  }
+  // Check for new streams.
+  for (StreamParamsVec::const_iterator it = streams.begin();
+       it != streams.end(); ++it) {
+    if (!GetStreamBySsrc(remote_streams_, it->first_ssrc(), NULL)) {
+      if (AddRecvStream_w(*it)) {
+        LOG(LS_INFO) << "Add remote ssrc: " << it->ssrcs[0];
+      } else {
+        LOG(LS_INFO) << "Failed to add remote stream ssrc: "
+                     << it->first_ssrc();
+        ret = false;
+      }
+    }
+  }
+  // |remote_streams_| is replaced wholesale even if some add/remove failed.
+  remote_streams_ = streams;
+  return ret;
+}
+
+// Applies the media-type-independent parts of a local description: send
+// streams, local SRTP parameters, RTCP mux, receive-side RTP header
+// extensions, and the local direction. Subclasses call this from their
+// SetLocalContent_w before applying codec-specific settings. Returns the
+// AND of all sub-operations (later steps still run after a failure).
+bool BaseChannel::SetBaseLocalContent_w(const MediaContentDescription* content,
+                                        ContentAction action) {
+  // Cache secure_required_ for belt and suspenders check on SendPacket
+  secure_required_ = content->crypto_required();
+  bool ret = UpdateLocalStreams_w(content->streams(), action);
+  // Set local SRTP parameters (what we will encrypt with).
+  ret &= SetSrtp_w(content->cryptos(), action, CS_LOCAL);
+  // Set local RTCP mux parameters.
+  ret &= SetRtcpMux_w(content->rtcp_mux(), action, CS_LOCAL);
+  // Set local RTP header extensions.
+  if (content->rtp_header_extensions_set()) {
+    ret &= media_channel()->SetRecvRtpHeaderExtensions(
+        content->rtp_header_extensions());
+  }
+  set_local_content_direction(content->direction());
+  return ret;
+}
+
+// Applies the media-type-independent parts of a remote description: receive
+// streams, remote SRTP parameters, RTCP mux, send-side RTP header
+// extensions, send bandwidth (when not auto), and the remote direction.
+bool BaseChannel::SetBaseRemoteContent_w(const MediaContentDescription* content,
+                                         ContentAction action) {
+  bool ret = UpdateRemoteStreams_w(content->streams(), action);
+  // Set remote SRTP parameters (what the other side will encrypt with).
+  ret &= SetSrtp_w(content->cryptos(), action, CS_REMOTE);
+  // Set remote RTCP mux parameters.
+  ret &= SetRtcpMux_w(content->rtcp_mux(), action, CS_REMOTE);
+  // Set remote RTP header extensions.
+  if (content->rtp_header_extensions_set()) {
+    ret &= media_channel()->SetSendRtpHeaderExtensions(
+        content->rtp_header_extensions());
+  }
+  if (content->bandwidth() != kAutoBandwidth) {
+    // |false| = fixed (non-auto) bandwidth mode.
+    ret &= media_channel()->SetSendBandwidth(false, content->bandwidth());
+  }
+  set_remote_content_direction(content->direction());
+  return ret;
+}
+
+// Worker-thread message dispatcher. Each MSG_* case unpacks its MessageData
+// payload, invokes the matching *_w() implementation, and (for synchronous
+// Send() calls) stores the result back into the message data for the caller
+// to read. Packet messages own their payload and are deleted here because
+// they are Posted rather than Sent.
+void BaseChannel::OnMessage(talk_base::Message *pmsg) {
+  switch (pmsg->message_id) {
+    case MSG_ENABLE:
+      EnableMedia_w();
+      break;
+    case MSG_DISABLE:
+      DisableMedia_w();
+      break;
+    case MSG_MUTESTREAM: {
+      MuteStreamData* data = static_cast<MuteStreamData*>(pmsg->pdata);
+      data->result = MuteStream_w(data->ssrc, data->mute);
+      break;
+    }
+    case MSG_ISSTREAMMUTED: {
+      SsrcMessageData* data = static_cast<SsrcMessageData*>(pmsg->pdata);
+      data->result = IsStreamMuted_w(data->ssrc);
+      break;
+    }
+    case MSG_SETLOCALCONTENT: {
+      SetContentData* data = static_cast<SetContentData*>(pmsg->pdata);
+      data->result = SetLocalContent_w(data->content, data->action);
+      break;
+    }
+    case MSG_SETREMOTECONTENT: {
+      SetContentData* data = static_cast<SetContentData*>(pmsg->pdata);
+      data->result = SetRemoteContent_w(data->content, data->action);
+      break;
+    }
+    case MSG_ADDRECVSTREAM: {
+      StreamMessageData* data = static_cast<StreamMessageData*>(pmsg->pdata);
+      data->result = AddRecvStream_w(data->sp);
+      break;
+    }
+    case MSG_REMOVERECVSTREAM: {
+      SsrcMessageData* data = static_cast<SsrcMessageData*>(pmsg->pdata);
+      data->result = RemoveRecvStream_w(data->ssrc);
+      break;
+    }
+    case MSG_SETMAXSENDBANDWIDTH: {
+      SetBandwidthData* data = static_cast<SetBandwidthData*>(pmsg->pdata);
+      data->result = SetMaxSendBandwidth_w(data->value);
+      break;
+    }
+
+    case MSG_RTPPACKET:
+    case MSG_RTCPPACKET: {
+      PacketMessageData* data = static_cast<PacketMessageData*>(pmsg->pdata);
+      SendPacket(pmsg->message_id == MSG_RTCPPACKET, &data->packet);
+      delete data;  // because it is Posted
+      break;
+    }
+    case MSG_FIRSTPACKETRECEIVED: {
+      SignalFirstPacketReceived(this);
+      break;
+    }
+    case MSG_SESSION_ERROR: {
+      SessionErrorMessageData* data = static_cast<SessionErrorMessageData*>
+          (pmsg->pdata);
+      session_->SetError(data->error_);
+      break;
+    }
+  }
+}
+
+// Thin wrappers that marshal messages onto the worker thread. Send() blocks
+// until the message has been processed; Post()/PostDelayed() are
+// fire-and-forget; Clear() removes pending messages with the given id.
+void BaseChannel::Send(uint32 id, talk_base::MessageData *pdata) {
+  worker_thread_->Send(this, id, pdata);
+}
+
+void BaseChannel::Post(uint32 id, talk_base::MessageData *pdata) {
+  worker_thread_->Post(this, id, pdata);
+}
+
+void BaseChannel::PostDelayed(int cmsDelay, uint32 id,
+                              talk_base::MessageData *pdata) {
+  worker_thread_->PostDelayed(cmsDelay, this, id, pdata);
+}
+
+void BaseChannel::Clear(uint32 id, talk_base::MessageList* removed) {
+  worker_thread_->Clear(this, id, removed);
+}
+
+// Drains queued RTCP packet messages by sending them synchronously.
+void BaseChannel::FlushRtcpMessages() {
+  // Flush all remaining RTCP messages. This should only be called in
+  // destructor.
+  ASSERT(talk_base::Thread::Current() == worker_thread_);
+  talk_base::MessageList rtcp_messages;
+  Clear(MSG_RTCPPACKET, &rtcp_messages);
+  for (talk_base::MessageList::iterator it = rtcp_messages.begin();
+       it != rtcp_messages.end(); ++it) {
+    Send(MSG_RTCPPACKET, it->pdata);
+  }
+}
+
+// Audio channel. |received_media_| tracks whether any RTP has arrived,
+// which is used by the early-media timeout logic.
+VoiceChannel::VoiceChannel(talk_base::Thread* thread,
+                           MediaEngineInterface* media_engine,
+                           VoiceMediaChannel* media_channel,
+                           BaseSession* session,
+                           const std::string& content_name,
+                           bool rtcp)
+    : BaseChannel(thread, media_engine, media_channel, session, content_name,
+                  rtcp),
+      received_media_(false) {
+}
+
+VoiceChannel::~VoiceChannel() {
+  StopAudioMonitor();
+  StopMediaMonitor();
+  // this can't be done in the base class, since it calls a virtual
+  DisableMedia_w();
+}
+
+// Creates the RTP (and optionally RTCP) transport channels named "rtp" /
+// "rtcp" on the session and hooks up media-error and SRTP-error signals.
+// Returns false if BaseChannel::Init fails.
+bool VoiceChannel::Init() {
+  TransportChannel* rtcp_channel = rtcp() ? session()->CreateChannel(
+      content_name(), "rtcp", ICE_CANDIDATE_COMPONENT_RTCP) : NULL;
+  if (!BaseChannel::Init(session()->CreateChannel(
+          content_name(), "rtp", ICE_CANDIDATE_COMPONENT_RTP),
+          rtcp_channel)) {
+    return false;
+  }
+  media_channel()->SignalMediaError.connect(
+      this, &VoiceChannel::OnVoiceChannelError);
+  srtp_filter()->SignalSrtpError.connect(
+      this, &VoiceChannel::OnSrtpError);
+  return true;
+}
+
+// The following public methods run on the caller's thread and block via
+// Send() while the corresponding *_w() work executes on the worker thread.
+bool VoiceChannel::SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer) {
+  AudioRenderMessageData data(ssrc, renderer, false);
+  Send(MSG_SETRENDERER, &data);
+  return data.result;
+}
+
+bool VoiceChannel::SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer) {
+  AudioRenderMessageData data(ssrc, renderer, true);
+  Send(MSG_SETRENDERER, &data);
+  return data.result;
+}
+
+bool VoiceChannel::SetRingbackTone(const void* buf, int len) {
+  SetRingbackToneMessageData data(buf, len);
+  Send(MSG_SETRINGBACKTONE, &data);
+  return data.result;
+}
+
+// TODO(juberti): Handle early media the right way. We should get an explicit
+// ringing message telling us to start playing local ringback, which we cancel
+// if any early media actually arrives. For now, we do the opposite, which is
+// to wait 1 second for early media, and start playing local ringback if none
+// arrives.
+void VoiceChannel::SetEarlyMedia(bool enable) {
+  if (enable) {
+    // Start the early media timeout
+    PostDelayed(kEarlyMediaTimeout, MSG_EARLYMEDIATIMEOUT);
+  } else {
+    // Stop the timeout if currently going.
+    Clear(MSG_EARLYMEDIATIMEOUT);
+  }
+}
+
+// Synchronous (Send-based) proxies onto the worker thread, continued.
+bool VoiceChannel::PlayRingbackTone(uint32 ssrc, bool play, bool loop) {
+  PlayRingbackToneMessageData data(ssrc, play, loop);
+  Send(MSG_PLAYRINGBACKTONE, &data);
+  return data.result;
+}
+
+// Convenience wrapper: sends a 160ms DTMF event on the default stream
+// (ssrc 0), optionally also playing it out locally.
+bool VoiceChannel::PressDTMF(int digit, bool playout) {
+  int flags = DF_SEND;
+  if (playout) {
+    flags |= DF_PLAY;
+  }
+  int duration_ms = 160;
+  return InsertDtmf(0, digit, duration_ms, flags);
+}
+
+bool VoiceChannel::CanInsertDtmf() {
+  BoolMessageData data(false);
+  Send(MSG_CANINSERTDTMF, &data);
+  return data.data();
+}
+
+bool VoiceChannel::InsertDtmf(uint32 ssrc, int event_code, int duration,
+                              int flags) {
+  DtmfMessageData data(ssrc, event_code, duration, flags);
+  Send(MSG_INSERTDTMF, &data);
+  return data.result;
+}
+
+bool VoiceChannel::SetOutputScaling(uint32 ssrc, double left, double right) {
+  ScaleVolumeMessageData data(ssrc, left, right);
+  Send(MSG_SCALEVOLUME, &data);
+  return data.result;
+}
+bool VoiceChannel::GetStats(VoiceMediaInfo* stats) {
+  VoiceStatsMessageData data(stats);
+  Send(MSG_GETSTATS, &data);
+  return data.result;
+}
+
+// Starts periodic media statistics polling; updates arrive via
+// OnMediaMonitorUpdate on the thread that called this method.
+void VoiceChannel::StartMediaMonitor(int cms) {
+  media_monitor_.reset(new VoiceMediaMonitor(media_channel(), worker_thread(),
+      talk_base::Thread::Current()));
+  media_monitor_->SignalUpdate.connect(
+      this, &VoiceChannel::OnMediaMonitorUpdate);
+  media_monitor_->Start(cms);
+}
+
+void VoiceChannel::StopMediaMonitor() {
+  if (media_monitor_) {
+    media_monitor_->Stop();
+    media_monitor_->SignalUpdate.disconnect(this);
+    media_monitor_.reset();
+  }
+}
+
+// Starts periodic audio-level polling (input/output levels, active streams).
+void VoiceChannel::StartAudioMonitor(int cms) {
+  audio_monitor_.reset(new AudioMonitor(this, talk_base::Thread::Current()));
+  audio_monitor_
+    ->SignalUpdate.connect(this, &VoiceChannel::OnAudioMonitorUpdate);
+  audio_monitor_->Start(cms);
+}
+
+void VoiceChannel::StopAudioMonitor() {
+  if (audio_monitor_) {
+    audio_monitor_->Stop();
+    audio_monitor_.reset();
+  }
+}
+
+bool VoiceChannel::IsAudioMonitorRunning() const {
+  return (audio_monitor_.get() != NULL);
+}
+
+// Starts keystroke (typing-while-talking) detection; auto-mute events are
+// re-emitted through SignalAutoMuted.
+void VoiceChannel::StartTypingMonitor(const TypingMonitorOptions& settings) {
+  typing_monitor_.reset(new TypingMonitor(this, worker_thread(), settings));
+  SignalAutoMuted.repeat(typing_monitor_->SignalMuted);
+}
+
+void VoiceChannel::StopTypingMonitor() {
+  typing_monitor_.reset();
+}
+
+bool VoiceChannel::IsTypingMonitorRunning() const {
+  return typing_monitor_;
+}
+
+// Mutes via the base class, then informs the typing monitor so it can
+// distinguish explicit mutes from its own auto-mutes.
+bool VoiceChannel::MuteStream_w(uint32 ssrc, bool mute) {
+  bool ret = BaseChannel::MuteStream_w(ssrc, mute);
+  if (typing_monitor_ && mute)
+    typing_monitor_->OnChannelMuted();
+  return ret;
+}
+
+int VoiceChannel::GetInputLevel_w() {
+  return media_engine()->GetInputLevel();
+}
+
+int VoiceChannel::GetOutputLevel_w() {
+  return media_channel()->GetOutputLevel();
+}
+
+void VoiceChannel::GetActiveStreams_w(AudioInfo::StreamList* actives) {
+  media_channel()->GetActiveStreams(actives);
+}
+
+// Forwards incoming packets to the base class, and latches
+// |received_media_| on the first RTP (non-RTCP) packet so the early-media
+// timeout (HandleEarlyMediaTimeout) becomes a no-op.
+void VoiceChannel::OnChannelRead(TransportChannel* channel,
+                                 const char* data, size_t len, int flags) {
+  BaseChannel::OnChannelRead(channel, data, len, flags);
+
+  // Set a flag when we've received an RTP packet. If we're waiting for early
+  // media, this will disable the timeout.
+  if (!received_media_ && !PacketIsRtcp(channel, data, len)) {
+    received_media_ = true;
+  }
+}
+
+// Recomputes the playout/send state whenever enable, content, or
+// writability changes.
+void VoiceChannel::ChangeState() {
+  // Render incoming data if we're the active call, and we have the local
+  // content. We receive data on the default channel and multiplexed streams.
+  bool recv = IsReadyToReceive();
+  if (!media_channel()->SetPlayout(recv)) {
+    SendLastMediaError();
+  }
+
+  // Send outgoing data if we're the active call, we have the remote content,
+  // and we have had some form of connectivity.
+  bool send = IsReadyToSend();
+  SendFlags send_flag = send ? SEND_MICROPHONE : SEND_NOTHING;
+  if (!media_channel()->SetSend(send_flag)) {
+    LOG(LS_ERROR) << "Failed to SetSend " << send_flag << " on voice channel";
+    SendLastMediaError();
+  }
+
+  LOG(LS_INFO) << "Changing voice state, recv=" << recv << " send=" << send;
+}
+
+// Selects the audio content from a session description.
+const ContentInfo* VoiceChannel::GetFirstContent(
+    const SessionDescription* sdesc) {
+  return GetFirstAudioContent(sdesc);
+}
+
+// Applies a local audio description: base (streams/SRTP/mux/extensions)
+// settings plus receive codecs. On success, re-evaluates the channel state
+// so playout may start. |content| is assumed to be an
+// AudioContentDescription (checked only via ASSERT + null check).
+bool VoiceChannel::SetLocalContent_w(const MediaContentDescription* content,
+                                     ContentAction action) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  LOG(LS_INFO) << "Setting local voice description";
+
+  const AudioContentDescription* audio =
+      static_cast<const AudioContentDescription*>(content);
+  ASSERT(audio != NULL);
+  if (!audio) return false;
+
+  bool ret = SetBaseLocalContent_w(content, action);
+  // Set local audio codecs (what we want to receive).
+  // TODO(whyuan): Change action != CA_UPDATE to !audio->partial() when partial
+  // is set properly.
+  if (action != CA_UPDATE || audio->has_codecs()) {
+    ret &= media_channel()->SetRecvCodecs(audio->codecs());
+  }
+
+  // If everything worked, see if we can start receiving.
+  if (ret) {
+    ChangeState();
+  } else {
+    LOG(LS_WARNING) << "Failed to set local voice description";
+  }
+  return ret;
+}
+
+// Applies a remote audio description: send codecs, base settings, and (for
+// full descriptions) audio-processing option tweaks derived from the
+// content (conference mode, AGC adjustment). On success, re-evaluates the
+// channel state so sending may start.
+bool VoiceChannel::SetRemoteContent_w(const MediaContentDescription* content,
+                                      ContentAction action) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  LOG(LS_INFO) << "Setting remote voice description";
+
+  const AudioContentDescription* audio =
+      static_cast<const AudioContentDescription*>(content);
+  ASSERT(audio != NULL);
+  if (!audio) return false;
+
+  bool ret = true;
+  // Set remote audio codecs (what the other side wants to receive).
+  if (action != CA_UPDATE || audio->has_codecs()) {
+    ret &= media_channel()->SetSendCodecs(audio->codecs());
+  }
+
+  ret &= SetBaseRemoteContent_w(content, action);
+
+  if (action != CA_UPDATE) {
+    // Tweak our audio processing settings, if needed.
+    AudioOptions audio_options;
+    if (!media_channel()->GetOptions(&audio_options)) {
+      LOG(LS_WARNING) << "Can not set audio options from on remote content.";
+    } else {
+      if (audio->conference_mode()) {
+        audio_options.conference_mode.Set(true);
+      }
+      if (audio->agc_minus_10db()) {
+        audio_options.adjust_agc_delta.Set(kAgcMinus10db);
+      }
+      if (!media_channel()->SetOptions(audio_options)) {
+        // Log an error on failure, but don't abort the call.
+        LOG(LS_ERROR) << "Failed to set voice channel options";
+      }
+    }
+  }
+
+  // If everything worked, see if we can start sending.
+  if (ret) {
+    ChangeState();
+  } else {
+    LOG(LS_WARNING) << "Failed to set remote voice description";
+  }
+  return ret;
+}
+
+// Worker-thread implementations behind the Send()-based public API above.
+bool VoiceChannel::SetRingbackTone_w(const void* buf, int len) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  return media_channel()->SetRingbackTone(static_cast<const char*>(buf), len);
+}
+
+bool VoiceChannel::PlayRingbackTone_w(uint32 ssrc, bool play, bool loop) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  if (play) {
+    LOG(LS_INFO) << "Playing ringback tone, loop=" << loop;
+  } else {
+    LOG(LS_INFO) << "Stopping ringback tone";
+  }
+  return media_channel()->PlayRingbackTone(ssrc, play, loop);
+}
+
+// Fires SignalEarlyMediaTimeout if no RTP arrived before the timer expired.
+void VoiceChannel::HandleEarlyMediaTimeout() {
+  // This occurs on the main thread, not the worker thread.
+  if (!received_media_) {
+    LOG(LS_INFO) << "No early media received before timeout";
+    SignalEarlyMediaTimeout(this);
+  }
+}
+
+bool VoiceChannel::CanInsertDtmf_w() {
+  return media_channel()->CanInsertDtmf();
+}
+
+// DTMF is only inserted while the channel is enabled.
+bool VoiceChannel::InsertDtmf_w(uint32 ssrc, int event, int duration,
+                                int flags) {
+  if (!enabled()) {
+    return false;
+  }
+
+  return media_channel()->InsertDtmf(ssrc, event, duration, flags);
+}
+
+bool VoiceChannel::SetOutputScaling_w(uint32 ssrc, double left, double right) {
+  return media_channel()->SetOutputScaling(ssrc, left, right);
+}
+
+bool VoiceChannel::GetStats_w(VoiceMediaInfo* stats) {
+  return media_channel()->GetStats(stats);
+}
+
+// Synchronously applies audio options on the worker thread.
+bool VoiceChannel::SetChannelOptions(const AudioOptions& options) {
+  AudioOptionsMessageData data(options);
+  Send(MSG_SETCHANNELOPTIONS, &data);
+  return data.result;
+}
+
+bool VoiceChannel::SetChannelOptions_w(const AudioOptions& options) {
+  return media_channel()->SetOptions(options);
+}
+
+// Routes a renderer to either the local or remote side of the media
+// channel, depending on |is_local|.
+bool VoiceChannel::SetRenderer_w(uint32 ssrc, AudioRenderer* renderer,
+                                 bool is_local) {
+  if (is_local)
+    return media_channel()->SetLocalRenderer(ssrc, renderer);
+
+  return media_channel()->SetRemoteRenderer(ssrc, renderer);
+}
+
+// Worker-thread dispatcher for voice-specific messages; anything not
+// handled here falls through to BaseChannel::OnMessage. MSG_CHANNEL_ERROR
+// is the one heap-owned payload (Posted from OnVoiceChannelError) and is
+// deleted after delivery.
+void VoiceChannel::OnMessage(talk_base::Message *pmsg) {
+  switch (pmsg->message_id) {
+    case MSG_SETRINGBACKTONE: {
+      SetRingbackToneMessageData* data =
+          static_cast<SetRingbackToneMessageData*>(pmsg->pdata);
+      data->result = SetRingbackTone_w(data->buf, data->len);
+      break;
+    }
+    case MSG_PLAYRINGBACKTONE: {
+      PlayRingbackToneMessageData* data =
+          static_cast<PlayRingbackToneMessageData*>(pmsg->pdata);
+      data->result = PlayRingbackTone_w(data->ssrc, data->play, data->loop);
+      break;
+    }
+    case MSG_EARLYMEDIATIMEOUT:
+      HandleEarlyMediaTimeout();
+      break;
+    case MSG_CANINSERTDTMF: {
+      BoolMessageData* data =
+          static_cast<BoolMessageData*>(pmsg->pdata);
+      data->data() = CanInsertDtmf_w();
+      break;
+    }
+    case MSG_INSERTDTMF: {
+      DtmfMessageData* data =
+          static_cast<DtmfMessageData*>(pmsg->pdata);
+      data->result = InsertDtmf_w(data->ssrc, data->event, data->duration,
+                                  data->flags);
+      break;
+    }
+    case MSG_SCALEVOLUME: {
+      ScaleVolumeMessageData* data =
+          static_cast<ScaleVolumeMessageData*>(pmsg->pdata);
+      data->result = SetOutputScaling_w(data->ssrc, data->left, data->right);
+      break;
+    }
+    case MSG_GETSTATS: {
+      VoiceStatsMessageData* data =
+          static_cast<VoiceStatsMessageData*>(pmsg->pdata);
+      data->result = GetStats_w(data->stats);
+      break;
+    }
+    case MSG_CHANNEL_ERROR: {
+      VoiceChannelErrorMessageData* data =
+          static_cast<VoiceChannelErrorMessageData*>(pmsg->pdata);
+      SignalMediaError(this, data->ssrc, data->error);
+      delete data;
+      break;
+    }
+    case MSG_SETCHANNELOPTIONS: {
+      AudioOptionsMessageData* data =
+          static_cast<AudioOptionsMessageData*>(pmsg->pdata);
+      data->result = SetChannelOptions_w(data->options);
+      break;
+    }
+    case MSG_SETRENDERER: {
+      AudioRenderMessageData* data =
+          static_cast<AudioRenderMessageData*>(pmsg->pdata);
+      data->result = SetRenderer_w(data->ssrc, data->renderer, data->is_local);
+      break;
+    }
+    default:
+      BaseChannel::OnMessage(pmsg);
+      break;
+  }
+}
+
+// Monitor callbacks: re-emit updates as channel-level signals.
+void VoiceChannel::OnConnectionMonitorUpdate(
+    SocketMonitor* monitor, const std::vector<ConnectionInfo>& infos) {
+  SignalConnectionMonitor(this, infos);
+}
+
+void VoiceChannel::OnMediaMonitorUpdate(
+    VoiceMediaChannel* media_channel, const VoiceMediaInfo& info) {
+  ASSERT(media_channel == this->media_channel());
+  SignalMediaMonitor(this, info);
+}
+
+void VoiceChannel::OnAudioMonitorUpdate(AudioMonitor* monitor,
+                                        const AudioInfo& info) {
+  SignalAudioMonitor(this, info);
+}
+
+// Marshals a media error onto the signaling thread; the heap-allocated
+// payload is deleted by the MSG_CHANNEL_ERROR handler in OnMessage.
+void VoiceChannel::OnVoiceChannelError(
+    uint32 ssrc, VoiceMediaChannel::Error err) {
+  VoiceChannelErrorMessageData* data = new VoiceChannelErrorMessageData(
+      ssrc, err);
+  signaling_thread()->Post(this, MSG_CHANNEL_ERROR, data);
+}
+
+// Maps SRTP filter errors to voice media errors, distinguishing the
+// protect (send) and unprotect (receive) directions.
+void VoiceChannel::OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode,
+                               SrtpFilter::Error error) {
+  switch (error) {
+    case SrtpFilter::ERROR_FAIL:
+      OnVoiceChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+                          VoiceMediaChannel::ERROR_REC_SRTP_ERROR :
+                          VoiceMediaChannel::ERROR_PLAY_SRTP_ERROR);
+      break;
+    case SrtpFilter::ERROR_AUTH:
+      OnVoiceChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+                          VoiceMediaChannel::ERROR_REC_SRTP_AUTH_FAILED :
+                          VoiceMediaChannel::ERROR_PLAY_SRTP_AUTH_FAILED);
+      break;
+    case SrtpFilter::ERROR_REPLAY:
+      // Only receving channel should have this error.
+      ASSERT(mode == SrtpFilter::UNPROTECT);
+      OnVoiceChannelError(ssrc, VoiceMediaChannel::ERROR_PLAY_SRTP_REPLAY);
+      break;
+    default:
+      break;
+  }
+}
+
+// Reports the SRTP crypto suites this channel supports for audio.
+void VoiceChannel::GetSrtpCiphers(std::vector<std::string>* ciphers) const {
+  GetSupportedAudioCryptoSuites(ciphers);
+}
+
+// Video channel; optionally paired with a |voice_channel| (e.g. for lip
+// sync — TODO confirm intended use, not visible here).
+VideoChannel::VideoChannel(talk_base::Thread* thread,
+                           MediaEngineInterface* media_engine,
+                           VideoMediaChannel* media_channel,
+                           BaseSession* session,
+                           const std::string& content_name,
+                           bool rtcp,
+                           VoiceChannel* voice_channel)
+    : BaseChannel(thread, media_engine, media_channel, session, content_name,
+                  rtcp),
+      voice_channel_(voice_channel),
+      renderer_(NULL),
+      screencapture_factory_(CreateScreenCapturerFactory()),
+      previous_we_(talk_base::WE_CLOSE) {
+}
+
+// Creates the "video_rtp"/"video_rtcp" transport channels and hooks up
+// media-error and SRTP-error signals.
+bool VideoChannel::Init() {
+  TransportChannel* rtcp_channel = rtcp() ? session()->CreateChannel(
+      content_name(), "video_rtcp", ICE_CANDIDATE_COMPONENT_RTCP) : NULL;
+  if (!BaseChannel::Init(session()->CreateChannel(
+          content_name(), "video_rtp", ICE_CANDIDATE_COMPONENT_RTP),
+          rtcp_channel)) {
+    return false;
+  }
+  media_channel()->SignalMediaError.connect(
+      this, &VideoChannel::OnVideoChannelError);
+  srtp_filter()->SignalSrtpError.connect(
+      this, &VideoChannel::OnSrtpError);
+  return true;
+}
+
+// NOTE(review): this is a VoiceChannel method placed among the VideoChannel
+// definitions; consider moving it next to the other VoiceChannel code.
+// Fetches the media channel's last error and re-emits it as a signal.
+void VoiceChannel::SendLastMediaError() {
+  uint32 ssrc;
+  VoiceMediaChannel::Error error;
+  media_channel()->GetLastMediaError(&ssrc, &error);
+  SignalMediaError(this, ssrc, error);
+}
+
+VideoChannel::~VideoChannel() {
+  // Tear down all screencast capturers before media is disabled. The loop
+  // relies on RemoveScreencast() shrinking |screencast_capturers_| each
+  // iteration; if a removal ever fails we assert and bail out rather than
+  // spin forever.
+  // (Removed two locals — |screencast_ssrcs| and |iter| — that were declared
+  // but never used.)
+  while (!screencast_capturers_.empty()) {
+    if (!RemoveScreencast(screencast_capturers_.begin()->first)) {
+      LOG(LS_ERROR) << "Unable to delete screencast with ssrc "
+                    << screencast_capturers_.begin()->first;
+      ASSERT(false);
+      break;
+    }
+  }
+
+  StopMediaMonitor();
+  // this can't be done in the base class, since it calls a virtual
+  DisableMedia_w();
+}
+
+// Synchronous (Send-based) proxies that run the real work on the worker
+// thread. Note SetRenderer/SendIntraFrame/RequestIntraFrame always return
+// true; the others return the worker-thread result.
+bool VideoChannel::SetRenderer(uint32 ssrc, VideoRenderer* renderer) {
+  VideoRenderMessageData data(ssrc, renderer);
+  Send(MSG_SETRENDERER, &data);
+  return true;
+}
+
+bool VideoChannel::ApplyViewRequest(const ViewRequest& request) {
+  ViewRequestMessageData data(request);
+  Send(MSG_HANDLEVIEWREQUEST, &data);
+  return data.result;
+}
+
+// Returns the created capturer, or NULL on failure (see AddScreencast_w).
+VideoCapturer* VideoChannel::AddScreencast(
+    uint32 ssrc, const ScreencastId& id) {
+  AddScreencastMessageData data(ssrc, id);
+  Send(MSG_ADDSCREENCAST, &data);
+  return data.result;
+}
+
+bool VideoChannel::SetCapturer(uint32 ssrc, VideoCapturer* capturer) {
+  SetCapturerMessageData data(ssrc, capturer);
+  Send(MSG_SETCAPTURER, &data);
+  return data.result;
+}
+
+bool VideoChannel::RemoveScreencast(uint32 ssrc) {
+  RemoveScreencastMessageData data(ssrc);
+  Send(MSG_REMOVESCREENCAST, &data);
+  return data.result;
+}
+
+bool VideoChannel::IsScreencasting() {
+  IsScreencastingMessageData data;
+  Send(MSG_ISSCREENCASTING, &data);
+  return data.result;
+}
+
+int VideoChannel::ScreencastFps(uint32 ssrc) {
+  ScreencastFpsMessageData data(ssrc);
+  Send(MSG_SCREENCASTFPS, &data);
+  return data.result;
+}
+
+bool VideoChannel::SendIntraFrame() {
+  Send(MSG_SENDINTRAFRAME);
+  return true;
+}
+
+bool VideoChannel::RequestIntraFrame() {
+  Send(MSG_REQUESTINTRAFRAME);
+  return true;
+}
+
+// Replaces the factory used by AddScreencast_w to create capturers.
+void VideoChannel::SetScreenCaptureFactory(
+    ScreenCapturerFactory* screencapture_factory) {
+  SetScreenCaptureFactoryMessageData data(screencapture_factory);
+  Send(MSG_SETSCREENCASTFACTORY, &data);
+}
+
+// Recomputes render/send state whenever enable, content, or writability
+// changes. Failures are logged but not propagated.
+void VideoChannel::ChangeState() {
+  // Render incoming data if we're the active call, and we have the local
+  // content. We receive data on the default channel and multiplexed streams.
+  bool recv = IsReadyToReceive();
+  if (!media_channel()->SetRender(recv)) {
+    LOG(LS_ERROR) << "Failed to SetRender on video channel";
+    // TODO(gangji): Report error back to server.
+  }
+
+  // Send outgoing data if we're the active call, we have the remote content,
+  // and we have had some form of connectivity.
+  bool send = IsReadyToSend();
+  if (!media_channel()->SetSend(send)) {
+    LOG(LS_ERROR) << "Failed to SetSend on video channel";
+    // TODO(gangji): Report error back to server.
+  }
+
+  LOG(LS_INFO) << "Changing video state, recv=" << recv << " send=" << send;
+}
+
+// Synchronously fetches video media statistics on the worker thread.
+bool VideoChannel::GetStats(VideoMediaInfo* stats) {
+  VideoStatsMessageData data(stats);
+  Send(MSG_GETSTATS, &data);
+  return data.result;
+}
+
+// Starts periodic media statistics polling (see VoiceChannel counterpart).
+void VideoChannel::StartMediaMonitor(int cms) {
+  media_monitor_.reset(new VideoMediaMonitor(media_channel(), worker_thread(),
+      talk_base::Thread::Current()));
+  media_monitor_->SignalUpdate.connect(
+      this, &VideoChannel::OnMediaMonitorUpdate);
+  media_monitor_->Start(cms);
+}
+
+void VideoChannel::StopMediaMonitor() {
+  if (media_monitor_) {
+    media_monitor_->Stop();
+    media_monitor_.reset();
+  }
+}
+
+// Selects the video content from a session description.
+const ContentInfo* VideoChannel::GetFirstContent(
+    const SessionDescription* sdesc) {
+  return GetFirstVideoContent(sdesc);
+}
+
+// Applies a local video description: base settings, receive codecs, and
+// (for full descriptions) buffered-mode latency. On success, re-evaluates
+// the channel state so rendering may start.
+bool VideoChannel::SetLocalContent_w(const MediaContentDescription* content,
+                                     ContentAction action) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  LOG(LS_INFO) << "Setting local video description";
+
+  const VideoContentDescription* video =
+      static_cast<const VideoContentDescription*>(content);
+  ASSERT(video != NULL);
+  if (!video) return false;
+
+  bool ret = SetBaseLocalContent_w(content, action);
+  // Set local video codecs (what we want to receive).
+  if (action != CA_UPDATE || video->has_codecs()) {
+    ret &= media_channel()->SetRecvCodecs(video->codecs());
+  }
+
+  if (action != CA_UPDATE) {
+    VideoOptions video_options;
+    media_channel()->GetOptions(&video_options);
+    video_options.buffered_mode_latency.Set(video->buffered_mode_latency());
+
+    if (!media_channel()->SetOptions(video_options)) {
+      // Log an error on failure, but don't abort the call.
+      LOG(LS_ERROR) << "Failed to set video channel options";
+    }
+  }
+
+  // If everything worked, see if we can start receiving.
+  if (ret) {
+    ChangeState();
+  } else {
+    LOG(LS_WARNING) << "Failed to set local video description";
+  }
+  return ret;
+}
+
+// Applies a remote video description: send codecs, base settings, and (for
+// full descriptions) conference mode and buffered-mode latency options.
+// On success, re-evaluates the channel state so sending may start.
+bool VideoChannel::SetRemoteContent_w(const MediaContentDescription* content,
+                                      ContentAction action) {
+  ASSERT(worker_thread() == talk_base::Thread::Current());
+  LOG(LS_INFO) << "Setting remote video description";
+
+  const VideoContentDescription* video =
+      static_cast<const VideoContentDescription*>(content);
+  ASSERT(video != NULL);
+  if (!video) return false;
+
+  bool ret = true;
+  // Set remote video codecs (what the other side wants to receive).
+  if (action != CA_UPDATE || video->has_codecs()) {
+    ret &= media_channel()->SetSendCodecs(video->codecs());
+  }
+
+  ret &= SetBaseRemoteContent_w(content, action);
+
+  if (action != CA_UPDATE) {
+    // Tweak our video processing settings, if needed.
+    VideoOptions video_options;
+    media_channel()->GetOptions(&video_options);
+    video_options.conference_mode.Set(video->conference_mode());
+    video_options.buffered_mode_latency.Set(video->buffered_mode_latency());
+
+    if (!media_channel()->SetOptions(video_options)) {
+      // Log an error on failure, but don't abort the call.
+      LOG(LS_ERROR) << "Failed to set video channel options";
+    }
+  }
+
+  // If everything worked, see if we can start sending.
+  if (ret) {
+    ChangeState();
+  } else {
+    LOG(LS_WARNING) << "Failed to set remote video description";
+  }
+  return ret;
+}
+
+// Applies a remote view request: for every local send stream, picks the
+// requested resolution/framerate from the matching static view, or a 0x0
+// format (which drops all frames) when no view matches. Views that match
+// no local stream are logged but do not fail the request.
+bool VideoChannel::ApplyViewRequest_w(const ViewRequest& request) {
+  bool ret = true;
+  // Set the send format for each of the local streams. If the view request
+  // does not contain a local stream, set its send format to 0x0, which will
+  // drop all frames.
+  for (std::vector<StreamParams>::const_iterator it = local_streams().begin();
+      it != local_streams().end(); ++it) {
+    VideoFormat format(0, 0, 0, cricket::FOURCC_I420);
+    StaticVideoViews::const_iterator view;
+    for (view = request.static_video_views.begin();
+         view != request.static_video_views.end(); ++view) {
+      if (view->selector.Matches(*it)) {
+        format.width = view->width;
+        format.height = view->height;
+        format.interval = cricket::VideoFormat::FpsToInterval(view->framerate);
+        break;
+      }
+    }
+
+    ret &= media_channel()->SetSendStreamFormat(it->first_ssrc(), format);
+  }
+
+  // Check if the view request has invalid streams.
+  for (StaticVideoViews::const_iterator it = request.static_video_views.begin();
+       it != request.static_video_views.end(); ++it) {
+    if (!GetStream(local_streams(), it->selector, NULL)) {
+      LOG(LS_WARNING) << "View request for ("
+                      << it->selector.ssrc << ", '"
+                      << it->selector.groupid << "', '"
+                      << it->selector.streamid << "'"
+                      << ") is not in the local streams.";
+    }
+  }
+
+  return ret;
+}
+
+void VideoChannel::SetRenderer_w(uint32 ssrc, VideoRenderer* renderer) {
+ media_channel()->SetRenderer(ssrc, renderer);
+}
+
+VideoCapturer* VideoChannel::AddScreencast_w(
+ uint32 ssrc, const ScreencastId& id) {
+ if (screencast_capturers_.find(ssrc) != screencast_capturers_.end()) {
+ return NULL;
+ }
+ VideoCapturer* screen_capturer =
+ screencapture_factory_->CreateScreenCapturer(id);
+ if (!screen_capturer) {
+ return NULL;
+ }
+ screen_capturer->SignalStateChange.connect(this,
+ &VideoChannel::OnStateChange);
+ screencast_capturers_[ssrc] = screen_capturer;
+ return screen_capturer;
+}
+
+bool VideoChannel::SetCapturer_w(uint32 ssrc, VideoCapturer* capturer) {
+ return media_channel()->SetCapturer(ssrc, capturer);
+}
+
+bool VideoChannel::RemoveScreencast_w(uint32 ssrc) {
+ ScreencastMap::iterator iter = screencast_capturers_.find(ssrc);
+ if (iter == screencast_capturers_.end()) {
+ return false;
+ }
+ // Clean up VideoCapturer.
+ delete iter->second;
+ screencast_capturers_.erase(iter);
+ return true;
+}
+
+bool VideoChannel::IsScreencasting_w() const {
+ return !screencast_capturers_.empty();
+}
+
+int VideoChannel::ScreencastFps_w(uint32 ssrc) const {
+ ScreencastMap::const_iterator iter = screencast_capturers_.find(ssrc);
+ if (iter == screencast_capturers_.end()) {
+ return 0;
+ }
+ VideoCapturer* capturer = iter->second;
+ const VideoFormat* video_format = capturer->GetCaptureFormat();
+ return VideoFormat::IntervalToFps(video_format->interval);
+}
+
+void VideoChannel::SetScreenCaptureFactory_w(
+ ScreenCapturerFactory* screencapture_factory) {
+ if (screencapture_factory == NULL) {
+ screencapture_factory_.reset(CreateScreenCapturerFactory());
+ } else {
+ screencapture_factory_.reset(screencapture_factory);
+ }
+}
+
+bool VideoChannel::GetStats_w(VideoMediaInfo* stats) {
+ return media_channel()->GetStats(stats);
+}
+
+void VideoChannel::OnScreencastWindowEvent_s(uint32 ssrc,
+ talk_base::WindowEvent we) {
+ ASSERT(signaling_thread() == talk_base::Thread::Current());
+ SignalScreencastWindowEvent(ssrc, we);
+}
+
+bool VideoChannel::SetChannelOptions(const VideoOptions &options) {
+ VideoOptionsMessageData data(options);
+ Send(MSG_SETCHANNELOPTIONS, &data);
+ return data.result;
+}
+
+bool VideoChannel::SetChannelOptions_w(const VideoOptions &options) {
+ return media_channel()->SetOptions(options);
+}
+
+void VideoChannel::OnMessage(talk_base::Message *pmsg) {
+ switch (pmsg->message_id) {
+ case MSG_SETRENDERER: {
+ const VideoRenderMessageData* data =
+ static_cast<VideoRenderMessageData*>(pmsg->pdata);
+ SetRenderer_w(data->ssrc, data->renderer);
+ break;
+ }
+ case MSG_ADDSCREENCAST: {
+ AddScreencastMessageData* data =
+ static_cast<AddScreencastMessageData*>(pmsg->pdata);
+ data->result = AddScreencast_w(data->ssrc, data->window_id);
+ break;
+ }
+ case MSG_SETCAPTURER: {
+ SetCapturerMessageData* data =
+ static_cast<SetCapturerMessageData*>(pmsg->pdata);
+ data->result = SetCapturer_w(data->ssrc, data->capturer);
+ break;
+ }
+ case MSG_REMOVESCREENCAST: {
+ RemoveScreencastMessageData* data =
+ static_cast<RemoveScreencastMessageData*>(pmsg->pdata);
+ data->result = RemoveScreencast_w(data->ssrc);
+ break;
+ }
+ case MSG_SCREENCASTWINDOWEVENT: {
+ const ScreencastEventMessageData* data =
+ static_cast<ScreencastEventMessageData*>(pmsg->pdata);
+ OnScreencastWindowEvent_s(data->ssrc, data->event);
+ delete data;
+ break;
+ }
+ case MSG_ISSCREENCASTING: {
+ IsScreencastingMessageData* data =
+ static_cast<IsScreencastingMessageData*>(pmsg->pdata);
+ data->result = IsScreencasting_w();
+ break;
+ }
+ case MSG_SCREENCASTFPS: {
+ ScreencastFpsMessageData* data =
+ static_cast<ScreencastFpsMessageData*>(pmsg->pdata);
+ data->result = ScreencastFps_w(data->ssrc);
+ break;
+ }
+ case MSG_SENDINTRAFRAME: {
+ SendIntraFrame_w();
+ break;
+ }
+ case MSG_REQUESTINTRAFRAME: {
+ RequestIntraFrame_w();
+ break;
+ }
+ case MSG_SETCHANNELOPTIONS: {
+ VideoOptionsMessageData* data =
+ static_cast<VideoOptionsMessageData*>(pmsg->pdata);
+ data->result = SetChannelOptions_w(data->options);
+ break;
+ }
+ case MSG_CHANNEL_ERROR: {
+ const VideoChannelErrorMessageData* data =
+ static_cast<VideoChannelErrorMessageData*>(pmsg->pdata);
+ SignalMediaError(this, data->ssrc, data->error);
+ delete data;
+ break;
+ }
+ case MSG_HANDLEVIEWREQUEST: {
+ ViewRequestMessageData* data =
+ static_cast<ViewRequestMessageData*>(pmsg->pdata);
+ data->result = ApplyViewRequest_w(data->request);
+ break;
+ }
+ case MSG_SETSCREENCASTFACTORY: {
+ SetScreenCaptureFactoryMessageData* data =
+ static_cast<SetScreenCaptureFactoryMessageData*>(pmsg->pdata);
+ SetScreenCaptureFactory_w(data->screencapture_factory);
+ break;
+ }
+ case MSG_GETSTATS: {
+ VideoStatsMessageData* data =
+ static_cast<VideoStatsMessageData*>(pmsg->pdata);
+ data->result = GetStats_w(data->stats);
+ break;
+ }
+ default:
+ BaseChannel::OnMessage(pmsg);
+ break;
+ }
+}
+
+void VideoChannel::OnConnectionMonitorUpdate(
+ SocketMonitor *monitor, const std::vector<ConnectionInfo> &infos) {
+ SignalConnectionMonitor(this, infos);
+}
+
+// TODO(pthatcher): Look into removing duplicate code between
+// audio, video, and data, perhaps by using templates.
+void VideoChannel::OnMediaMonitorUpdate(
+ VideoMediaChannel* media_channel, const VideoMediaInfo &info) {
+ ASSERT(media_channel == this->media_channel());
+ SignalMediaMonitor(this, info);
+}
+
+void VideoChannel::OnScreencastWindowEvent(uint32 ssrc,
+ talk_base::WindowEvent event) {
+ ScreencastEventMessageData* pdata =
+ new ScreencastEventMessageData(ssrc, event);
+ signaling_thread()->Post(this, MSG_SCREENCASTWINDOWEVENT, pdata);
+}
+
+void VideoChannel::OnStateChange(VideoCapturer* capturer, CaptureState ev) {
+ // Map capturer events to window events. In the future we may want to simply
+ // pass these events up directly.
+ talk_base::WindowEvent we;
+ if (ev == CS_STOPPED) {
+ we = talk_base::WE_CLOSE;
+ } else if (ev == CS_PAUSED) {
+ we = talk_base::WE_MINIMIZE;
+ } else if (ev == CS_RUNNING && previous_we_ == talk_base::WE_MINIMIZE) {
+ we = talk_base::WE_RESTORE;
+ } else {
+ return;
+ }
+ previous_we_ = we;
+
+ uint32 ssrc = 0;
+ if (!GetLocalSsrc(capturer, &ssrc)) {
+ return;
+ }
+ ScreencastEventMessageData* pdata =
+ new ScreencastEventMessageData(ssrc, we);
+ signaling_thread()->Post(this, MSG_SCREENCASTWINDOWEVENT, pdata);
+}
+
+bool VideoChannel::GetLocalSsrc(const VideoCapturer* capturer, uint32* ssrc) {
+ *ssrc = 0;
+ for (ScreencastMap::iterator iter = screencast_capturers_.begin();
+ iter != screencast_capturers_.end(); ++iter) {
+ if (iter->second == capturer) {
+ *ssrc = iter->first;
+ return true;
+ }
+ }
+ return false;
+}
+
+void VideoChannel::OnVideoChannelError(uint32 ssrc,
+ VideoMediaChannel::Error error) {
+ VideoChannelErrorMessageData* data = new VideoChannelErrorMessageData(
+ ssrc, error);
+ signaling_thread()->Post(this, MSG_CHANNEL_ERROR, data);
+}
+
+void VideoChannel::OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode,
+ SrtpFilter::Error error) {
+ switch (error) {
+ case SrtpFilter::ERROR_FAIL:
+ OnVideoChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+ VideoMediaChannel::ERROR_REC_SRTP_ERROR :
+ VideoMediaChannel::ERROR_PLAY_SRTP_ERROR);
+ break;
+ case SrtpFilter::ERROR_AUTH:
+ OnVideoChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+ VideoMediaChannel::ERROR_REC_SRTP_AUTH_FAILED :
+ VideoMediaChannel::ERROR_PLAY_SRTP_AUTH_FAILED);
+ break;
+ case SrtpFilter::ERROR_REPLAY:
+      // Only receiving channel should have this error.
+ ASSERT(mode == SrtpFilter::UNPROTECT);
+ // TODO(gangji): Turn on the signaling of replay error once we have
+ // switched to the new mechanism for doing video retransmissions.
+ // OnVideoChannelError(ssrc, VideoMediaChannel::ERROR_PLAY_SRTP_REPLAY);
+ break;
+ default:
+ break;
+ }
+}
+
+
+void VideoChannel::GetSrtpCiphers(std::vector<std::string>* ciphers) const {
+ GetSupportedVideoCryptoSuites(ciphers);
+}
+
+DataChannel::DataChannel(talk_base::Thread* thread,
+ DataMediaChannel* media_channel,
+ BaseSession* session,
+ const std::string& content_name,
+ bool rtcp)
+ // MediaEngine is NULL
+ : BaseChannel(thread, NULL, media_channel, session, content_name, rtcp),
+ data_channel_type_(cricket::DCT_NONE) {
+}
+
+DataChannel::~DataChannel() {
+ StopMediaMonitor();
+ // this can't be done in the base class, since it calls a virtual
+ DisableMedia_w();
+}
+
+bool DataChannel::Init() {
+ TransportChannel* rtcp_channel = rtcp() ? session()->CreateChannel(
+ content_name(), "data_rtcp", ICE_CANDIDATE_COMPONENT_RTCP) : NULL;
+ if (!BaseChannel::Init(session()->CreateChannel(
+ content_name(), "data_rtp", ICE_CANDIDATE_COMPONENT_RTP),
+ rtcp_channel)) {
+ return false;
+ }
+ media_channel()->SignalDataReceived.connect(
+ this, &DataChannel::OnDataReceived);
+ media_channel()->SignalMediaError.connect(
+ this, &DataChannel::OnDataChannelError);
+ media_channel()->SignalReadyToSend.connect(
+ this, &DataChannel::OnDataChannelReadyToSend);
+ srtp_filter()->SignalSrtpError.connect(
+ this, &DataChannel::OnSrtpError);
+ return true;
+}
+
+bool DataChannel::SendData(const SendDataParams& params,
+ const talk_base::Buffer& payload,
+ SendDataResult* result) {
+ SendDataMessageData message_data(params, &payload, result);
+ Send(MSG_SENDDATA, &message_data);
+ return message_data.succeeded;
+}
+
+const ContentInfo* DataChannel::GetFirstContent(
+ const SessionDescription* sdesc) {
+ return GetFirstDataContent(sdesc);
+}
+
+
+static bool IsRtpPacket(const talk_base::Buffer* packet) {
+ int version;
+ if (!GetRtpVersion(packet->data(), packet->length(), &version)) {
+ return false;
+ }
+
+ return version == 2;
+}
+
+bool DataChannel::WantsPacket(bool rtcp, talk_base::Buffer* packet) {
+ if (data_channel_type_ == DCT_SCTP) {
+ // TODO(pthatcher): Do this in a more robust way by checking for
+ // SCTP or DTLS.
+ return !IsRtpPacket(packet);
+ } else if (data_channel_type_ == DCT_RTP) {
+ return BaseChannel::WantsPacket(rtcp, packet);
+ }
+ return false;
+}
+
+// Sets the maximum bandwidth. Anything over this will be dropped.
+bool DataChannel::SetMaxSendBandwidth_w(int max_bps) {
+ LOG(LS_INFO) << "DataChannel: Setting max bandwidth to " << max_bps;
+ return media_channel()->SetSendBandwidth(false, max_bps);
+}
+
+bool DataChannel::SetDataChannelType(DataChannelType new_data_channel_type) {
+ // It hasn't been set before, so set it now.
+ if (data_channel_type_ == DCT_NONE) {
+ data_channel_type_ = new_data_channel_type;
+ return true;
+ }
+
+ // It's been set before, but doesn't match. That's bad.
+ if (data_channel_type_ != new_data_channel_type) {
+ LOG(LS_WARNING) << "Data channel type mismatch."
+ << " Expected " << data_channel_type_
+ << " Got " << new_data_channel_type;
+ return false;
+ }
+
+  // It hasn't changed. Nothing to do.
+ return true;
+}
+
+bool DataChannel::SetDataChannelTypeFromContent(
+ const DataContentDescription* content) {
+ bool is_sctp = ((content->protocol() == kMediaProtocolSctp) ||
+ (content->protocol() == kMediaProtocolDtlsSctp));
+ DataChannelType data_channel_type = is_sctp ? DCT_SCTP : DCT_RTP;
+ return SetDataChannelType(data_channel_type);
+}
+
+bool DataChannel::SetLocalContent_w(const MediaContentDescription* content,
+ ContentAction action) {
+ ASSERT(worker_thread() == talk_base::Thread::Current());
+ LOG(LS_INFO) << "Setting local data description";
+
+ const DataContentDescription* data =
+ static_cast<const DataContentDescription*>(content);
+ ASSERT(data != NULL);
+ if (!data) return false;
+
+ bool ret = false;
+ if (!SetDataChannelTypeFromContent(data)) {
+ return false;
+ }
+
+ if (data_channel_type_ == DCT_SCTP) {
+ // SCTP data channels don't need the rest of the stuff.
+ ret = UpdateLocalStreams_w(data->streams(), action);
+ if (ret) {
+ set_local_content_direction(content->direction());
+ }
+ } else {
+ ret = SetBaseLocalContent_w(content, action);
+
+ if (action != CA_UPDATE || data->has_codecs()) {
+ ret &= media_channel()->SetRecvCodecs(data->codecs());
+ }
+ }
+
+ // If everything worked, see if we can start receiving.
+ if (ret) {
+ ChangeState();
+ } else {
+ LOG(LS_WARNING) << "Failed to set local data description";
+ }
+ return ret;
+}
+
+bool DataChannel::SetRemoteContent_w(const MediaContentDescription* content,
+ ContentAction action) {
+ ASSERT(worker_thread() == talk_base::Thread::Current());
+
+ const DataContentDescription* data =
+ static_cast<const DataContentDescription*>(content);
+ ASSERT(data != NULL);
+ if (!data) return false;
+
+ bool ret = true;
+ if (!SetDataChannelTypeFromContent(data)) {
+ return false;
+ }
+
+ if (data_channel_type_ == DCT_SCTP) {
+ LOG(LS_INFO) << "Setting SCTP remote data description";
+ // SCTP data channels don't need the rest of the stuff.
+ ret = UpdateRemoteStreams_w(content->streams(), action);
+ if (ret) {
+ set_remote_content_direction(content->direction());
+ }
+ } else {
+ // If the remote data doesn't have codecs and isn't an update, it
+ // must be empty, so ignore it.
+ if (action != CA_UPDATE && !data->has_codecs()) {
+ return true;
+ }
+ LOG(LS_INFO) << "Setting remote data description";
+
+    // Set remote data codecs (what the other side wants to receive).
+ if (action != CA_UPDATE || data->has_codecs()) {
+ ret &= media_channel()->SetSendCodecs(data->codecs());
+ }
+
+ if (ret) {
+ ret &= SetBaseRemoteContent_w(content, action);
+ }
+
+ if (action != CA_UPDATE) {
+ int bandwidth_bps = data->bandwidth();
+ bool auto_bandwidth = (bandwidth_bps == kAutoBandwidth);
+ ret &= media_channel()->SetSendBandwidth(auto_bandwidth, bandwidth_bps);
+ }
+ }
+
+ // If everything worked, see if we can start sending.
+ if (ret) {
+ ChangeState();
+ } else {
+ LOG(LS_WARNING) << "Failed to set remote data description";
+ }
+ return ret;
+}
+
+void DataChannel::ChangeState() {
+ // Render incoming data if we're the active call, and we have the local
+ // content. We receive data on the default channel and multiplexed streams.
+ bool recv = IsReadyToReceive();
+ if (!media_channel()->SetReceive(recv)) {
+ LOG(LS_ERROR) << "Failed to SetReceive on data channel";
+ }
+
+ // Send outgoing data if we're the active call, we have the remote content,
+ // and we have had some form of connectivity.
+ bool send = IsReadyToSend();
+ if (!media_channel()->SetSend(send)) {
+ LOG(LS_ERROR) << "Failed to SetSend on data channel";
+ }
+
+ // Post to trigger SignalReadyToSendData.
+ signaling_thread()->Post(this, MSG_READYTOSENDDATA,
+ new DataChannelReadyToSendMessageData(send));
+
+ LOG(LS_INFO) << "Changing data state, recv=" << recv << " send=" << send;
+}
+
+void DataChannel::OnMessage(talk_base::Message *pmsg) {
+ switch (pmsg->message_id) {
+ case MSG_READYTOSENDDATA: {
+ DataChannelReadyToSendMessageData* data =
+ static_cast<DataChannelReadyToSendMessageData*>(pmsg->pdata);
+ SignalReadyToSendData(data->data());
+ delete data;
+ break;
+ }
+ case MSG_SENDDATA: {
+ SendDataMessageData* msg =
+ static_cast<SendDataMessageData*>(pmsg->pdata);
+ msg->succeeded = media_channel()->SendData(
+ msg->params, *(msg->payload), msg->result);
+ break;
+ }
+ case MSG_DATARECEIVED: {
+ DataReceivedMessageData* data =
+ static_cast<DataReceivedMessageData*>(pmsg->pdata);
+ SignalDataReceived(this, data->params, data->payload);
+ delete data;
+ break;
+ }
+ case MSG_CHANNEL_ERROR: {
+ const DataChannelErrorMessageData* data =
+ static_cast<DataChannelErrorMessageData*>(pmsg->pdata);
+ SignalMediaError(this, data->ssrc, data->error);
+ delete data;
+ break;
+ }
+ default:
+ BaseChannel::OnMessage(pmsg);
+ break;
+ }
+}
+
+void DataChannel::OnConnectionMonitorUpdate(
+ SocketMonitor* monitor, const std::vector<ConnectionInfo>& infos) {
+ SignalConnectionMonitor(this, infos);
+}
+
+void DataChannel::StartMediaMonitor(int cms) {
+ media_monitor_.reset(new DataMediaMonitor(media_channel(), worker_thread(),
+ talk_base::Thread::Current()));
+ media_monitor_->SignalUpdate.connect(
+ this, &DataChannel::OnMediaMonitorUpdate);
+ media_monitor_->Start(cms);
+}
+
+void DataChannel::StopMediaMonitor() {
+ if (media_monitor_) {
+ media_monitor_->Stop();
+ media_monitor_->SignalUpdate.disconnect(this);
+ media_monitor_.reset();
+ }
+}
+
+void DataChannel::OnMediaMonitorUpdate(
+ DataMediaChannel* media_channel, const DataMediaInfo& info) {
+ ASSERT(media_channel == this->media_channel());
+ SignalMediaMonitor(this, info);
+}
+
+void DataChannel::OnDataReceived(
+ const ReceiveDataParams& params, const char* data, size_t len) {
+ DataReceivedMessageData* msg = new DataReceivedMessageData(
+ params, data, len);
+ signaling_thread()->Post(this, MSG_DATARECEIVED, msg);
+}
+
+void DataChannel::OnDataChannelError(
+ uint32 ssrc, DataMediaChannel::Error err) {
+ DataChannelErrorMessageData* data = new DataChannelErrorMessageData(
+ ssrc, err);
+ signaling_thread()->Post(this, MSG_CHANNEL_ERROR, data);
+}
+
+void DataChannel::OnDataChannelReadyToSend(bool writable) {
+  // This is used for congestion control to indicate that the stream is ready
+ // to send by the MediaChannel, as opposed to OnReadyToSend, which indicates
+ // that the transport channel is ready.
+ signaling_thread()->Post(this, MSG_READYTOSENDDATA,
+ new DataChannelReadyToSendMessageData(writable));
+}
+
+void DataChannel::OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode,
+ SrtpFilter::Error error) {
+ switch (error) {
+ case SrtpFilter::ERROR_FAIL:
+ OnDataChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+ DataMediaChannel::ERROR_SEND_SRTP_ERROR :
+ DataMediaChannel::ERROR_RECV_SRTP_ERROR);
+ break;
+ case SrtpFilter::ERROR_AUTH:
+ OnDataChannelError(ssrc, (mode == SrtpFilter::PROTECT) ?
+ DataMediaChannel::ERROR_SEND_SRTP_AUTH_FAILED :
+ DataMediaChannel::ERROR_RECV_SRTP_AUTH_FAILED);
+ break;
+ case SrtpFilter::ERROR_REPLAY:
+      // Only receiving channel should have this error.
+ ASSERT(mode == SrtpFilter::UNPROTECT);
+ OnDataChannelError(ssrc, DataMediaChannel::ERROR_RECV_SRTP_REPLAY);
+ break;
+ default:
+ break;
+ }
+}
+
+void DataChannel::GetSrtpCiphers(std::vector<std::string>* ciphers) const {
+ GetSupportedDataCryptoSuites(ciphers);
+}
+
+bool DataChannel::ShouldSetupDtlsSrtp() const {
+ return (data_channel_type_ == DCT_RTP);
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channel.h b/chromium/third_party/libjingle/source/talk/session/media/channel.h
new file mode 100644
index 00000000000..eccadd32d60
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channel.h
@@ -0,0 +1,689 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_CHANNEL_H_
+#define TALK_SESSION_MEDIA_CHANNEL_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/asyncudpsocket.h"
+#include "talk/base/criticalsection.h"
+#include "talk/base/network.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/window.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/base/mediaengine.h"
+#include "talk/media/base/screencastid.h"
+#include "talk/media/base/streamparams.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/p2p/base/session.h"
+#include "talk/p2p/client/socketmonitor.h"
+#include "talk/session/media/audiomonitor.h"
+#include "talk/session/media/mediamonitor.h"
+#include "talk/session/media/mediasession.h"
+#include "talk/session/media/rtcpmuxfilter.h"
+#include "talk/session/media/srtpfilter.h"
+#include "talk/session/media/ssrcmuxfilter.h"
+
+namespace cricket {
+
+struct CryptoParams;
+class MediaContentDescription;
+struct TypingMonitorOptions;
+class TypingMonitor;
+struct ViewRequest;
+
+enum SinkType {
+ SINK_PRE_CRYPTO, // Sink packets before encryption or after decryption.
+ SINK_POST_CRYPTO // Sink packets after encryption or before decryption.
+};
+
+// BaseChannel contains logic common to voice and video, including
+// enable/mute, marshaling calls to a worker thread, and
+// connection and media monitors.
+class BaseChannel
+ : public talk_base::MessageHandler, public sigslot::has_slots<>,
+ public MediaChannel::NetworkInterface {
+ public:
+ BaseChannel(talk_base::Thread* thread, MediaEngineInterface* media_engine,
+ MediaChannel* channel, BaseSession* session,
+ const std::string& content_name, bool rtcp);
+ virtual ~BaseChannel();
+ bool Init(TransportChannel* transport_channel,
+ TransportChannel* rtcp_transport_channel);
+
+ talk_base::Thread* worker_thread() const { return worker_thread_; }
+ BaseSession* session() const { return session_; }
+ const std::string& content_name() { return content_name_; }
+ TransportChannel* transport_channel() const {
+ return transport_channel_;
+ }
+ TransportChannel* rtcp_transport_channel() const {
+ return rtcp_transport_channel_;
+ }
+ bool enabled() const { return enabled_; }
+ // Set to true to have the channel optimistically allow data to be sent even
+ // when the channel isn't fully writable.
+ void set_optimistic_data_send(bool value) { optimistic_data_send_ = value; }
+ bool optimistic_data_send() const { return optimistic_data_send_; }
+
+ // This function returns true if we are using SRTP.
+ bool secure() const { return srtp_filter_.IsActive(); }
+ // The following function returns true if we are using
+ // DTLS-based keying. If you turned off SRTP later, however
+ // you could have secure() == false and dtls_secure() == true.
+ bool secure_dtls() const { return dtls_keyed_; }
+ // This function returns true if we require secure channel for call setup.
+ bool secure_required() const { return secure_required_; }
+
+ bool writable() const { return writable_; }
+ bool IsStreamMuted(uint32 ssrc);
+
+ // Channel control
+ bool SetLocalContent(const MediaContentDescription* content,
+ ContentAction action);
+ bool SetRemoteContent(const MediaContentDescription* content,
+ ContentAction action);
+ bool SetMaxSendBandwidth(int max_bandwidth);
+
+ bool Enable(bool enable);
+ // Mute sending media on the stream with SSRC |ssrc|
+ // If there is only one sending stream SSRC 0 can be used.
+ bool MuteStream(uint32 ssrc, bool mute);
+
+ // Multiplexing
+ bool AddRecvStream(const StreamParams& sp);
+ bool RemoveRecvStream(uint32 ssrc);
+
+ // Monitoring
+ void StartConnectionMonitor(int cms);
+ void StopConnectionMonitor();
+
+ void set_srtp_signal_silent_time(uint32 silent_time) {
+ srtp_filter_.set_signal_silent_time(silent_time);
+ }
+
+ void set_content_name(const std::string& content_name) {
+ ASSERT(signaling_thread()->IsCurrent());
+ ASSERT(!writable_);
+ if (session_->state() != BaseSession::STATE_INIT) {
+ LOG(LS_ERROR) << "Content name for a channel can be changed only "
+ << "when BaseSession is in STATE_INIT state.";
+ return;
+ }
+ content_name_ = content_name;
+ }
+
+ template <class T>
+ void RegisterSendSink(T* sink,
+ void (T::*OnPacket)(const void*, size_t, bool),
+ SinkType type) {
+ talk_base::CritScope cs(&signal_send_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ SignalSendPacketPostCrypto.disconnect(sink);
+ SignalSendPacketPostCrypto.connect(sink, OnPacket);
+ } else {
+ SignalSendPacketPreCrypto.disconnect(sink);
+ SignalSendPacketPreCrypto.connect(sink, OnPacket);
+ }
+ }
+
+ void UnregisterSendSink(sigslot::has_slots<>* sink,
+ SinkType type) {
+ talk_base::CritScope cs(&signal_send_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ SignalSendPacketPostCrypto.disconnect(sink);
+ } else {
+ SignalSendPacketPreCrypto.disconnect(sink);
+ }
+ }
+
+ bool HasSendSinks(SinkType type) {
+ talk_base::CritScope cs(&signal_send_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ return !SignalSendPacketPostCrypto.is_empty();
+ } else {
+ return !SignalSendPacketPreCrypto.is_empty();
+ }
+ }
+
+ template <class T>
+ void RegisterRecvSink(T* sink,
+ void (T::*OnPacket)(const void*, size_t, bool),
+ SinkType type) {
+ talk_base::CritScope cs(&signal_recv_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ SignalRecvPacketPostCrypto.disconnect(sink);
+ SignalRecvPacketPostCrypto.connect(sink, OnPacket);
+ } else {
+ SignalRecvPacketPreCrypto.disconnect(sink);
+ SignalRecvPacketPreCrypto.connect(sink, OnPacket);
+ }
+ }
+
+ void UnregisterRecvSink(sigslot::has_slots<>* sink,
+ SinkType type) {
+ talk_base::CritScope cs(&signal_recv_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ SignalRecvPacketPostCrypto.disconnect(sink);
+ } else {
+ SignalRecvPacketPreCrypto.disconnect(sink);
+ }
+ }
+
+ bool HasRecvSinks(SinkType type) {
+ talk_base::CritScope cs(&signal_recv_packet_cs_);
+ if (SINK_POST_CRYPTO == type) {
+ return !SignalRecvPacketPostCrypto.is_empty();
+ } else {
+ return !SignalRecvPacketPreCrypto.is_empty();
+ }
+ }
+
+ SsrcMuxFilter* ssrc_filter() { return &ssrc_filter_; }
+
+ const std::vector<StreamParams>& local_streams() const {
+ return local_streams_;
+ }
+ const std::vector<StreamParams>& remote_streams() const {
+ return remote_streams_;
+ }
+
+ // Used for latency measurements.
+ sigslot::signal1<BaseChannel*> SignalFirstPacketReceived;
+
+ // Used to alert UI when the muted status changes, perhaps autonomously.
+ sigslot::repeater2<BaseChannel*, bool> SignalAutoMuted;
+
+ // Made public for easier testing.
+ void SetReadyToSend(TransportChannel* channel, bool ready);
+
+ protected:
+ MediaEngineInterface* media_engine() const { return media_engine_; }
+ virtual MediaChannel* media_channel() const { return media_channel_; }
+ void set_rtcp_transport_channel(TransportChannel* transport);
+ bool was_ever_writable() const { return was_ever_writable_; }
+ void set_local_content_direction(MediaContentDirection direction) {
+ local_content_direction_ = direction;
+ }
+ void set_remote_content_direction(MediaContentDirection direction) {
+ remote_content_direction_ = direction;
+ }
+ bool IsReadyToReceive() const;
+ bool IsReadyToSend() const;
+ talk_base::Thread* signaling_thread() { return session_->signaling_thread(); }
+ SrtpFilter* srtp_filter() { return &srtp_filter_; }
+ bool rtcp() const { return rtcp_; }
+
+ void Send(uint32 id, talk_base::MessageData* pdata = NULL);
+ void Post(uint32 id, talk_base::MessageData* pdata = NULL);
+ void PostDelayed(int cmsDelay, uint32 id = 0,
+ talk_base::MessageData* pdata = NULL);
+ void Clear(uint32 id = talk_base::MQID_ANY,
+ talk_base::MessageList* removed = NULL);
+ void FlushRtcpMessages();
+
+ // NetworkInterface implementation, called by MediaEngine
+ virtual bool SendPacket(talk_base::Buffer* packet);
+ virtual bool SendRtcp(talk_base::Buffer* packet);
+ virtual int SetOption(SocketType type, talk_base::Socket::Option o, int val);
+
+ // From TransportChannel
+ void OnWritableState(TransportChannel* channel);
+ virtual void OnChannelRead(TransportChannel* channel, const char* data,
+ size_t len, int flags);
+ void OnReadyToSend(TransportChannel* channel);
+
+ bool PacketIsRtcp(const TransportChannel* channel, const char* data,
+ size_t len);
+ bool SendPacket(bool rtcp, talk_base::Buffer* packet);
+ virtual bool WantsPacket(bool rtcp, talk_base::Buffer* packet);
+ void HandlePacket(bool rtcp, talk_base::Buffer* packet);
+
+ // Apply the new local/remote session description.
+ void OnNewLocalDescription(BaseSession* session, ContentAction action);
+ void OnNewRemoteDescription(BaseSession* session, ContentAction action);
+
+ void EnableMedia_w();
+ void DisableMedia_w();
+ virtual bool MuteStream_w(uint32 ssrc, bool mute);
+ bool IsStreamMuted_w(uint32 ssrc);
+ void ChannelWritable_w();
+ void ChannelNotWritable_w();
+ bool AddRecvStream_w(const StreamParams& sp);
+ bool RemoveRecvStream_w(uint32 ssrc);
+  // Whether this channel should negotiate keys via DTLS-SRTP; DataChannel
+  // overrides this (see below).
+  virtual bool ShouldSetupDtlsSrtp() const;
+  // Do the DTLS key expansion and impose it on the SRTP/SRTCP filters.
+  // |rtcp_channel| indicates whether to set up the RTP or RTCP filter.
+  bool SetupDtlsSrtp(bool rtcp_channel);
+  // Set the DTLS-SRTP cipher policy on this channel as appropriate.
+  bool SetDtlsSrtpCiphers(TransportChannel *tc, bool rtcp);
+
+  // Re-evaluates the channel's send/receive state; implemented per media
+  // type by the subclasses below.
+  virtual void ChangeState() = 0;
+
+  // Gets the content info appropriate to the channel (audio or video).
+  virtual const ContentInfo* GetFirstContent(
+      const SessionDescription* sdesc) = 0;
+  // NOTE: methods carrying the _w suffix follow this file's convention of
+  // being invoked on the worker thread (worker_thread_ below) -- confirm
+  // individual cases against channel.cc.
+  bool UpdateLocalStreams_w(const std::vector<StreamParams>& streams,
+                            ContentAction action);
+  bool UpdateRemoteStreams_w(const std::vector<StreamParams>& streams,
+                             ContentAction action);
+  // Media-type-independent part of applying a local/remote description;
+  // the pure-virtual SetLocalContent_w/SetRemoteContent_w hooks do the
+  // media-specific work in the subclasses.
+  bool SetBaseLocalContent_w(const MediaContentDescription* content,
+                             ContentAction action);
+  virtual bool SetLocalContent_w(const MediaContentDescription* content,
+                                 ContentAction action) = 0;
+  bool SetBaseRemoteContent_w(const MediaContentDescription* content,
+                              ContentAction action);
+  virtual bool SetRemoteContent_w(const MediaContentDescription* content,
+                                  ContentAction action) = 0;
+
+  // Applies crypto parameters / RTCP-mux state from a description originating
+  // at |src| (local or remote) to srtp_filter_ / rtcp_mux_filter_.
+  bool SetSrtp_w(const std::vector<CryptoParams>& params, ContentAction action,
+                 ContentSource src);
+  bool SetRtcpMux_w(bool enable, ContentAction action, ContentSource src);
+
+  virtual bool SetMaxSendBandwidth_w(int max_bandwidth);
+
+  // From MessageHandler
+  virtual void OnMessage(talk_base::Message* pmsg);
+
+  // Handled in derived classes
+  // Get the SRTP ciphers to use for RTP media
+  virtual void GetSrtpCiphers(std::vector<std::string>* ciphers) const = 0;
+  virtual void OnConnectionMonitorUpdate(SocketMonitor* monitor,
+      const std::vector<ConnectionInfo>& infos) = 0;
+
+ private:
+  // Diagnostic taps around the SRTP encrypt/decrypt steps; each carries
+  // (data, len, rtcp-flag).  The two critical sections below presumably
+  // serialize emission of these signals -- confirm in channel.cc.
+  sigslot::signal3<const void*, size_t, bool> SignalSendPacketPreCrypto;
+  sigslot::signal3<const void*, size_t, bool> SignalSendPacketPostCrypto;
+  sigslot::signal3<const void*, size_t, bool> SignalRecvPacketPreCrypto;
+  sigslot::signal3<const void*, size_t, bool> SignalRecvPacketPostCrypto;
+  talk_base::CriticalSection signal_send_packet_cs_;
+  talk_base::CriticalSection signal_recv_packet_cs_;
+
+  // Non-owning pointers to collaborators supplied at construction.
+  talk_base::Thread* worker_thread_;
+  MediaEngineInterface* media_engine_;
+  BaseSession* session_;
+  MediaChannel* media_channel_;
+  std::vector<StreamParams> local_streams_;
+  std::vector<StreamParams> remote_streams_;
+
+  std::string content_name_;
+  bool rtcp_;
+  TransportChannel* transport_channel_;
+  TransportChannel* rtcp_transport_channel_;
+  // Filters applied to in/outbound packets: SRTP protection, RTCP muxing,
+  // and SSRC-based demuxing.
+  SrtpFilter srtp_filter_;
+  RtcpMuxFilter rtcp_mux_filter_;
+  SsrcMuxFilter ssrc_filter_;
+  talk_base::scoped_ptr<SocketMonitor> socket_monitor_;
+  bool enabled_;
+  bool writable_;
+  bool rtp_ready_to_send_;
+  bool rtcp_ready_to_send_;
+  bool optimistic_data_send_;
+  bool was_ever_writable_;
+  MediaContentDirection local_content_direction_;
+  MediaContentDirection remote_content_direction_;
+  std::set<uint32> muted_streams_;
+  bool has_received_packet_;
+  bool dtls_keyed_;
+  bool secure_required_;
+};
+
+// VoiceChannel is a specialization that adds support for early media, DTMF,
+// and input/output level monitoring.
+class VoiceChannel : public BaseChannel {
+ public:
+ VoiceChannel(talk_base::Thread* thread, MediaEngineInterface* media_engine,
+ VoiceMediaChannel* channel, BaseSession* session,
+ const std::string& content_name, bool rtcp);
+ ~VoiceChannel();
+ bool Init();
+ bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer);
+ bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer);
+
+ // downcasts a MediaChannel
+ virtual VoiceMediaChannel* media_channel() const {
+ return static_cast<VoiceMediaChannel*>(BaseChannel::media_channel());
+ }
+
+ bool SetRingbackTone(const void* buf, int len);
+ void SetEarlyMedia(bool enable);
+ // This signal is emitted when we have gone a period of time without
+ // receiving early media. When received, a UI should start playing its
+ // own ringing sound
+ sigslot::signal1<VoiceChannel*> SignalEarlyMediaTimeout;
+
+ bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
+ // TODO(ronghuawu): Replace PressDTMF with InsertDtmf.
+ bool PressDTMF(int digit, bool playout);
+ // Returns if the telephone-event has been negotiated.
+ bool CanInsertDtmf();
+ // Send and/or play a DTMF |event| according to the |flags|.
+ // The DTMF out-of-band signal will be used on sending.
+ // The |ssrc| should be either 0 or a valid send stream ssrc.
+ // The valid value for the |event| are 0 which corresponding to DTMF
+ // event 0-9, *, #, A-D.
+ bool InsertDtmf(uint32 ssrc, int event_code, int duration, int flags);
+ bool SetOutputScaling(uint32 ssrc, double left, double right);
+ // Get statistics about the current media session.
+ bool GetStats(VoiceMediaInfo* stats);
+
+ // Monitoring functions
+ sigslot::signal2<VoiceChannel*, const std::vector<ConnectionInfo>&>
+ SignalConnectionMonitor;
+
+ void StartMediaMonitor(int cms);
+ void StopMediaMonitor();
+ sigslot::signal2<VoiceChannel*, const VoiceMediaInfo&> SignalMediaMonitor;
+
+ void StartAudioMonitor(int cms);
+ void StopAudioMonitor();
+ bool IsAudioMonitorRunning() const;
+ sigslot::signal2<VoiceChannel*, const AudioInfo&> SignalAudioMonitor;
+
+ void StartTypingMonitor(const TypingMonitorOptions& settings);
+ void StopTypingMonitor();
+ bool IsTypingMonitorRunning() const;
+
+ // Overrides BaseChannel::MuteStream_w.
+ virtual bool MuteStream_w(uint32 ssrc, bool mute);
+
+ int GetInputLevel_w();
+ int GetOutputLevel_w();
+ void GetActiveStreams_w(AudioInfo::StreamList* actives);
+
+ // Signal errors from VoiceMediaChannel. Arguments are:
+ // ssrc(uint32), and error(VoiceMediaChannel::Error).
+ sigslot::signal3<VoiceChannel*, uint32, VoiceMediaChannel::Error>
+ SignalMediaError;
+
+ // Configuration and setting.
+ bool SetChannelOptions(const AudioOptions& options);
+
+ private:
+ // overrides from BaseChannel
+ virtual void OnChannelRead(TransportChannel* channel,
+ const char* data, size_t len, int flags);
+ virtual void ChangeState();
+ virtual const ContentInfo* GetFirstContent(const SessionDescription* sdesc);
+ virtual bool SetLocalContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ virtual bool SetRemoteContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ bool SetRingbackTone_w(const void* buf, int len);
+ bool PlayRingbackTone_w(uint32 ssrc, bool play, bool loop);
+ void HandleEarlyMediaTimeout();
+ bool CanInsertDtmf_w();
+ bool InsertDtmf_w(uint32 ssrc, int event, int duration, int flags);
+ bool SetOutputScaling_w(uint32 ssrc, double left, double right);
+ bool GetStats_w(VoiceMediaInfo* stats);
+
+ virtual void OnMessage(talk_base::Message* pmsg);
+ virtual void GetSrtpCiphers(std::vector<std::string>* ciphers) const;
+ virtual void OnConnectionMonitorUpdate(
+ SocketMonitor* monitor, const std::vector<ConnectionInfo>& infos);
+ virtual void OnMediaMonitorUpdate(
+ VoiceMediaChannel* media_channel, const VoiceMediaInfo& info);
+ void OnAudioMonitorUpdate(AudioMonitor* monitor, const AudioInfo& info);
+ void OnVoiceChannelError(uint32 ssrc, VoiceMediaChannel::Error error);
+ void SendLastMediaError();
+ void OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode, SrtpFilter::Error error);
+ // Configuration and setting.
+ bool SetChannelOptions_w(const AudioOptions& options);
+ bool SetRenderer_w(uint32 ssrc, AudioRenderer* renderer, bool is_local);
+
+ static const int kEarlyMediaTimeout = 1000;
+ bool received_media_;
+ talk_base::scoped_ptr<VoiceMediaMonitor> media_monitor_;
+ talk_base::scoped_ptr<AudioMonitor> audio_monitor_;
+ talk_base::scoped_ptr<TypingMonitor> typing_monitor_;
+};
+
+// VideoChannel is a specialization for video.
+class VideoChannel : public BaseChannel {
+ public:
+ // Make screen capturer virtual so that it can be overriden in testing.
+ // E.g. used to test that window events are triggered correctly.
+ class ScreenCapturerFactory {
+ public:
+ virtual VideoCapturer* CreateScreenCapturer(const ScreencastId& window) = 0;
+ virtual ~ScreenCapturerFactory() {}
+ };
+
+ VideoChannel(talk_base::Thread* thread, MediaEngineInterface* media_engine,
+ VideoMediaChannel* channel, BaseSession* session,
+ const std::string& content_name, bool rtcp,
+ VoiceChannel* voice_channel);
+ ~VideoChannel();
+ bool Init();
+
+ bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
+ bool ApplyViewRequest(const ViewRequest& request);
+
+ // TODO(pthatcher): Refactor to use a "capture id" instead of an
+ // ssrc here as the "key".
+ VideoCapturer* AddScreencast(uint32 ssrc, const ScreencastId& id);
+ VideoCapturer* GetScreencastCapturer(uint32 ssrc);
+ bool SetCapturer(uint32 ssrc, VideoCapturer* capturer);
+ bool RemoveScreencast(uint32 ssrc);
+ // True if we've added a screencast. Doesn't matter if the capturer
+ // has been started or not.
+ bool IsScreencasting();
+ int ScreencastFps(uint32 ssrc);
+ // Get statistics about the current media session.
+ bool GetStats(VideoMediaInfo* stats);
+
+ sigslot::signal2<VideoChannel*, const std::vector<ConnectionInfo>&>
+ SignalConnectionMonitor;
+
+ void StartMediaMonitor(int cms);
+ void StopMediaMonitor();
+ sigslot::signal2<VideoChannel*, const VideoMediaInfo&> SignalMediaMonitor;
+ sigslot::signal2<uint32, talk_base::WindowEvent> SignalScreencastWindowEvent;
+
+ bool SendIntraFrame();
+ bool RequestIntraFrame();
+ sigslot::signal3<VideoChannel*, uint32, VideoMediaChannel::Error>
+ SignalMediaError;
+
+ void SetScreenCaptureFactory(
+ ScreenCapturerFactory* screencapture_factory);
+
+ // Configuration and setting.
+ bool SetChannelOptions(const VideoOptions& options);
+
+ protected:
+ // downcasts a MediaChannel
+ virtual VideoMediaChannel* media_channel() const {
+ return static_cast<VideoMediaChannel*>(BaseChannel::media_channel());
+ }
+
+ private:
+ typedef std::map<uint32, VideoCapturer*> ScreencastMap;
+
+ // overrides from BaseChannel
+ virtual void ChangeState();
+ virtual const ContentInfo* GetFirstContent(const SessionDescription* sdesc);
+ virtual bool SetLocalContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ virtual bool SetRemoteContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ void SendIntraFrame_w() {
+ media_channel()->SendIntraFrame();
+ }
+ void RequestIntraFrame_w() {
+ media_channel()->RequestIntraFrame();
+ }
+
+ bool ApplyViewRequest_w(const ViewRequest& request);
+ void SetRenderer_w(uint32 ssrc, VideoRenderer* renderer);
+
+ VideoCapturer* AddScreencast_w(uint32 ssrc, const ScreencastId& id);
+ VideoCapturer* GetScreencastCapturer_w(uint32 ssrc);
+ bool SetCapturer_w(uint32 ssrc, VideoCapturer* capturer);
+ bool RemoveScreencast_w(uint32 ssrc);
+ void OnScreencastWindowEvent_s(uint32 ssrc, talk_base::WindowEvent we);
+ bool IsScreencasting_w() const;
+ int ScreencastFps_w(uint32 ssrc) const;
+ void SetScreenCaptureFactory_w(
+ ScreenCapturerFactory* screencapture_factory);
+ bool GetStats_w(VideoMediaInfo* stats);
+
+ virtual void OnMessage(talk_base::Message* pmsg);
+ virtual void GetSrtpCiphers(std::vector<std::string>* ciphers) const;
+ virtual void OnConnectionMonitorUpdate(
+ SocketMonitor* monitor, const std::vector<ConnectionInfo>& infos);
+ virtual void OnMediaMonitorUpdate(
+ VideoMediaChannel* media_channel, const VideoMediaInfo& info);
+ virtual void OnScreencastWindowEvent(uint32 ssrc,
+ talk_base::WindowEvent event);
+ virtual void OnStateChange(VideoCapturer* capturer, CaptureState ev);
+ bool GetLocalSsrc(const VideoCapturer* capturer, uint32* ssrc);
+
+ void OnVideoChannelError(uint32 ssrc, VideoMediaChannel::Error error);
+ void OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode, SrtpFilter::Error error);
+ // Configuration and setting.
+ bool SetChannelOptions_w(const VideoOptions& options);
+
+ VoiceChannel* voice_channel_;
+ VideoRenderer* renderer_;
+ talk_base::scoped_ptr<ScreenCapturerFactory> screencapture_factory_;
+ ScreencastMap screencast_capturers_;
+ talk_base::scoped_ptr<VideoMediaMonitor> media_monitor_;
+
+ talk_base::WindowEvent previous_we_;
+};
+
+// DataChannel is a specialization for data.
+class DataChannel : public BaseChannel {
+ public:
+ DataChannel(talk_base::Thread* thread,
+ DataMediaChannel* media_channel,
+ BaseSession* session,
+ const std::string& content_name,
+ bool rtcp);
+ ~DataChannel();
+ bool Init();
+
+ // downcasts a MediaChannel
+ virtual DataMediaChannel* media_channel() const {
+ return static_cast<DataMediaChannel*>(BaseChannel::media_channel());
+ }
+
+ virtual bool SendData(const SendDataParams& params,
+ const talk_base::Buffer& payload,
+ SendDataResult* result);
+
+ void StartMediaMonitor(int cms);
+ void StopMediaMonitor();
+
+ sigslot::signal2<DataChannel*, const DataMediaInfo&> SignalMediaMonitor;
+ sigslot::signal2<DataChannel*, const std::vector<ConnectionInfo>&>
+ SignalConnectionMonitor;
+ sigslot::signal3<DataChannel*, uint32, DataMediaChannel::Error>
+ SignalMediaError;
+ sigslot::signal3<DataChannel*,
+ const ReceiveDataParams&,
+ const talk_base::Buffer&>
+ SignalDataReceived;
+ // Signal for notifying when the channel becomes ready to send data.
+ // That occurs when the channel is enabled, the transport is writable,
+ // both local and remote descriptions are set, and the channel is unblocked.
+ sigslot::signal1<bool> SignalReadyToSendData;
+
+ private:
+ struct SendDataMessageData : public talk_base::MessageData {
+ SendDataMessageData(const SendDataParams& params,
+ const talk_base::Buffer* payload,
+ SendDataResult* result)
+ : params(params),
+ payload(payload),
+ result(result),
+ succeeded(false) {
+ }
+
+ const SendDataParams& params;
+ const talk_base::Buffer* payload;
+ SendDataResult* result;
+ bool succeeded;
+ };
+
+ struct DataReceivedMessageData : public talk_base::MessageData {
+ // We copy the data because the data will become invalid after we
+ // handle DataMediaChannel::SignalDataReceived but before we fire
+ // SignalDataReceived.
+ DataReceivedMessageData(
+ const ReceiveDataParams& params, const char* data, size_t len)
+ : params(params),
+ payload(data, len) {
+ }
+ const ReceiveDataParams params;
+ const talk_base::Buffer payload;
+ };
+
+ typedef talk_base::TypedMessageData<bool> DataChannelReadyToSendMessageData;
+
+ // overrides from BaseChannel
+ virtual const ContentInfo* GetFirstContent(const SessionDescription* sdesc);
+ // If data_channel_type_ is DCT_NONE, set it. Otherwise, check that
+ // it's the same as what was set previously. Returns false if it's
+ // set to one type one type and changed to another type later.
+ bool SetDataChannelType(DataChannelType new_data_channel_type);
+ // Same as SetDataChannelType, but extracts the type from the
+ // DataContentDescription.
+ bool SetDataChannelTypeFromContent(const DataContentDescription* content);
+ virtual bool SetMaxSendBandwidth_w(int max_bandwidth);
+ virtual bool SetLocalContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ virtual bool SetRemoteContent_w(const MediaContentDescription* content,
+ ContentAction action);
+ virtual void ChangeState();
+ virtual bool WantsPacket(bool rtcp, talk_base::Buffer* packet);
+
+ virtual void OnMessage(talk_base::Message* pmsg);
+ virtual void GetSrtpCiphers(std::vector<std::string>* ciphers) const;
+ virtual void OnConnectionMonitorUpdate(
+ SocketMonitor* monitor, const std::vector<ConnectionInfo>& infos);
+ virtual void OnMediaMonitorUpdate(
+ DataMediaChannel* media_channel, const DataMediaInfo& info);
+ virtual bool ShouldSetupDtlsSrtp() const;
+ void OnDataReceived(
+ const ReceiveDataParams& params, const char* data, size_t len);
+ void OnDataChannelError(uint32 ssrc, DataMediaChannel::Error error);
+ void OnDataChannelReadyToSend(bool writable);
+ void OnSrtpError(uint32 ssrc, SrtpFilter::Mode mode, SrtpFilter::Error error);
+
+ talk_base::scoped_ptr<DataMediaMonitor> media_monitor_;
+ // TODO(pthatcher): Make a separate SctpDataChannel and
+ // RtpDataChannel instead of using this.
+ DataChannelType data_channel_type_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_CHANNEL_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channel_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/channel_unittest.cc
new file mode 100644
index 00000000000..ff03b49805f
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channel_unittest.cc
@@ -0,0 +1,2905 @@
+// libjingle
+// Copyright 2009 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "talk/base/fileutils.h"
+#include "talk/base/gunit.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/pathutils.h"
+#include "talk/base/signalthread.h"
+#include "talk/base/ssladapter.h"
+#include "talk/base/sslidentity.h"
+#include "talk/base/window.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakertp.h"
+#include "talk/media/base/fakevideocapturer.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/base/rtpdump.h"
+#include "talk/media/base/screencastid.h"
+#include "talk/media/base/testutils.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channel.h"
+#include "talk/session/media/mediamessages.h"
+#include "talk/session/media/mediarecorder.h"
+#include "talk/session/media/mediasessionclient.h"
+#include "talk/session/media/typingmonitor.h"
+
+// Skips the current test when the named talk_base::SSLStreamAdapter
+// feature predicate (e.g. a DTLS/SRTP capability check) returns false for
+// this build.
+#define MAYBE_SKIP_TEST(feature) \
+  if (!(talk_base::SSLStreamAdapter::feature())) { \
+    LOG(LS_INFO) << "Feature disabled... skipping"; \
+    return; \
+  }
+
+using cricket::CA_OFFER;
+using cricket::CA_PRANSWER;
+using cricket::CA_ANSWER;
+using cricket::CA_UPDATE;
+using cricket::FakeVoiceMediaChannel;
+using cricket::ScreencastId;
+using cricket::StreamParams;
+using cricket::TransportChannel;
+using talk_base::WindowId;
+
+// Codec fixtures used throughout the tests.  Constructor-argument meanings
+// (payload type, name, bitrate/size, clockrate/fps, channels, preference)
+// follow the cricket codec constructors -- verify against mediachannel.h.
+static const cricket::AudioCodec kPcmuCodec(0, "PCMU", 64000, 8000, 1, 0);
+static const cricket::AudioCodec kPcmaCodec(8, "PCMA", 64000, 8000, 1, 0);
+static const cricket::AudioCodec kIsacCodec(103, "ISAC", 40000, 16000, 1, 0);
+static const cricket::VideoCodec kH264Codec(97, "H264", 640, 400, 30, 0);
+static const cricket::VideoCodec kH264SvcCodec(99, "H264-SVC", 320, 200, 15, 0);
+static const cricket::DataCodec kGoogleDataCodec(101, "google-data", 0);
+// SSRCs and CNAME used for the fixture streams below.
+static const uint32 kSsrc1 = 0x1111;
+static const uint32 kSsrc2 = 0x2222;
+static const uint32 kSsrc3 = 0x3333;
+static const char kCName[] = "a@b.com";
+
+// Bundles the concrete channel, media-channel, content-description, codec
+// and media-info types for one media type, so the ChannelTest fixture below
+// can be written generically in terms of T::Channel, T::MediaChannel, etc.
+template<class ChannelT,
+         class MediaChannelT,
+         class ContentT,
+         class CodecT,
+         class MediaInfoT>
+class Traits {
+ public:
+  typedef ChannelT Channel;
+  typedef MediaChannelT MediaChannel;
+  typedef ContentT Content;
+  typedef CodecT Codec;
+  typedef MediaInfoT MediaInfo;
+};
+
+// Test double for VideoChannel::ScreenCapturerFactory.  Hands out a single
+// FakeVideoCapturer and tracks its lifetime and capture state so tests can
+// inspect them via window_capturer() and capture_state().
+class FakeScreenCaptureFactory
+    : public cricket::VideoChannel::ScreenCapturerFactory,
+      public sigslot::has_slots<> {
+ public:
+  FakeScreenCaptureFactory()
+      : window_capturer_(NULL),
+        capture_state_(cricket::CS_STOPPED) {}
+
+  virtual cricket::VideoCapturer* CreateScreenCapturer(
+      const ScreencastId& window) {
+    if (window_capturer_ != NULL) {
+      // Class is only designed to handle one fake screencapturer.
+      ADD_FAILURE();
+      return NULL;
+    }
+    window_capturer_ = new cricket::FakeVideoCapturer;
+    window_capturer_->SignalDestroyed.connect(
+        this,
+        &FakeScreenCaptureFactory::OnWindowCapturerDestroyed);
+    window_capturer_->SignalStateChange.connect(
+        this,
+        &FakeScreenCaptureFactory::OnStateChange);
+    return window_capturer_;
+  }
+
+  // The capturer created above, or NULL before creation / after destruction.
+  cricket::FakeVideoCapturer* window_capturer() { return window_capturer_; }
+
+  // The most recent state reported through SignalStateChange.
+  cricket::CaptureState capture_state() { return capture_state_; }
+
+ private:
+  // Clears the cached pointer so window_capturer() never dangles.
+  void OnWindowCapturerDestroyed(cricket::FakeVideoCapturer* capturer) {
+    if (capturer == window_capturer_) {
+      window_capturer_ = NULL;
+    }
+  }
+  void OnStateChange(cricket::VideoCapturer*, cricket::CaptureState state) {
+    capture_state_ = state;
+  }
+
+  cricket::FakeVideoCapturer* window_capturer_;
+  cricket::CaptureState capture_state_;
+};
+
+// Controls how long we wait for a session to send messages that we
+// expect, in milliseconds. We put it high to avoid flaky tests.
+static const int kEventTimeout = 5000;
+
+// Traits instantiation binding ChannelTest to the voice (audio) types.
+class VoiceTraits : public Traits<cricket::VoiceChannel,
+                                  cricket::FakeVoiceMediaChannel,
+                                  cricket::AudioContentDescription,
+                                  cricket::AudioCodec,
+                                  cricket::VoiceMediaInfo> {
+};
+
+// Traits instantiation binding ChannelTest to the video types.
+class VideoTraits : public Traits<cricket::VideoChannel,
+                                  cricket::FakeVideoMediaChannel,
+                                  cricket::VideoContentDescription,
+                                  cricket::VideoCodec,
+                                  cricket::VideoMediaInfo> {
+};
+
+// Traits instantiation binding ChannelTest to the data-channel types.
+class DataTraits : public Traits<cricket::DataChannel,
+                                 cricket::FakeDataMediaChannel,
+                                 cricket::DataContentDescription,
+                                 cricket::DataCodec,
+                                 cricket::DataMediaInfo> {
+};
+
+
+// Stream factory used by recording tests: opens |path| for binary writing
+// through the talk_base filesystem abstraction.  Presumably returns NULL on
+// failure, per Filesystem::OpenFile -- verify at call sites.
+talk_base::StreamInterface* Open(const std::string& path) {
+  return talk_base::Filesystem::OpenFile(
+      talk_base::Pathname(path), "wb");
+}
+
+// Base class for Voice/VideoChannel tests
+template<class T>
+class ChannelTest : public testing::Test, public sigslot::has_slots<> {
+ public:
+ enum Flags { RTCP = 0x1, RTCP_MUX = 0x2, SECURE = 0x4, SSRC_MUX = 0x8,
+ DTLS = 0x10 };
+
+ ChannelTest(const uint8* rtp_data, int rtp_len,
+ const uint8* rtcp_data, int rtcp_len)
+ : session1_(true),
+ session2_(false),
+ media_channel1_(NULL),
+ media_channel2_(NULL),
+ rtp_packet_(reinterpret_cast<const char*>(rtp_data), rtp_len),
+ rtcp_packet_(reinterpret_cast<const char*>(rtcp_data), rtcp_len),
+ media_info_callbacks1_(),
+ media_info_callbacks2_(),
+ mute_callback_recved_(false),
+ mute_callback_value_(false),
+ ssrc_(0),
+ error_(T::MediaChannel::ERROR_NONE) {
+ }
+
+ static void SetUpTestCase() {
+ talk_base::InitializeSSL();
+ }
+
+ static void TearDownTestCase() {
+ talk_base::CleanupSSL();
+ }
+
+ void CreateChannels(int flags1, int flags2) {
+ CreateChannels(new typename T::MediaChannel(NULL),
+ new typename T::MediaChannel(NULL),
+ flags1, flags2, talk_base::Thread::Current());
+ }
+ void CreateChannels(int flags) {
+ CreateChannels(new typename T::MediaChannel(NULL),
+ new typename T::MediaChannel(NULL),
+ flags, talk_base::Thread::Current());
+ }
+ void CreateChannels(int flags1, int flags2,
+ talk_base::Thread* thread) {
+ CreateChannels(new typename T::MediaChannel(NULL),
+ new typename T::MediaChannel(NULL),
+ flags1, flags2, thread);
+ }
+ void CreateChannels(int flags,
+ talk_base::Thread* thread) {
+ CreateChannels(new typename T::MediaChannel(NULL),
+ new typename T::MediaChannel(NULL),
+ flags, thread);
+ }
+ void CreateChannels(
+ typename T::MediaChannel* ch1, typename T::MediaChannel* ch2,
+ int flags1, int flags2, talk_base::Thread* thread) {
+ media_channel1_ = ch1;
+ media_channel2_ = ch2;
+ channel1_.reset(CreateChannel(thread, &media_engine_, ch1, &session1_,
+ (flags1 & RTCP) != 0));
+ channel2_.reset(CreateChannel(thread, &media_engine_, ch2, &session2_,
+ (flags2 & RTCP) != 0));
+ channel1_->SignalMediaMonitor.connect(
+ this, &ChannelTest<T>::OnMediaMonitor);
+ channel2_->SignalMediaMonitor.connect(
+ this, &ChannelTest<T>::OnMediaMonitor);
+ channel1_->SignalMediaError.connect(
+ this, &ChannelTest<T>::OnMediaChannelError);
+ channel2_->SignalMediaError.connect(
+ this, &ChannelTest<T>::OnMediaChannelError);
+ channel1_->SignalAutoMuted.connect(
+ this, &ChannelTest<T>::OnMediaMuted);
+ CreateContent(flags1, kPcmuCodec, kH264Codec,
+ &local_media_content1_);
+ CreateContent(flags2, kPcmuCodec, kH264Codec,
+ &local_media_content2_);
+ CopyContent(local_media_content1_, &remote_media_content1_);
+ CopyContent(local_media_content2_, &remote_media_content2_);
+
+ if (flags1 & DTLS) {
+ identity1_.reset(talk_base::SSLIdentity::Generate("session1"));
+ session1_.set_ssl_identity(identity1_.get());
+ }
+ if (flags2 & DTLS) {
+ identity2_.reset(talk_base::SSLIdentity::Generate("session2"));
+ session2_.set_ssl_identity(identity2_.get());
+ }
+
+ // Add stream information (SSRC) to the local content but not to the remote
+ // content. This means that we per default know the SSRC of what we send but
+ // not what we receive.
+ AddLegacyStreamInContent(kSsrc1, flags1, &local_media_content1_);
+ AddLegacyStreamInContent(kSsrc2, flags2, &local_media_content2_);
+
+ // If SSRC_MUX is used we also need to know the SSRC of the incoming stream.
+ if (flags1 & SSRC_MUX) {
+ AddLegacyStreamInContent(kSsrc1, flags1, &remote_media_content1_);
+ }
+ if (flags2 & SSRC_MUX) {
+ AddLegacyStreamInContent(kSsrc2, flags2, &remote_media_content2_);
+ }
+ }
+
+ void CreateChannels(
+ typename T::MediaChannel* ch1, typename T::MediaChannel* ch2,
+ int flags, talk_base::Thread* thread) {
+ media_channel1_ = ch1;
+ media_channel2_ = ch2;
+
+ channel1_.reset(CreateChannel(thread, &media_engine_, ch1, &session1_,
+ (flags & RTCP) != 0));
+ channel2_.reset(CreateChannel(thread, &media_engine_, ch2, &session1_,
+ (flags & RTCP) != 0));
+ channel1_->SignalMediaMonitor.connect(
+ this, &ChannelTest<T>::OnMediaMonitor);
+ channel2_->SignalMediaMonitor.connect(
+ this, &ChannelTest<T>::OnMediaMonitor);
+ channel2_->SignalMediaError.connect(
+ this, &ChannelTest<T>::OnMediaChannelError);
+ CreateContent(flags, kPcmuCodec, kH264Codec,
+ &local_media_content1_);
+ CreateContent(flags, kPcmuCodec, kH264Codec,
+ &local_media_content2_);
+ CopyContent(local_media_content1_, &remote_media_content1_);
+ CopyContent(local_media_content2_, &remote_media_content2_);
+ // Add stream information (SSRC) to the local content but not to the remote
+ // content. This means that we per default know the SSRC of what we send but
+ // not what we receive.
+ AddLegacyStreamInContent(kSsrc1, flags, &local_media_content1_);
+ AddLegacyStreamInContent(kSsrc2, flags, &local_media_content2_);
+
+ // If SSRC_MUX is used we also need to know the SSRC of the incoming stream.
+ if (flags & SSRC_MUX) {
+ AddLegacyStreamInContent(kSsrc1, flags, &remote_media_content1_);
+ AddLegacyStreamInContent(kSsrc2, flags, &remote_media_content2_);
+ }
+ }
+
+ typename T::Channel* CreateChannel(talk_base::Thread* thread,
+ cricket::MediaEngineInterface* engine,
+ typename T::MediaChannel* ch,
+ cricket::BaseSession* session,
+ bool rtcp) {
+ typename T::Channel* channel = new typename T::Channel(
+ thread, engine, ch, session, cricket::CN_AUDIO, rtcp);
+ if (!channel->Init()) {
+ delete channel;
+ channel = NULL;
+ }
+ return channel;
+ }
+
+ bool SendInitiate() {
+ bool result = channel1_->SetLocalContent(&local_media_content1_, CA_OFFER);
+ if (result) {
+ channel1_->Enable(true);
+ result = channel2_->SetRemoteContent(&remote_media_content1_, CA_OFFER);
+ if (result) {
+ session1_.Connect(&session2_);
+
+ result = channel2_->SetLocalContent(&local_media_content2_, CA_ANSWER);
+ }
+ }
+ return result;
+ }
+
+ bool SendAccept() {
+ channel2_->Enable(true);
+ return channel1_->SetRemoteContent(&remote_media_content2_, CA_ANSWER);
+ }
+
+ bool SendOffer() {
+ bool result = channel1_->SetLocalContent(&local_media_content1_, CA_OFFER);
+ if (result) {
+ channel1_->Enable(true);
+ result = channel2_->SetRemoteContent(&remote_media_content1_, CA_OFFER);
+ }
+ return result;
+ }
+
+ bool SendProvisionalAnswer() {
+ bool result = channel2_->SetLocalContent(&local_media_content2_,
+ CA_PRANSWER);
+ if (result) {
+ channel2_->Enable(true);
+ result = channel1_->SetRemoteContent(&remote_media_content2_,
+ CA_PRANSWER);
+ session1_.Connect(&session2_);
+ }
+ return result;
+ }
+
+ bool SendFinalAnswer() {
+ bool result = channel2_->SetLocalContent(&local_media_content2_, CA_ANSWER);
+ if (result)
+ result = channel1_->SetRemoteContent(&remote_media_content2_, CA_ANSWER);
+ return result;
+ }
+
+ bool SendTerminate() {
+ channel1_.reset();
+ channel2_.reset();
+ return true;
+ }
+
+ bool AddStream1(int id) {
+ return channel1_->AddRecvStream(cricket::StreamParams::CreateLegacy(id));
+ }
+ bool RemoveStream1(int id) {
+ return channel1_->RemoveRecvStream(id);
+ }
+
+ cricket::FakeTransport* GetTransport1() {
+ return session1_.GetTransport(channel1_->content_name());
+ }
+ cricket::FakeTransport* GetTransport2() {
+ return session2_.GetTransport(channel2_->content_name());
+ }
+
+ bool SendRtp1() {
+ return media_channel1_->SendRtp(rtp_packet_.c_str(),
+ static_cast<int>(rtp_packet_.size()));
+ }
+ bool SendRtp2() {
+ return media_channel2_->SendRtp(rtp_packet_.c_str(),
+ static_cast<int>(rtp_packet_.size()));
+ }
+ bool SendRtcp1() {
+ return media_channel1_->SendRtcp(rtcp_packet_.c_str(),
+ static_cast<int>(rtcp_packet_.size()));
+ }
+ bool SendRtcp2() {
+ return media_channel2_->SendRtcp(rtcp_packet_.c_str(),
+ static_cast<int>(rtcp_packet_.size()));
+ }
+ // Methods to send custom data.
+ bool SendCustomRtp1(uint32 ssrc, int sequence_number) {
+ std::string data(CreateRtpData(ssrc, sequence_number));
+ return media_channel1_->SendRtp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool SendCustomRtp2(uint32 ssrc, int sequence_number) {
+ std::string data(CreateRtpData(ssrc, sequence_number));
+ return media_channel2_->SendRtp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool SendCustomRtcp1(uint32 ssrc) {
+ std::string data(CreateRtcpData(ssrc));
+ return media_channel1_->SendRtcp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool SendCustomRtcp2(uint32 ssrc) {
+ std::string data(CreateRtcpData(ssrc));
+ return media_channel2_->SendRtcp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool CheckRtp1() {
+ return media_channel1_->CheckRtp(rtp_packet_.c_str(),
+ static_cast<int>(rtp_packet_.size()));
+ }
+ bool CheckRtp2() {
+ return media_channel2_->CheckRtp(rtp_packet_.c_str(),
+ static_cast<int>(rtp_packet_.size()));
+ }
+ bool CheckRtcp1() {
+ return media_channel1_->CheckRtcp(rtcp_packet_.c_str(),
+ static_cast<int>(rtcp_packet_.size()));
+ }
+ bool CheckRtcp2() {
+ return media_channel2_->CheckRtcp(rtcp_packet_.c_str(),
+ static_cast<int>(rtcp_packet_.size()));
+ }
+ // Methods to check custom data.
+ bool CheckCustomRtp1(uint32 ssrc, int sequence_number) {
+ std::string data(CreateRtpData(ssrc, sequence_number));
+ return media_channel1_->CheckRtp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool CheckCustomRtp2(uint32 ssrc, int sequence_number) {
+ std::string data(CreateRtpData(ssrc, sequence_number));
+ return media_channel2_->CheckRtp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool CheckCustomRtcp1(uint32 ssrc) {
+ std::string data(CreateRtcpData(ssrc));
+ return media_channel1_->CheckRtcp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ bool CheckCustomRtcp2(uint32 ssrc) {
+ std::string data(CreateRtcpData(ssrc));
+ return media_channel2_->CheckRtcp(data.c_str(),
+ static_cast<int>(data.size()));
+ }
+ std::string CreateRtpData(uint32 ssrc, int sequence_number) {
+ std::string data(rtp_packet_);
+ // Set SSRC in the rtp packet copy.
+ talk_base::SetBE32(const_cast<char*>(data.c_str()) + 8, ssrc);
+ talk_base::SetBE16(const_cast<char*>(data.c_str()) + 2, sequence_number);
+ return data;
+ }
+ std::string CreateRtcpData(uint32 ssrc) {
+ std::string data(rtcp_packet_);
+ // Set SSRC in the rtcp packet copy.
+ talk_base::SetBE32(const_cast<char*>(data.c_str()) + 4, ssrc);
+ return data;
+ }
+
+ bool CheckNoRtp1() {
+ return media_channel1_->CheckNoRtp();
+ }
+ bool CheckNoRtp2() {
+ return media_channel2_->CheckNoRtp();
+ }
+ bool CheckNoRtcp1() {
+ return media_channel1_->CheckNoRtcp();
+ }
+ bool CheckNoRtcp2() {
+ return media_channel2_->CheckNoRtcp();
+ }
+
+ void CreateContent(int flags,
+ const cricket::AudioCodec& audio_codec,
+ const cricket::VideoCodec& video_codec,
+ typename T::Content* content) {
+ // overridden in specialized classes
+ }
+ void CopyContent(const typename T::Content& source,
+ typename T::Content* content) {
+ // overridden in specialized classes
+ }
+
+ void SetOptimisticDataSend(bool optimistic_data_send) {
+ channel1_->set_optimistic_data_send(optimistic_data_send);
+ channel2_->set_optimistic_data_send(optimistic_data_send);
+ }
+
+ // Creates a cricket::SessionDescription with one MediaContent and one stream.
+ // kPcmuCodec is used as audio codec and kH264Codec is used as video codec.
+ cricket::SessionDescription* CreateSessionDescriptionWithStream(uint32 ssrc) {
+ typename T::Content content;
+ cricket::SessionDescription* sdesc = new cricket::SessionDescription();
+ CreateContent(SECURE, kPcmuCodec, kH264Codec, &content);
+ AddLegacyStreamInContent(ssrc, 0, &content);
+ sdesc->AddContent("DUMMY_CONTENT_NAME",
+ cricket::NS_JINGLE_RTP, content.Copy());
+ return sdesc;
+ }
+
+ // Worker thread that invokes a single ChannelTest member function and
+ // stores its boolean result through |result_|. |result| may be NULL if the
+ // caller does not care about the outcome.
+ class CallThread : public talk_base::SignalThread {
+ public:
+ typedef bool (ChannelTest<T>::*Method)();
+ CallThread(ChannelTest<T>* obj, Method method, bool* result)
+ : obj_(obj),
+ method_(method),
+ result_(result) {
+ // DoWork() tolerates a NULL result pointer; be consistent here instead
+ // of unconditionally dereferencing it.
+ if (result_) {
+ *result_ = false;
+ }
+ }
+ virtual void DoWork() {
+ bool result = (*obj_.*method_)();
+ if (result_) {
+ *result_ = result;
+ }
+ }
+ private:
+ ChannelTest<T>* obj_;
+ Method method_;
+ bool* result_;
+ };
+ // Runs |method| asynchronously on a new CallThread; |result| receives the
+ // method's return value once the thread has run. Release() lets the
+ // SignalThread delete itself when it completes.
+ void CallOnThread(typename CallThread::Method method, bool* result) {
+ CallThread* thread = new CallThread(this, method, result);
+ thread->Start();
+ thread->Release();
+ }
+
+ // Like CallOnThread(), but blocks until the worker thread has finished.
+ void CallOnThreadAndWaitForDone(typename CallThread::Method method,
+ bool* result) {
+ CallThread* thread = new CallThread(this, method, result);
+ thread->Start();
+ thread->Destroy(true);
+ }
+
+ bool CodecMatches(const typename T::Codec& c1, const typename T::Codec& c2) {
+ return false; // overridden in specialized classes
+ }
+
+ // Counts media-info callbacks per channel so tests can verify that the
+ // media monitor fired for the expected channel.
+ void OnMediaMonitor(typename T::Channel* channel,
+ const typename T::MediaInfo& info) {
+ if (channel == channel1_.get()) {
+ media_info_callbacks1_++;
+ } else if (channel == channel2_.get()) {
+ media_info_callbacks2_++;
+ }
+ }
+
+ // Records the most recent media channel error and the SSRC it applies to.
+ void OnMediaChannelError(typename T::Channel* channel,
+ uint32 ssrc,
+ typename T::MediaChannel::Error error) {
+ ssrc_ = ssrc;
+ error_ = error;
+ }
+
+ // Records that a mute-state signal was received and the muted value.
+ void OnMediaMuted(cricket::BaseChannel* channel, bool muted) {
+ mute_callback_recved_ = true;
+ mute_callback_value_ = muted;
+ }
+
+ void AddLegacyStreamInContent(uint32 ssrc, int flags,
+ typename T::Content* content) {
+ // Base implementation; overridden in specialized classes.
+ }
+
+ // Tests that can be used by derived classes.
+
+ // Basic sanity check: a freshly created channel is insecure, idle, and has
+ // no codecs, streams, or queued packets.
+ void TestInit() {
+ CreateChannels(0, 0);
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_FALSE(media_channel1_->playout());
+ EXPECT_TRUE(media_channel1_->codecs().empty());
+ EXPECT_TRUE(media_channel1_->recv_streams().empty());
+ EXPECT_TRUE(media_channel1_->rtp_packets().empty());
+ EXPECT_TRUE(media_channel1_->rtcp_packets().empty());
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly configure
+ // the codecs. Codecs are applied only once the remote answer arrives.
+ void TestSetContents() {
+ CreateChannels(0, 0);
+ typename T::Content content;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_EQ(0U, media_channel1_->codecs().size());
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly deal
+ // with an empty offer: the local offer here is a default-constructed
+ // content with no codecs.
+ void TestSetContentsNullOffer() {
+ CreateChannels(0, 0);
+ typename T::Content content;
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ EXPECT_EQ(0U, media_channel1_->codecs().size());
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly set RTCP
+ // mux: the dedicated RTCP transport channel is torn down only when both
+ // sides agree to mux.
+ void TestSetContentsRtcpMux() {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() != NULL);
+ EXPECT_TRUE(channel2_->rtcp_transport_channel() != NULL);
+ typename T::Content content;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ // Both sides agree on mux. Should no longer be a separate RTCP channel.
+ content.set_rtcp_mux(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() == NULL);
+ // Only initiator supports mux. Should still have a separate RTCP channel.
+ EXPECT_TRUE(channel2_->SetLocalContent(&content, CA_OFFER));
+ content.set_rtcp_mux(false);
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content, CA_ANSWER));
+ EXPECT_TRUE(channel2_->rtcp_transport_channel() != NULL);
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly set RTCP
+ // mux when a provisional answer is received: muxing must not take effect
+ // until the final answer.
+ void TestSetContentsRtcpMuxWithPrAnswer() {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() != NULL);
+ EXPECT_TRUE(channel2_->rtcp_transport_channel() != NULL);
+ typename T::Content content;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ content.set_rtcp_mux(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_PRANSWER));
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() != NULL);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ // Both sides agree on mux. Should no longer be a separate RTCP channel.
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() == NULL);
+ // Only initiator supports mux. Should still have a separate RTCP channel.
+ EXPECT_TRUE(channel2_->SetLocalContent(&content, CA_OFFER));
+ content.set_rtcp_mux(false);
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content, CA_PRANSWER));
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content, CA_ANSWER));
+ EXPECT_TRUE(channel2_->rtcp_transport_channel() != NULL);
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly set
+ // video options (buffered-mode latency) through to the media channel,
+ // on both offer and answer.
+ void TestSetContentsVideoOptions() {
+ CreateChannels(0, 0);
+ typename T::Content content;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ content.set_buffered_mode_latency(101);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_EQ(0U, media_channel1_->codecs().size());
+ cricket::VideoOptions options;
+ ASSERT_TRUE(media_channel1_->GetOptions(&options));
+ int latency = 0;
+ EXPECT_TRUE(options.buffered_mode_latency.Get(&latency));
+ EXPECT_EQ(101, latency);
+ content.set_buffered_mode_latency(102);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ ASSERT_TRUE(media_channel1_->GetOptions(&options));
+ EXPECT_TRUE(options.buffered_mode_latency.Get(&latency));
+ EXPECT_EQ(102, latency);
+ }
+
+ // Test that SetRemoteContent properly deals with a content update:
+ // a partial update with codecs replaces them, while an empty partial
+ // update is ignored.
+ void TestSetRemoteContentUpdate() {
+ CreateChannels(0, 0);
+ typename T::Content content;
+ CreateContent(RTCP | RTCP_MUX | SECURE,
+ kPcmuCodec, kH264Codec,
+ &content);
+ EXPECT_EQ(0U, media_channel1_->codecs().size());
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ // Now update with other codecs.
+ typename T::Content update_content;
+ update_content.set_partial(true);
+ CreateContent(0, kIsacCodec, kH264SvcCodec,
+ &update_content);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&update_content, CA_UPDATE));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(update_content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ // Now update without any codecs. This is ignored.
+ typename T::Content empty_content;
+ empty_content.set_partial(true);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&empty_content, CA_UPDATE));
+ ASSERT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(CodecMatches(update_content.codecs()[0],
+ media_channel1_->codecs()[0]));
+ }
+
+ // Test that Add/RemoveStream properly forward to the media channel's
+ // receive-stream list.
+ void TestStreams() {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(AddStream1(1));
+ EXPECT_TRUE(AddStream1(2));
+ EXPECT_EQ(2U, media_channel1_->recv_streams().size());
+ EXPECT_TRUE(RemoveStream1(2));
+ EXPECT_EQ(1U, media_channel1_->recv_streams().size());
+ EXPECT_TRUE(RemoveStream1(1));
+ EXPECT_EQ(0U, media_channel1_->recv_streams().size());
+ }
+
+ // Test that SetLocalContent properly handles adding and removing StreamParams
+ // to the local content description.
+ // This test uses the CA_UPDATE action that doesn't require a full
+ // MediaContentDescription to do an update.
+ void TestUpdateStreamsInLocalContent() {
+ cricket::StreamParams stream1;
+ stream1.groupid = "group1";
+ stream1.id = "stream1";
+ stream1.ssrcs.push_back(kSsrc1);
+ stream1.cname = "stream1_cname";
+
+ cricket::StreamParams stream2;
+ stream2.groupid = "group2";
+ stream2.id = "stream2";
+ stream2.ssrcs.push_back(kSsrc2);
+ stream2.cname = "stream2_cname";
+
+ cricket::StreamParams stream3;
+ stream3.groupid = "group3";
+ stream3.id = "stream3";
+ stream3.ssrcs.push_back(kSsrc3);
+ stream3.cname = "stream3_cname";
+
+ CreateChannels(0, 0);
+ typename T::Content content1;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content1);
+ content1.AddStream(stream1);
+ EXPECT_EQ(0u, media_channel1_->send_streams().size());
+ EXPECT_TRUE(channel1_->SetLocalContent(&content1, CA_OFFER));
+
+ ASSERT_EQ(1u, media_channel1_->send_streams().size());
+ EXPECT_EQ(stream1, media_channel1_->send_streams()[0]);
+
+ // Update the local streams by adding another sending stream.
+ // Use a partial updated session description.
+ typename T::Content content2;
+ content2.AddStream(stream2);
+ content2.AddStream(stream3);
+ content2.set_partial(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content2, CA_UPDATE));
+ ASSERT_EQ(3u, media_channel1_->send_streams().size());
+ EXPECT_EQ(stream1, media_channel1_->send_streams()[0]);
+ EXPECT_EQ(stream2, media_channel1_->send_streams()[1]);
+ EXPECT_EQ(stream3, media_channel1_->send_streams()[2]);
+
+ // Update the local streams by removing the first sending stream.
+ // This is done by removing all SSRCS for this particular stream.
+ typename T::Content content3;
+ stream1.ssrcs.clear();
+ content3.AddStream(stream1);
+ content3.set_partial(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content3, CA_UPDATE));
+ ASSERT_EQ(2u, media_channel1_->send_streams().size());
+ EXPECT_EQ(stream2, media_channel1_->send_streams()[0]);
+ EXPECT_EQ(stream3, media_channel1_->send_streams()[1]);
+
+ // Update the local streams with a stream that does not change.
+ // The update is ignored.
+ typename T::Content content4;
+ content4.AddStream(stream2);
+ content4.set_partial(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content4, CA_UPDATE));
+ ASSERT_EQ(2u, media_channel1_->send_streams().size());
+ EXPECT_EQ(stream2, media_channel1_->send_streams()[0]);
+ EXPECT_EQ(stream3, media_channel1_->send_streams()[1]);
+ }
+
+ // Test that SetRemoteContent properly handles adding and removing
+ // StreamParams to the remote content description.
+ // This test uses the CA_UPDATE action that doesn't require a full
+ // MediaContentDescription to do an update.
+ void TestUpdateStreamsInRemoteContent() {
+ cricket::StreamParams stream1;
+ stream1.id = "Stream1";
+ stream1.groupid = "1";
+ stream1.ssrcs.push_back(kSsrc1);
+ stream1.cname = "stream1_cname";
+
+ cricket::StreamParams stream2;
+ stream2.id = "Stream2";
+ stream2.groupid = "2";
+ stream2.ssrcs.push_back(kSsrc2);
+ stream2.cname = "stream2_cname";
+
+ cricket::StreamParams stream3;
+ stream3.id = "Stream3";
+ stream3.groupid = "3";
+ stream3.ssrcs.push_back(kSsrc3);
+ stream3.cname = "stream3_cname";
+
+ CreateChannels(0, 0);
+ typename T::Content content1;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content1);
+ content1.AddStream(stream1);
+ EXPECT_EQ(0u, media_channel1_->recv_streams().size());
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content1, CA_OFFER));
+
+ ASSERT_EQ(1u, media_channel1_->codecs().size());
+ ASSERT_EQ(1u, media_channel1_->recv_streams().size());
+ EXPECT_EQ(stream1, media_channel1_->recv_streams()[0]);
+
+ // Update the remote streams by adding another sending stream.
+ // Use a partial updated session description.
+ typename T::Content content2;
+ content2.AddStream(stream2);
+ content2.AddStream(stream3);
+ content2.set_partial(true);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content2, CA_UPDATE));
+ ASSERT_EQ(3u, media_channel1_->recv_streams().size());
+ EXPECT_EQ(stream1, media_channel1_->recv_streams()[0]);
+ EXPECT_EQ(stream2, media_channel1_->recv_streams()[1]);
+ EXPECT_EQ(stream3, media_channel1_->recv_streams()[2]);
+
+ // Update the remote streams by removing the first stream.
+ // This is done by removing all SSRCS for this particular stream.
+ typename T::Content content3;
+ stream1.ssrcs.clear();
+ content3.AddStream(stream1);
+ content3.set_partial(true);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content3, CA_UPDATE));
+ ASSERT_EQ(2u, media_channel1_->recv_streams().size());
+ EXPECT_EQ(stream2, media_channel1_->recv_streams()[0]);
+ EXPECT_EQ(stream3, media_channel1_->recv_streams()[1]);
+
+ // Update the remote streams with a stream that does not change.
+ // The update is ignored.
+ typename T::Content content4;
+ content4.AddStream(stream2);
+ content4.set_partial(true);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content4, CA_UPDATE));
+ ASSERT_EQ(2u, media_channel1_->recv_streams().size());
+ EXPECT_EQ(stream2, media_channel1_->recv_streams()[0]);
+ EXPECT_EQ(stream3, media_channel1_->recv_streams()[1]);
+ }
+
+ // Test that SetLocalContent and SetRemoteContent properly
+ // handle adding and removing StreamParams when the action is a full
+ // CA_OFFER / CA_ANSWER.
+ void TestChangeStreamParamsInContent() {
+ cricket::StreamParams stream1;
+ stream1.groupid = "group1";
+ stream1.id = "stream1";
+ stream1.ssrcs.push_back(kSsrc1);
+ stream1.cname = "stream1_cname";
+
+ cricket::StreamParams stream2;
+ stream2.groupid = "group1";
+ stream2.id = "stream2";
+ stream2.ssrcs.push_back(kSsrc2);
+ stream2.cname = "stream2_cname";
+
+ // Set up a call where channel 1 sends |stream1| to channel 2.
+ CreateChannels(0, 0);
+ typename T::Content content1;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content1);
+ content1.AddStream(stream1);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content1, CA_OFFER));
+ EXPECT_TRUE(channel1_->Enable(true));
+ EXPECT_EQ(1u, media_channel1_->send_streams().size());
+
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content1, CA_OFFER));
+ EXPECT_EQ(1u, media_channel2_->recv_streams().size());
+ session1_.Connect(&session2_);
+
+ // Channel 2 does not send anything.
+ typename T::Content content2;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content2);
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content2, CA_ANSWER));
+ EXPECT_EQ(0u, media_channel1_->recv_streams().size());
+ EXPECT_TRUE(channel2_->SetLocalContent(&content2, CA_ANSWER));
+ EXPECT_TRUE(channel2_->Enable(true));
+ EXPECT_EQ(0u, media_channel2_->send_streams().size());
+
+ EXPECT_TRUE(SendCustomRtp1(kSsrc1, 0));
+ EXPECT_TRUE(CheckCustomRtp2(kSsrc1, 0));
+
+ // Let channel 2 update the content by sending |stream2| and enable SRTP.
+ typename T::Content content3;
+ CreateContent(SECURE, kPcmuCodec, kH264Codec, &content3);
+ content3.AddStream(stream2);
+ EXPECT_TRUE(channel2_->SetLocalContent(&content3, CA_OFFER));
+ ASSERT_EQ(1u, media_channel2_->send_streams().size());
+ EXPECT_EQ(stream2, media_channel2_->send_streams()[0]);
+
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content3, CA_OFFER));
+ ASSERT_EQ(1u, media_channel1_->recv_streams().size());
+ EXPECT_EQ(stream2, media_channel1_->recv_streams()[0]);
+
+ // Channel 1 replies but stops sending stream1.
+ typename T::Content content4;
+ CreateContent(SECURE, kPcmuCodec, kH264Codec, &content4);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content4, CA_ANSWER));
+ EXPECT_EQ(0u, media_channel1_->send_streams().size());
+
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content4, CA_ANSWER));
+ EXPECT_EQ(0u, media_channel2_->recv_streams().size());
+
+ EXPECT_TRUE(channel1_->secure());
+ EXPECT_TRUE(channel2_->secure());
+ EXPECT_TRUE(SendCustomRtp2(kSsrc2, 0));
+ EXPECT_TRUE(CheckCustomRtp1(kSsrc2, 0));
+ }
+
+ // Test that we only start playout and sending at the right times:
+ // playout needs Enable + local content, sending additionally needs the
+ // transport connected and remote content set.
+ void TestPlayoutAndSendingStates() {
+ CreateChannels(0, 0);
+ EXPECT_FALSE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_FALSE(media_channel2_->playout());
+ EXPECT_FALSE(media_channel2_->sending());
+ EXPECT_TRUE(channel1_->Enable(true));
+ EXPECT_FALSE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_TRUE(channel1_->SetLocalContent(&local_media_content1_, CA_OFFER));
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_TRUE(channel2_->SetRemoteContent(&local_media_content1_, CA_OFFER));
+ EXPECT_FALSE(media_channel2_->playout());
+ EXPECT_FALSE(media_channel2_->sending());
+ EXPECT_TRUE(channel2_->SetLocalContent(&local_media_content2_, CA_ANSWER));
+ EXPECT_FALSE(media_channel2_->playout());
+ EXPECT_FALSE(media_channel2_->sending());
+ session1_.Connect(&session2_);
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_FALSE(media_channel2_->playout());
+ EXPECT_FALSE(media_channel2_->sending());
+ EXPECT_TRUE(channel2_->Enable(true));
+ EXPECT_TRUE(media_channel2_->playout());
+ EXPECT_TRUE(media_channel2_->sending());
+ EXPECT_TRUE(channel1_->SetRemoteContent(&local_media_content2_, CA_ANSWER));
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_TRUE(media_channel1_->sending());
+ }
+
+ // Test muting: the default stream (ssrc 0) can always be muted, while a
+ // specific SSRC can be muted only once it is known from the local
+ // session description.
+ void TestMuteStream() {
+ CreateChannels(0, 0);
+ // Test that we can Mute the default channel even though the sending SSRC is
+ // unknown.
+ EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+ EXPECT_TRUE(channel1_->MuteStream(0, true));
+ EXPECT_TRUE(media_channel1_->IsStreamMuted(0));
+ EXPECT_TRUE(channel1_->MuteStream(0, false));
+ EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+
+ // Test that we can not mute an unknown SSRC.
+ EXPECT_FALSE(channel1_->MuteStream(kSsrc1, true));
+
+ SendInitiate();
+ // After the local session description has been set, we can mute a stream
+ // with its SSRC.
+ EXPECT_TRUE(channel1_->MuteStream(kSsrc1, true));
+ EXPECT_TRUE(media_channel1_->IsStreamMuted(kSsrc1));
+ EXPECT_TRUE(channel1_->MuteStream(kSsrc1, false));
+ EXPECT_FALSE(media_channel1_->IsStreamMuted(kSsrc1));
+ }
+
+ // Test that changing the MediaContentDirection in the local and remote
+ // session description starts playout and sending at the right time.
+ // Walks content2 through INACTIVE -> RECVONLY -> SENDRECV.
+ void TestMediaContentDirection() {
+ CreateChannels(0, 0);
+ typename T::Content content1;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content1);
+ typename T::Content content2;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content2);
+ // Set |content2| to be InActive.
+ content2.set_direction(cricket::MD_INACTIVE);
+
+ EXPECT_TRUE(channel1_->Enable(true));
+ EXPECT_TRUE(channel2_->Enable(true));
+ EXPECT_FALSE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_FALSE(media_channel2_->playout());
+ EXPECT_FALSE(media_channel2_->sending());
+
+ EXPECT_TRUE(channel1_->SetLocalContent(&content1, CA_OFFER));
+ EXPECT_TRUE(channel2_->SetRemoteContent(&content1, CA_OFFER));
+ EXPECT_TRUE(channel2_->SetLocalContent(&content2, CA_PRANSWER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content2, CA_PRANSWER));
+ session1_.Connect(&session2_);
+
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending()); // remote InActive
+ EXPECT_FALSE(media_channel2_->playout()); // local InActive
+ EXPECT_FALSE(media_channel2_->sending()); // local InActive
+
+ // Update |content2| to be RecvOnly.
+ content2.set_direction(cricket::MD_RECVONLY);
+ EXPECT_TRUE(channel2_->SetLocalContent(&content2, CA_PRANSWER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content2, CA_PRANSWER));
+
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_TRUE(media_channel2_->playout()); // local RecvOnly
+ EXPECT_FALSE(media_channel2_->sending()); // local RecvOnly
+
+ // Update |content2| to be SendRecv.
+ content2.set_direction(cricket::MD_SENDRECV);
+ EXPECT_TRUE(channel2_->SetLocalContent(&content2, CA_ANSWER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content2, CA_ANSWER));
+
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_TRUE(media_channel2_->playout());
+ EXPECT_TRUE(media_channel2_->sending());
+ }
+
+ // Test setting up a basic (insecure) call end to end.
+ void TestCallSetup() {
+ CreateChannels(0, 0);
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(media_channel1_->playout());
+ EXPECT_FALSE(media_channel1_->sending());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_EQ(1U, media_channel1_->codecs().size());
+ EXPECT_TRUE(media_channel2_->playout());
+ EXPECT_TRUE(media_channel2_->sending());
+ EXPECT_EQ(1U, media_channel2_->codecs().size());
+ }
+
+ // Test that we don't crash if packets are sent during call teardown
+ // when RTCP mux is enabled. This is a regression test against a specific
+ // race condition that would only occur when a RTCP packet was sent during
+ // teardown of a channel on which RTCP mux was enabled.
+ void TestCallTeardownRtcpMux() {
+ // Media channel that fires one last RTP and RTCP packet from its
+ // destructor, simulating the race.
+ // NOTE(review): the destructor is non-virtual here; this assumes
+ // T::MediaChannel's destructor is virtual if deletion happens through a
+ // base pointer -- confirm in the channel implementation.
+ class LastWordMediaChannel : public T::MediaChannel {
+ public:
+ LastWordMediaChannel() : T::MediaChannel(NULL) {}
+ ~LastWordMediaChannel() {
+ T::MediaChannel::SendRtp(kPcmuFrame, sizeof(kPcmuFrame));
+ T::MediaChannel::SendRtcp(kRtcpReport, sizeof(kRtcpReport));
+ }
+ };
+ CreateChannels(new LastWordMediaChannel(), new LastWordMediaChannel(),
+ RTCP | RTCP_MUX, RTCP | RTCP_MUX,
+ talk_base::Thread::Current());
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_TRUE(SendTerminate());
+ }
+
+ // Send voice RTP data to the other side and ensure it gets there.
+ void SendRtpToRtp() {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ }
+
+ // Check that RTCP is not transmitted if both sides don't support RTCP.
+ void SendNoRtcpToNoRtcp() {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_FALSE(SendRtcp1());
+ EXPECT_FALSE(SendRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTCP is not transmitted if the callee doesn't support RTCP.
+ // The callee (RTCP-enabled) side still allocates two transport channels.
+ void SendNoRtcpToRtcp() {
+ CreateChannels(0, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_FALSE(SendRtcp1());
+ EXPECT_FALSE(SendRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTCP is not transmitted if the caller doesn't support RTCP.
+ void SendRtcpToNoRtcp() {
+ CreateChannels(RTCP, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_FALSE(SendRtcp1());
+ EXPECT_FALSE(SendRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTCP is transmitted if both sides support RTCP.
+ void SendRtcpToRtcp() {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTCP is transmitted if only the initiator supports mux;
+ // both sides keep a separate RTCP transport channel.
+ void SendRtcpMuxToRtcp() {
+ CreateChannels(RTCP | RTCP_MUX, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTP and RTCP are transmitted ok when both sides support mux;
+ // the initiator drops its extra RTCP transport channel after the accept.
+ void SendRtcpMuxToRtcpMux() {
+ CreateChannels(RTCP | RTCP_MUX, RTCP | RTCP_MUX);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE(CheckRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Check that RTCP data sent by the initiator before the accept is not muxed.
+ void SendEarlyRtcpMuxToRtcp() {
+ CreateChannels(RTCP | RTCP_MUX, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+
+ // RTCP can be sent before the call is accepted, if the transport is ready.
+ // It should not be muxed though, as the remote side doesn't support mux.
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE(CheckRtcp2());
+
+ // Send RTCP packet from callee and verify that it is received.
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckRtcp1());
+
+ // Complete call setup and ensure everything is still OK.
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtcp1());
+ }
+
+
+ // Check that RTCP data is not muxed until both sides have enabled muxing,
+ // but that we properly demux before we get the accept message, since there
+ // is a race between RTP data and the jingle accept.
+ void SendEarlyRtcpMuxToRtcpMux() {
+ CreateChannels(RTCP | RTCP_MUX, RTCP | RTCP_MUX);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+
+ // RTCP can't be sent yet, since the RTCP transport isn't writable, and
+ // we haven't yet received the accept that says we should mux.
+ EXPECT_FALSE(SendRtcp1());
+
+ // Send muxed RTCP packet from callee and verify that it is received.
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckRtcp1());
+
+ // Complete call setup and ensure everything is still OK.
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtcp1());
+ }
+
+ // Test that we properly send SRTP with RTCP in both directions.
+ // You can pass in DTLS and/or RTCP_MUX as flags.
+ void SendSrtpToSrtp(int flags1_in = 0, int flags2_in = 0) {
+ ASSERT((flags1_in & ~(RTCP_MUX | DTLS)) == 0);
+ ASSERT((flags2_in & ~(RTCP_MUX | DTLS)) == 0);
+
+ int flags1 = RTCP | SECURE | flags1_in;
+ int flags2 = RTCP | SECURE | flags2_in;
+ bool dtls1 = !!(flags1_in & DTLS);
+ bool dtls2 = !!(flags2_in & DTLS);
+ CreateChannels(flags1, flags2);
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_FALSE(channel2_->secure());
+ EXPECT_TRUE(SendInitiate());
+ // DTLS handshakes asynchronously, so wait for writability.
+ EXPECT_TRUE_WAIT(channel1_->writable(), kEventTimeout);
+ EXPECT_TRUE_WAIT(channel2_->writable(), kEventTimeout);
+ EXPECT_TRUE(SendAccept());
+ EXPECT_TRUE(channel1_->secure());
+ EXPECT_TRUE(channel2_->secure());
+ // DTLS-SRTP is in effect only if both endpoints requested DTLS.
+ EXPECT_EQ(dtls1 && dtls2, channel1_->secure_dtls());
+ EXPECT_EQ(dtls1 && dtls2, channel2_->secure_dtls());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE(CheckRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Test that we properly handle SRTP negotiating down to RTP.
+ void SendSrtpToRtp() {
+ CreateChannels(RTCP | SECURE, RTCP);
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_FALSE(channel2_->secure());
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_FALSE(channel2_->secure());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(SendRtcp1());
+ EXPECT_TRUE(SendRtcp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE(CheckRtcp1());
+ EXPECT_TRUE(CheckRtcp2());
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Test that we can send and receive early media when a provisional answer is
+ // sent and received. The test uses SRTP, RTCP mux and SSRC mux.
+ void SendEarlyMediaUsingRtcpMuxSrtp() {
+ int sequence_number1_1 = 0, sequence_number2_2 = 0;
+
+ CreateChannels(SSRC_MUX | RTCP | RTCP_MUX | SECURE,
+ SSRC_MUX | RTCP | RTCP_MUX | SECURE);
+ EXPECT_TRUE(SendOffer());
+ EXPECT_TRUE(SendProvisionalAnswer());
+ // SRTP is active already at the provisional answer...
+ EXPECT_TRUE(channel1_->secure());
+ EXPECT_TRUE(channel2_->secure());
+ // ...but RTCP mux is not, so both sides still have two channels.
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(CheckCustomRtcp2(kSsrc1));
+ EXPECT_TRUE(SendCustomRtp1(kSsrc1, ++sequence_number1_1));
+ EXPECT_TRUE(CheckCustomRtp2(kSsrc1, sequence_number1_1));
+
+ // Send packets from callee and verify that it is received.
+ EXPECT_TRUE(SendCustomRtcp2(kSsrc2));
+ EXPECT_TRUE(CheckCustomRtcp1(kSsrc2));
+ EXPECT_TRUE(SendCustomRtp2(kSsrc2, ++sequence_number2_2));
+ EXPECT_TRUE(CheckCustomRtp1(kSsrc2, sequence_number2_2));
+
+ // Complete call setup and ensure everything is still OK.
+ EXPECT_TRUE(SendFinalAnswer());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_TRUE(channel1_->secure());
+ EXPECT_TRUE(channel2_->secure());
+ EXPECT_TRUE(SendCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(CheckCustomRtcp2(kSsrc1));
+ EXPECT_TRUE(SendCustomRtp1(kSsrc1, ++sequence_number1_1));
+ EXPECT_TRUE(CheckCustomRtp2(kSsrc1, sequence_number1_1));
+ EXPECT_TRUE(SendCustomRtcp2(kSsrc2));
+ EXPECT_TRUE(CheckCustomRtcp1(kSsrc2));
+ EXPECT_TRUE(SendCustomRtp2(kSsrc2, ++sequence_number2_2));
+ EXPECT_TRUE(CheckCustomRtp1(kSsrc2, sequence_number2_2));
+ }
+
+ // Test that we properly send RTP without SRTP from a thread.
+ // The send flags are written back by CallThread once each send completes.
+ void SendRtpToRtpOnThread() {
+ bool sent_rtp1, sent_rtp2, sent_rtcp1, sent_rtcp2;
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ CallOnThread(&ChannelTest<T>::SendRtp1, &sent_rtp1);
+ CallOnThread(&ChannelTest<T>::SendRtp2, &sent_rtp2);
+ CallOnThread(&ChannelTest<T>::SendRtcp1, &sent_rtcp1);
+ CallOnThread(&ChannelTest<T>::SendRtcp2, &sent_rtcp2);
+ EXPECT_TRUE_WAIT(CheckRtp1(), 1000);
+ EXPECT_TRUE_WAIT(CheckRtp2(), 1000);
+ EXPECT_TRUE_WAIT(sent_rtp1, 1000);
+ EXPECT_TRUE_WAIT(sent_rtp2, 1000);
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE_WAIT(CheckRtcp1(), 1000);
+ EXPECT_TRUE_WAIT(CheckRtcp2(), 1000);
+ EXPECT_TRUE_WAIT(sent_rtcp1, 1000);
+ EXPECT_TRUE_WAIT(sent_rtcp2, 1000);
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Test that we properly send SRTP with RTCP from a thread.
+ void SendSrtpToSrtpOnThread() {
+ bool sent_rtp1, sent_rtp2, sent_rtcp1, sent_rtcp2;
+ CreateChannels(RTCP | SECURE, RTCP | SECURE);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ CallOnThread(&ChannelTest<T>::SendRtp1, &sent_rtp1);
+ CallOnThread(&ChannelTest<T>::SendRtp2, &sent_rtp2);
+ CallOnThread(&ChannelTest<T>::SendRtcp1, &sent_rtcp1);
+ CallOnThread(&ChannelTest<T>::SendRtcp2, &sent_rtcp2);
+ EXPECT_TRUE_WAIT(CheckRtp1(), 1000);
+ EXPECT_TRUE_WAIT(CheckRtp2(), 1000);
+ EXPECT_TRUE_WAIT(sent_rtp1, 1000);
+ EXPECT_TRUE_WAIT(sent_rtp2, 1000);
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE_WAIT(CheckRtcp1(), 1000);
+ EXPECT_TRUE_WAIT(CheckRtcp2(), 1000);
+ EXPECT_TRUE_WAIT(sent_rtcp1, 1000);
+ EXPECT_TRUE_WAIT(sent_rtcp2, 1000);
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Test that the mediachannel retains its sending state after the transport
+ // becomes non-writable.
+ void SendWithWritabilityLoss() {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ // Baseline: both directions deliver exactly one packet.
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+
+ // Lose writability, with optimistic send
+ // With optimistic send on, sends still succeed despite the transport
+ // reporting non-writable, and sending() stays true.
+ SetOptimisticDataSend(true);
+ GetTransport1()->SetWritable(false);
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+
+ // Check again with optimistic send off, which should fail.
+ // (Only the non-writable side, channel1, fails; channel2 still delivers.)
+ SetOptimisticDataSend(false);
+ EXPECT_FALSE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+
+ // Regain writability
+ GetTransport1()->SetWritable(true);
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+
+ // Lose writability completely
+ GetTransport1()->SetDestination(NULL);
+ EXPECT_TRUE(media_channel1_->sending());
+
+ // Should fail regardless of optimistic send at this point.
+ SetOptimisticDataSend(true);
+ EXPECT_FALSE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ SetOptimisticDataSend(false);
+ EXPECT_FALSE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+
+ // Gain writability back
+ GetTransport1()->SetDestination(GetTransport2());
+ EXPECT_TRUE(media_channel1_->sending());
+ EXPECT_TRUE(SendRtp1());
+ EXPECT_TRUE(SendRtp2());
+ EXPECT_TRUE(CheckRtp1());
+ EXPECT_TRUE(CheckRtp2());
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckNoRtp2());
+ }
+
+ // Test SSRC-demuxed media with RTCP muxed onto the RTP channel: after
+ // accept, each side should collapse to a single transport channel and
+ // route packets by remote SSRC through its ssrc_filter.
+ void SendSsrcMuxToSsrcMuxWithRtcpMux() {
+ int sequence_number1_1 = 0, sequence_number2_2 = 0;
+ CreateChannels(SSRC_MUX | RTCP | RTCP_MUX, SSRC_MUX | RTCP | RTCP_MUX);
+ EXPECT_TRUE(SendInitiate());
+ // Before the answer, side 1 still keeps a separate RTCP channel (2);
+ // side 2 already muxes (1).
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(1U, GetTransport1()->channels().size());
+ EXPECT_EQ(1U, GetTransport2()->channels().size());
+ EXPECT_TRUE(channel1_->ssrc_filter()->IsActive());
+ // channel1 - should have media_content2 as remote. i.e. kSsrc2
+ EXPECT_TRUE(channel1_->ssrc_filter()->FindStream(kSsrc2));
+ EXPECT_TRUE(channel2_->ssrc_filter()->IsActive());
+ // channel2 - should have media_content1 as remote. i.e. kSsrc1
+ EXPECT_TRUE(channel2_->ssrc_filter()->FindStream(kSsrc1));
+ EXPECT_TRUE(SendCustomRtp1(kSsrc1, ++sequence_number1_1));
+ EXPECT_TRUE(SendCustomRtp2(kSsrc2, ++sequence_number2_2));
+ EXPECT_TRUE(SendCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(SendCustomRtcp2(kSsrc2));
+ EXPECT_TRUE(CheckCustomRtp1(kSsrc2, sequence_number2_2));
+ EXPECT_TRUE(CheckNoRtp1());
+ EXPECT_TRUE(CheckCustomRtp2(kSsrc1, sequence_number1_1));
+ EXPECT_TRUE(CheckNoRtp2());
+ EXPECT_TRUE(CheckCustomRtcp1(kSsrc2));
+ EXPECT_TRUE(CheckNoRtcp1());
+ EXPECT_TRUE(CheckCustomRtcp2(kSsrc1));
+ EXPECT_TRUE(CheckNoRtcp2());
+ }
+
+ // Test SSRC-demuxed media without RTCP mux: each side keeps separate RTP
+ // and RTCP channels (2 each), and packets for the wrong SSRC are rejected.
+ void SendSsrcMuxToSsrcMux() {
+ int sequence_number1_1 = 0, sequence_number2_2 = 0;
+ CreateChannels(SSRC_MUX | RTCP, SSRC_MUX | RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+ EXPECT_TRUE(channel1_->ssrc_filter()->IsActive());
+ // channel1 - should have media_content2 as remote. i.e. kSsrc2
+ EXPECT_TRUE(channel1_->ssrc_filter()->FindStream(kSsrc2));
+ EXPECT_TRUE(channel2_->ssrc_filter()->IsActive());
+ // channel2 - should have media_content1 as remote. i.e. kSsrc1
+ // NOTE(review): unlike SendSsrcMuxToSsrcMuxWithRtcpMux(), the matching
+ // channel2_->ssrc_filter()->FindStream(kSsrc1) check is missing here —
+ // confirm against upstream whether it was intentionally omitted.
+ EXPECT_TRUE(SendCustomRtp1(kSsrc1, ++sequence_number1_1));
+ EXPECT_TRUE(SendCustomRtp2(kSsrc2, ++sequence_number2_2));
+ EXPECT_TRUE(SendCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(SendCustomRtcp2(kSsrc2));
+ // Each side receives only the peer's SSRC; checks against the local
+ // SSRC must fail, proving the filter actually demuxes.
+ EXPECT_TRUE(CheckCustomRtp1(kSsrc2, sequence_number2_2));
+ EXPECT_FALSE(CheckCustomRtp1(kSsrc1, sequence_number2_2));
+ EXPECT_TRUE(CheckCustomRtp2(kSsrc1, sequence_number1_1));
+ EXPECT_FALSE(CheckCustomRtp2(kSsrc2, sequence_number1_1));
+ EXPECT_TRUE(CheckCustomRtcp1(kSsrc2));
+ EXPECT_FALSE(CheckCustomRtcp1(kSsrc1));
+ EXPECT_TRUE(CheckCustomRtcp2(kSsrc1));
+ EXPECT_FALSE(CheckCustomRtcp2(kSsrc2));
+ }
+
+ // Test that the media monitor can be run and gives timely callbacks.
+ void TestMediaMonitor() {
+ static const int kTimeout = 500;
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ // 100 is the monitor polling interval; callbacks bump the
+ // media_info_callbacksN_ counters.
+ channel1_->StartMediaMonitor(100);
+ channel2_->StartMediaMonitor(100);
+ // Ensure we get callbacks and stop.
+ EXPECT_TRUE_WAIT(media_info_callbacks1_ > 0, kTimeout);
+ EXPECT_TRUE_WAIT(media_info_callbacks2_ > 0, kTimeout);
+ channel1_->StopMediaMonitor();
+ channel2_->StopMediaMonitor();
+ // Ensure a restart of a stopped monitor works.
+ channel1_->StartMediaMonitor(100);
+ EXPECT_TRUE_WAIT(media_info_callbacks1_ > 0, kTimeout);
+ channel1_->StopMediaMonitor();
+ // Ensure stopping a stopped monitor is OK.
+ channel1_->StopMediaMonitor();
+ }
+
+ // Test registering/unregistering an RtpDumpSink on the send path and
+ // verify what it recorded by reading the dump file back.
+ void TestMediaSinks() {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ // No sinks installed by default, on either the send or receive side.
+ EXPECT_FALSE(channel1_->HasSendSinks(cricket::SINK_POST_CRYPTO));
+ EXPECT_FALSE(channel1_->HasRecvSinks(cricket::SINK_POST_CRYPTO));
+ EXPECT_FALSE(channel1_->HasSendSinks(cricket::SINK_PRE_CRYPTO));
+ EXPECT_FALSE(channel1_->HasRecvSinks(cricket::SINK_PRE_CRYPTO));
+
+ talk_base::Pathname path;
+ EXPECT_TRUE(talk_base::Filesystem::GetTemporaryFolder(path, true, NULL));
+ path.SetFilename("sink-test.rtpdump");
+ talk_base::scoped_ptr<cricket::RtpDumpSink> sink(
+ new cricket::RtpDumpSink(Open(path.pathname())));
+ sink->set_packet_filter(cricket::PF_ALL);
+ EXPECT_TRUE(sink->Enable(true));
+ channel1_->RegisterSendSink(
+ sink.get(), &cricket::RtpDumpSink::OnPacket, cricket::SINK_POST_CRYPTO);
+ EXPECT_TRUE(channel1_->HasSendSinks(cricket::SINK_POST_CRYPTO));
+ EXPECT_FALSE(channel1_->HasRecvSinks(cricket::SINK_POST_CRYPTO));
+ EXPECT_FALSE(channel1_->HasSendSinks(cricket::SINK_PRE_CRYPTO));
+ EXPECT_FALSE(channel1_->HasRecvSinks(cricket::SINK_PRE_CRYPTO));
+
+ // The first packet is recorded with header + data.
+ EXPECT_TRUE(SendRtp1());
+ // The second packet is recorded with header only.
+ sink->set_packet_filter(cricket::PF_RTPHEADER);
+ EXPECT_TRUE(SendRtp1());
+ // The third packet is not recorded since sink is disabled.
+ EXPECT_TRUE(sink->Enable(false));
+ EXPECT_TRUE(SendRtp1());
+ // The fourth packet is not recorded since sink is unregistered.
+ EXPECT_TRUE(sink->Enable(true));
+ channel1_->UnregisterSendSink(sink.get(), cricket::SINK_POST_CRYPTO);
+ EXPECT_TRUE(SendRtp1());
+ sink.reset(); // This will close the file.
+
+ // Read the recorded file and verify two packets.
+ talk_base::scoped_ptr<talk_base::StreamInterface> stream(
+ talk_base::Filesystem::OpenFile(path, "rb"));
+
+ cricket::RtpDumpReader reader(stream.get());
+ cricket::RtpDumpPacket packet;
+ // Packet 1: full payload must match what was sent.
+ EXPECT_EQ(talk_base::SR_SUCCESS, reader.ReadPacket(&packet));
+ std::string read_packet(reinterpret_cast<const char*>(&packet.data[0]),
+ packet.data.size());
+ EXPECT_EQ(rtp_packet_, read_packet);
+
+ // Packet 2: only the RTP header was captured.
+ EXPECT_EQ(talk_base::SR_SUCCESS, reader.ReadPacket(&packet));
+ size_t len = 0;
+ packet.GetRtpHeaderLen(&len);
+ EXPECT_EQ(len, packet.data.size());
+ EXPECT_EQ(0, memcmp(&packet.data[0], rtp_packet_.c_str(), len));
+
+ // No third packet: the disabled/unregistered sends were not recorded.
+ EXPECT_EQ(talk_base::SR_EOS, reader.ReadPacket(&packet));
+
+ // Delete the file for media recording.
+ stream.reset();
+ EXPECT_TRUE(talk_base::Filesystem::DeleteFile(path));
+ }
+
+ // Test that codec-setting failures in the media channel surface as
+ // ERROR_CONTENT on the session for every state transition that applies
+ // local or remote content.
+ void TestSetContentFailure() {
+ CreateChannels(0, 0);
+ typename T::Content content;
+ // Ownership of the descriptions passes to the session via
+ // set_local_description()/set_remote_description().
+ cricket::SessionDescription* sdesc_loc = new cricket::SessionDescription();
+ cricket::SessionDescription* sdesc_rem = new cricket::SessionDescription();
+
+ // Set up the session description.
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ sdesc_loc->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP,
+ new cricket::AudioContentDescription());
+ sdesc_loc->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP,
+ new cricket::VideoContentDescription());
+ EXPECT_TRUE(session1_.set_local_description(sdesc_loc));
+ sdesc_rem->AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP,
+ new cricket::AudioContentDescription());
+ sdesc_rem->AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP,
+ new cricket::VideoContentDescription());
+ EXPECT_TRUE(session1_.set_remote_description(sdesc_rem));
+
+ // Test failures in SetLocalContent.
+ // set_fail_set_recv_codecs is one-shot, so it is re-armed before each
+ // state transition.
+ media_channel1_->set_fail_set_recv_codecs(true);
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_SENTINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_CONTENT, session1_.error());
+ media_channel1_->set_fail_set_recv_codecs(true);
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_SENTACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_CONTENT, session1_.error());
+
+ // Test failures in SetRemoteContent.
+ media_channel1_->set_fail_set_send_codecs(true);
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_RECEIVEDINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_CONTENT, session1_.error());
+ media_channel1_->set_fail_set_send_codecs(true);
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_RECEIVEDACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_CONTENT, session1_.error());
+ }
+
+ // Test that re-offering with a different stream replaces the send stream
+ // (stream 1 removed, stream 2 added) rather than accumulating both.
+ void TestSendTwoOffers() {
+ CreateChannels(0, 0);
+
+ // Set up the initial session description.
+ cricket::SessionDescription* sdesc = CreateSessionDescriptionWithStream(1);
+ EXPECT_TRUE(session1_.set_local_description(sdesc));
+
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_SENTINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasSendStream(1));
+
+ // Update the local description and set the state again.
+ sdesc = CreateSessionDescriptionWithStream(2);
+ EXPECT_TRUE(session1_.set_local_description(sdesc));
+
+ session1_.SetState(cricket::Session::STATE_SENTINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ // The old stream must be gone, only the new one present.
+ EXPECT_FALSE(media_channel1_->HasSendStream(1));
+ EXPECT_TRUE(media_channel1_->HasSendStream(2));
+ }
+
+ // Test that receiving a second offer with a different stream replaces the
+ // receive stream (mirror of TestSendTwoOffers on the remote side).
+ void TestReceiveTwoOffers() {
+ CreateChannels(0, 0);
+
+ // Set up the initial session description.
+ cricket::SessionDescription* sdesc = CreateSessionDescriptionWithStream(1);
+ EXPECT_TRUE(session1_.set_remote_description(sdesc));
+
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_RECEIVEDINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasRecvStream(1));
+
+ // Second offer carries stream 2; stream 1 must be dropped.
+ sdesc = CreateSessionDescriptionWithStream(2);
+ EXPECT_TRUE(session1_.set_remote_description(sdesc));
+ session1_.SetState(cricket::Session::STATE_RECEIVEDINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_FALSE(media_channel1_->HasRecvStream(1));
+ EXPECT_TRUE(media_channel1_->HasRecvStream(2));
+ }
+
+ // Test the offer -> provisional answer -> final answer flow on the
+ // answering side: the PRANSWER's send stream (2) is provisional and is
+ // replaced by the ACCEPT's stream (3), while the recv stream (1) persists.
+ void TestSendPrAnswer() {
+ CreateChannels(0, 0);
+
+ // Set up the initial session description.
+ cricket::SessionDescription* sdesc = CreateSessionDescriptionWithStream(1);
+ EXPECT_TRUE(session1_.set_remote_description(sdesc));
+
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_RECEIVEDINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasRecvStream(1));
+
+ // Send PRANSWER
+ sdesc = CreateSessionDescriptionWithStream(2);
+ EXPECT_TRUE(session1_.set_local_description(sdesc));
+
+ session1_.SetState(cricket::Session::STATE_SENTPRACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasRecvStream(1));
+ EXPECT_TRUE(media_channel1_->HasSendStream(2));
+
+ // Send ACCEPT
+ sdesc = CreateSessionDescriptionWithStream(3);
+ EXPECT_TRUE(session1_.set_local_description(sdesc));
+
+ session1_.SetState(cricket::Session::STATE_SENTACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasRecvStream(1));
+ EXPECT_FALSE(media_channel1_->HasSendStream(2));
+ EXPECT_TRUE(media_channel1_->HasSendStream(3));
+ }
+
+ // Test the offer -> provisional answer -> final answer flow on the
+ // offering side: the PRANSWER's recv stream (2) is provisional and is
+ // replaced by the ACCEPT's stream (3), while the send stream (1) persists.
+ void TestReceivePrAnswer() {
+ CreateChannels(0, 0);
+
+ // Set up the initial session description.
+ cricket::SessionDescription* sdesc = CreateSessionDescriptionWithStream(1);
+ EXPECT_TRUE(session1_.set_local_description(sdesc));
+
+ session1_.SetError(cricket::BaseSession::ERROR_NONE);
+ session1_.SetState(cricket::Session::STATE_SENTINITIATE);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasSendStream(1));
+
+ // Receive PRANSWER
+ sdesc = CreateSessionDescriptionWithStream(2);
+ EXPECT_TRUE(session1_.set_remote_description(sdesc));
+
+ session1_.SetState(cricket::Session::STATE_RECEIVEDPRACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasSendStream(1));
+ EXPECT_TRUE(media_channel1_->HasRecvStream(2));
+
+ // Receive ACCEPT
+ sdesc = CreateSessionDescriptionWithStream(3);
+ EXPECT_TRUE(session1_.set_remote_description(sdesc));
+
+ session1_.SetState(cricket::Session::STATE_RECEIVEDACCEPT);
+ EXPECT_EQ(cricket::BaseSession::ERROR_NONE, session1_.error());
+ EXPECT_TRUE(media_channel1_->HasSendStream(1));
+ EXPECT_FALSE(media_channel1_->HasRecvStream(2));
+ EXPECT_TRUE(media_channel1_->HasRecvStream(3));
+ }
+
+ // Test that a queued RTCP packet is flushed out when the channel is
+ // destroyed, rather than silently dropped.
+ void TestFlushRtcp() {
+ bool send_rtcp1;
+
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_EQ(2U, GetTransport1()->channels().size());
+ EXPECT_EQ(2U, GetTransport2()->channels().size());
+
+ // Send RTCP1 from a different thread.
+ CallOnThreadAndWaitForDone(&ChannelTest<T>::SendRtcp1, &send_rtcp1);
+ EXPECT_TRUE(send_rtcp1);
+ // The sending message is only posted. channel2_ should be empty.
+ EXPECT_TRUE(CheckNoRtcp2());
+
+ // When channel1_ is deleted, the RTCP packet should be sent out to
+ // channel2_.
+ channel1_.reset();
+ EXPECT_TRUE(CheckRtcp2());
+ }
+
+ // Test that a failure inside the media channel's send-state change is
+ // reported through the channel's error signal (captured into error_).
+ void TestChangeStateError() {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ media_channel2_->set_fail_set_send(true);
+ // Enable() itself still succeeds; the failure surfaces asynchronously
+ // as ERROR_REC_DEVICE_OPEN_FAILED.
+ EXPECT_TRUE(channel2_->Enable(true));
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_OPEN_FAILED,
+ error_);
+ }
+
+ // Test SRTP error signaling and its rate limiting: after an error, further
+ // failures within the "silent time" window must not re-signal.
+ void TestSrtpError() {
+ // A packet SRTP cannot protect/authenticate, used to force failures.
+ static const unsigned char kBadPacket[] = {
+ 0x84, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+ };
+ CreateChannels(RTCP | SECURE, RTCP | SECURE);
+ EXPECT_FALSE(channel1_->secure());
+ EXPECT_FALSE(channel2_->secure());
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_TRUE(channel1_->secure());
+ EXPECT_TRUE(channel2_->secure());
+ // Suppress repeat error signals for 200ms after one fires.
+ channel2_->set_srtp_signal_silent_time(200);
+
+ // Testing failures in sending packets.
+ EXPECT_FALSE(media_channel2_->SendRtp(kBadPacket, sizeof(kBadPacket)));
+ // The first failure will trigger an error.
+ EXPECT_EQ_WAIT(T::MediaChannel::ERROR_REC_SRTP_ERROR, error_, 500);
+ error_ = T::MediaChannel::ERROR_NONE;
+ // The next 1 sec failures will not trigger an error.
+ EXPECT_FALSE(media_channel2_->SendRtp(kBadPacket, sizeof(kBadPacket)));
+ // Wait for a while to ensure no message comes in.
+ talk_base::Thread::Current()->ProcessMessages(210);
+ EXPECT_EQ(T::MediaChannel::ERROR_NONE, error_);
+ // The error will be triggered again.
+ EXPECT_FALSE(media_channel2_->SendRtp(kBadPacket, sizeof(kBadPacket)));
+ EXPECT_EQ_WAIT(T::MediaChannel::ERROR_REC_SRTP_ERROR, error_, 500);
+
+ // Testing failures in receiving packets.
+ // Inject the bad packet directly into the transport's read path so it
+ // hits SRTP unprotect and fails authentication.
+ error_ = T::MediaChannel::ERROR_NONE;
+ cricket::TransportChannel* transport_channel =
+ channel2_->transport_channel();
+ transport_channel->SignalReadPacket(
+ transport_channel, reinterpret_cast<const char*>(kBadPacket),
+ sizeof(kBadPacket), 0);
+ EXPECT_EQ_WAIT(T::MediaChannel::ERROR_PLAY_SRTP_AUTH_FAILED, error_, 500);
+ }
+
+ // Test that the media channel's ready_to_send state tracks the AND of the
+ // RTP and RTCP transport channels' ready-to-send states.
+ void TestOnReadyToSend() {
+ CreateChannels(RTCP, RTCP);
+ TransportChannel* rtp = channel1_->transport_channel();
+ TransportChannel* rtcp = channel1_->rtcp_transport_channel();
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ rtp->SignalReadyToSend(rtp);
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ rtcp->SignalReadyToSend(rtcp);
+ // MediaChannel::OnReadyToSend only be called when both rtp and rtcp
+ // channel are ready to send.
+ EXPECT_TRUE(media_channel1_->ready_to_send());
+
+ // rtp channel becomes not ready to send will be propagated to mediachannel
+ channel1_->SetReadyToSend(rtp, false);
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ channel1_->SetReadyToSend(rtp, true);
+ EXPECT_TRUE(media_channel1_->ready_to_send());
+
+ // rtcp channel becomes not ready to send will be propagated to mediachannel
+ channel1_->SetReadyToSend(rtcp, false);
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ channel1_->SetReadyToSend(rtcp, true);
+ EXPECT_TRUE(media_channel1_->ready_to_send());
+ }
+
+ // Test ready_to_send when RTCP is muxed: the RTP channel alone controls
+ // the media channel's state (no separate RTCP channel exists).
+ void TestOnReadyToSendWithRtcpMux() {
+ CreateChannels(RTCP, RTCP);
+ typename T::Content content;
+ CreateContent(0, kPcmuCodec, kH264Codec, &content);
+ // Both sides agree on mux. Should no longer be a separate RTCP channel.
+ content.set_rtcp_mux(true);
+ EXPECT_TRUE(channel1_->SetLocalContent(&content, CA_OFFER));
+ EXPECT_TRUE(channel1_->SetRemoteContent(&content, CA_ANSWER));
+ EXPECT_TRUE(channel1_->rtcp_transport_channel() == NULL);
+ TransportChannel* rtp = channel1_->transport_channel();
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ // In the case of rtcp mux, the SignalReadyToSend() from rtp channel
+ // should trigger the MediaChannel's OnReadyToSend.
+ rtp->SignalReadyToSend(rtp);
+ EXPECT_TRUE(media_channel1_->ready_to_send());
+ channel1_->SetReadyToSend(rtp, false);
+ EXPECT_FALSE(media_channel1_->ready_to_send());
+ }
+
+ protected:
+ // Fixture state shared by all ChannelTest<T> test methods. "1"/"2"
+ // suffixes denote the two endpoints of the simulated call.
+ cricket::FakeSession session1_;
+ cricket::FakeSession session2_;
+ cricket::FakeMediaEngine media_engine_;
+ // The media channels are owned by the voice channel objects below.
+ typename T::MediaChannel* media_channel1_;
+ typename T::MediaChannel* media_channel2_;
+ talk_base::scoped_ptr<typename T::Channel> channel1_;
+ talk_base::scoped_ptr<typename T::Channel> channel2_;
+ typename T::Content local_media_content1_;
+ typename T::Content local_media_content2_;
+ typename T::Content remote_media_content1_;
+ typename T::Content remote_media_content2_;
+ // SSL identities — presumably used by the DTLS test flags; confirm at
+ // the setup code above this chunk.
+ talk_base::scoped_ptr<talk_base::SSLIdentity> identity1_;
+ talk_base::scoped_ptr<talk_base::SSLIdentity> identity2_;
+ // The RTP and RTCP packets to send in the tests.
+ std::string rtp_packet_;
+ std::string rtcp_packet_;
+ // Counters bumped by media monitor callbacks (see TestMediaMonitor).
+ int media_info_callbacks1_;
+ int media_info_callbacks2_;
+ // Set when a mute signal is received (see TestKeyboardMute).
+ bool mute_callback_recved_;
+ bool mute_callback_value_;
+
+ uint32 ssrc_;
+ // Last error reported by the media channel's error signal
+ // (checked by TestChangeStateError / TestSrtpError).
+ typename T::MediaChannel::Error error_;
+};
+
+
+// Voice specialization: build an audio content description from the flags.
+// Only the audio codec is used; the video codec parameter is ignored here.
+template<>
+void ChannelTest<VoiceTraits>::CreateContent(
+    int flags,
+    const cricket::AudioCodec& audio_codec,
+    const cricket::VideoCodec& video_codec,
+    cricket::AudioContentDescription* audio) {
+  audio->AddCodec(audio_codec);
+  audio->set_rtcp_mux((flags & RTCP_MUX) != 0);
+  if (flags & SECURE) {
+    // SRTP crypto offer: AES_CM_128_HMAC_SHA1_32 with a random inline key.
+    audio->AddCrypto(cricket::CryptoParams(
+        1, cricket::CS_AES_CM_128_HMAC_SHA1_32,
+        "inline:" + talk_base::CreateRandomString(40), ""));
+  }
+}
+
+// Voice specialization: copy via the description's assignment operator.
+template<>
+void ChannelTest<VoiceTraits>::CopyContent(
+    const cricket::AudioContentDescription& source,
+    cricket::AudioContentDescription* audio) {
+  *audio = source;
+}
+
+// Voice specialization: two audio codecs match on name, clockrate, bitrate
+// and channel count (payload type id is deliberately not compared).
+template<>
+bool ChannelTest<VoiceTraits>::CodecMatches(const cricket::AudioCodec& c1,
+                                            const cricket::AudioCodec& c2) {
+  return c1.name == c2.name && c1.clockrate == c2.clockrate &&
+      c1.bitrate == c2.bitrate && c1.channels == c2.channels;
+}
+
+// Voice specialization: add a legacy (single-SSRC) stream to the content.
+// The flags parameter is unused for audio.
+template<>
+void ChannelTest<VoiceTraits>::AddLegacyStreamInContent(
+    uint32 ssrc, int flags, cricket::AudioContentDescription* audio) {
+  audio->AddLegacyStream(ssrc);
+}
+
+// Concrete fixture for voice channels: parameterizes ChannelTest with PCMU
+// RTP frames and a canned RTCP report.
+class VoiceChannelTest
+    : public ChannelTest<VoiceTraits> {
+ public:
+  typedef ChannelTest<VoiceTraits>
+  Base;
+  VoiceChannelTest() : Base(kPcmuFrame, sizeof(kPcmuFrame),
+                            kRtcpReport, sizeof(kRtcpReport)) {
+  }
+
+  // Verify SetChannelOptions() forwards audio options to both media
+  // channels, and that a second call overwrites the first.
+  void TestSetChannelOptions() {
+    CreateChannels(0, 0);
+
+    cricket::AudioOptions options1;
+    options1.echo_cancellation.Set(false);
+    cricket::AudioOptions options2;
+    options2.echo_cancellation.Set(true);
+
+    channel1_->SetChannelOptions(options1);
+    channel2_->SetChannelOptions(options1);
+    cricket::AudioOptions actual_options;
+    ASSERT_TRUE(media_channel1_->GetOptions(&actual_options));
+    EXPECT_EQ(options1, actual_options);
+    ASSERT_TRUE(media_channel2_->GetOptions(&actual_options));
+    EXPECT_EQ(options1, actual_options);
+
+    channel1_->SetChannelOptions(options2);
+    channel2_->SetChannelOptions(options2);
+    ASSERT_TRUE(media_channel1_->GetOptions(&actual_options));
+    EXPECT_EQ(options2, actual_options);
+    ASSERT_TRUE(media_channel2_->GetOptions(&actual_options));
+    EXPECT_EQ(options2, actual_options);
+  }
+};
+
+// override to add NULL parameter
+// (VideoChannel's constructor takes an extra trailing argument — a voice
+// channel pointer — which is not needed here.)
+template<>
+cricket::VideoChannel* ChannelTest<VideoTraits>::CreateChannel(
+    talk_base::Thread* thread, cricket::MediaEngineInterface* engine,
+    cricket::FakeVideoMediaChannel* ch, cricket::BaseSession* session,
+    bool rtcp) {
+  cricket::VideoChannel* channel = new cricket::VideoChannel(
+      thread, engine, ch, session, cricket::CN_VIDEO, rtcp, NULL);
+  // On failed Init() the channel is destroyed and NULL returned.
+  if (!channel->Init()) {
+    delete channel;
+    channel = NULL;
+  }
+  return channel;
+}
+
+// override to add 0 parameter
+// Video specialization: adds a legacy receive stream with the given id.
+template<>
+bool ChannelTest<VideoTraits>::AddStream1(int id) {
+  return channel1_->AddRecvStream(cricket::StreamParams::CreateLegacy(id));
+}
+
+// Video specialization: build a video content description from the flags.
+// Only the video codec is used; the audio codec parameter is ignored here.
+template<>
+void ChannelTest<VideoTraits>::CreateContent(
+    int flags,
+    const cricket::AudioCodec& audio_codec,
+    const cricket::VideoCodec& video_codec,
+    cricket::VideoContentDescription* video) {
+  video->AddCodec(video_codec);
+  video->set_rtcp_mux((flags & RTCP_MUX) != 0);
+  if (flags & SECURE) {
+    // Video uses the 80-bit auth tag suite (voice uses the 32-bit one).
+    video->AddCrypto(cricket::CryptoParams(
+        1, cricket::CS_AES_CM_128_HMAC_SHA1_80,
+        "inline:" + talk_base::CreateRandomString(40), ""));
+  }
+}
+
+// Video specialization: copy via the description's assignment operator.
+template<>
+void ChannelTest<VideoTraits>::CopyContent(
+    const cricket::VideoContentDescription& source,
+    cricket::VideoContentDescription* video) {
+  *video = source;
+}
+
+// Video specialization: two video codecs match on name, resolution and
+// framerate (payload type id is deliberately not compared).
+template<>
+bool ChannelTest<VideoTraits>::CodecMatches(const cricket::VideoCodec& c1,
+                                            const cricket::VideoCodec& c2) {
+  return c1.name == c2.name && c1.width == c2.width && c1.height == c2.height &&
+      c1.framerate == c2.framerate;
+}
+
+// Video specialization: add a legacy (single-SSRC) stream to the content.
+// The flags parameter is unused for video.
+template<>
+void ChannelTest<VideoTraits>::AddLegacyStreamInContent(
+    uint32 ssrc, int flags, cricket::VideoContentDescription* video) {
+  video->AddLegacyStream(ssrc);
+}
+
+// Concrete fixture for video channels: parameterizes ChannelTest with H.264
+// RTP packets and a canned RTCP report.
+class VideoChannelTest
+    : public ChannelTest<VideoTraits> {
+ public:
+  typedef ChannelTest<VideoTraits>
+  Base;
+  VideoChannelTest() : Base(kH264Packet, sizeof(kH264Packet),
+                            kRtcpReport, sizeof(kRtcpReport)) {
+  }
+
+  // Verify SetChannelOptions() forwards video options to both media
+  // channels, including an incremental update (leaky bucket added later).
+  void TestSetChannelOptions() {
+    CreateChannels(0, 0);
+
+    cricket::VideoOptions o1, o2;
+    o1.video_noise_reduction.Set(true);
+
+    channel1_->SetChannelOptions(o1);
+    channel2_->SetChannelOptions(o1);
+    EXPECT_TRUE(media_channel1_->GetOptions(&o2));
+    EXPECT_EQ(o1, o2);
+    EXPECT_TRUE(media_channel2_->GetOptions(&o2));
+    EXPECT_EQ(o1, o2);
+
+    o1.video_leaky_bucket.Set(true);
+    channel1_->SetChannelOptions(o1);
+    channel2_->SetChannelOptions(o1);
+    EXPECT_TRUE(media_channel1_->GetOptions(&o2));
+    EXPECT_EQ(o1, o2);
+    EXPECT_TRUE(media_channel2_->GetOptions(&o2));
+    EXPECT_EQ(o1, o2);
+  }
+};
+
+
+// VoiceChannelTest
+
+// Runs the shared init test plus voice-specific checks: no stream starts
+// muted and no DTMF has been queued.
+TEST_F(VoiceChannelTest, TestInit) {
+  Base::TestInit();
+  EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+  EXPECT_TRUE(media_channel1_->dtmf_info_queue().empty());
+}
+
+// Thin forwarders to the shared ChannelTest<VoiceTraits> implementations.
+TEST_F(VoiceChannelTest, TestSetContents) {
+  Base::TestSetContents();
+}
+
+TEST_F(VoiceChannelTest, TestSetContentsNullOffer) {
+  Base::TestSetContentsNullOffer();
+}
+
+TEST_F(VoiceChannelTest, TestSetContentsRtcpMux) {
+  Base::TestSetContentsRtcpMux();
+}
+
+// NOTE(review): despite the "WithPrAnswer" name this runs the same
+// TestSetContentsRtcpMux() body as the test above — confirm against
+// upstream whether a PRANSWER-specific variant was intended.
+TEST_F(VoiceChannelTest, TestSetContentsRtcpMuxWithPrAnswer) {
+  Base::TestSetContentsRtcpMux();
+}
+
+TEST_F(VoiceChannelTest, TestSetRemoteContentUpdate) {
+  Base::TestSetRemoteContentUpdate();
+}
+
+TEST_F(VoiceChannelTest, TestStreams) {
+  Base::TestStreams();
+}
+
+TEST_F(VoiceChannelTest, TestUpdateStreamsInLocalContent) {
+  Base::TestUpdateStreamsInLocalContent();
+}
+
+TEST_F(VoiceChannelTest, TestUpdateRemoteStreamsInContent) {
+  Base::TestUpdateStreamsInRemoteContent();
+}
+
+TEST_F(VoiceChannelTest, TestChangeStreamParamsInContent) {
+  Base::TestChangeStreamParamsInContent();
+}
+
+TEST_F(VoiceChannelTest, TestPlayoutAndSendingStates) {
+  Base::TestPlayoutAndSendingStates();
+}
+
+TEST_F(VoiceChannelTest, TestMuteStream) {
+  Base::TestMuteStream();
+}
+
+TEST_F(VoiceChannelTest, TestMediaContentDirection) {
+  Base::TestMediaContentDirection();
+}
+
+TEST_F(VoiceChannelTest, TestCallSetup) {
+  Base::TestCallSetup();
+}
+
+TEST_F(VoiceChannelTest, TestCallTeardownRtcpMux) {
+  Base::TestCallTeardownRtcpMux();
+}
+
+// Thin forwarders to the shared send/receive tests, exercising the various
+// RTCP / RTCP-mux / SRTP / DTLS flag combinations.
+TEST_F(VoiceChannelTest, SendRtpToRtp) {
+  Base::SendRtpToRtp();
+}
+
+TEST_F(VoiceChannelTest, SendNoRtcpToNoRtcp) {
+  Base::SendNoRtcpToNoRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendNoRtcpToRtcp) {
+  Base::SendNoRtcpToRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendRtcpToNoRtcp) {
+  Base::SendRtcpToNoRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendRtcpToRtcp) {
+  Base::SendRtcpToRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendRtcpMuxToRtcp) {
+  Base::SendRtcpMuxToRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendRtcpMuxToRtcpMux) {
+  Base::SendRtcpMuxToRtcpMux();
+}
+
+TEST_F(VoiceChannelTest, SendEarlyRtcpMuxToRtcp) {
+  Base::SendEarlyRtcpMuxToRtcp();
+}
+
+TEST_F(VoiceChannelTest, SendEarlyRtcpMuxToRtcpMux) {
+  Base::SendEarlyRtcpMuxToRtcpMux();
+}
+
+TEST_F(VoiceChannelTest, SendSrtpToSrtpRtcpMux) {
+  Base::SendSrtpToSrtp(RTCP_MUX, RTCP_MUX);
+}
+
+// NOTE(review): the name says SRTP-to-RTP, but the body runs
+// SendSrtpToSrtp() with no flags (SRTP on both sides) — confirm the
+// intended flag combination against upstream.
+TEST_F(VoiceChannelTest, SendSrtpToRtp) {
+  Base::SendSrtpToSrtp();
+}
+
+// NOTE(review): identical body to SendSrtpToSrtpRtcpMux above —
+// apparently a duplicate; verify against upstream.
+TEST_F(VoiceChannelTest, SendSrtcpMux) {
+  Base::SendSrtpToSrtp(RTCP_MUX, RTCP_MUX);
+}
+
+TEST_F(VoiceChannelTest, SendDtlsSrtpToSrtp) {
+  MAYBE_SKIP_TEST(HaveDtlsSrtp);
+  Base::SendSrtpToSrtp(DTLS, 0);
+}
+
+TEST_F(VoiceChannelTest, SendDtlsSrtpToDtlsSrtp) {
+  MAYBE_SKIP_TEST(HaveDtlsSrtp);
+  Base::SendSrtpToSrtp(DTLS, DTLS);
+}
+
+TEST_F(VoiceChannelTest, SendDtlsSrtpToDtlsSrtpRtcpMux) {
+  MAYBE_SKIP_TEST(HaveDtlsSrtp);
+  Base::SendSrtpToSrtp(DTLS | RTCP_MUX, DTLS | RTCP_MUX);
+}
+
+TEST_F(VoiceChannelTest, SendEarlyMediaUsingRtcpMuxSrtp) {
+  Base::SendEarlyMediaUsingRtcpMuxSrtp();
+}
+
+TEST_F(VoiceChannelTest, SendRtpToRtpOnThread) {
+  Base::SendRtpToRtpOnThread();
+}
+
+TEST_F(VoiceChannelTest, SendSrtpToSrtpOnThread) {
+  Base::SendSrtpToSrtpOnThread();
+}
+
+TEST_F(VoiceChannelTest, SendWithWritabilityLoss) {
+  Base::SendWithWritabilityLoss();
+}
+
+TEST_F(VoiceChannelTest, TestMediaMonitor) {
+  Base::TestMediaMonitor();
+}
+
+// Test that MuteStream properly forwards to the media channel and does
+// not signal.
+TEST_F(VoiceChannelTest, TestVoiceSpecificMuteStream) {
+  CreateChannels(0, 0);
+  EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+  EXPECT_FALSE(mute_callback_recved_);
+  // Explicit mute/unmute: media channel state changes, but the mute
+  // callback (reserved for automatic mutes) must never fire.
+  EXPECT_TRUE(channel1_->MuteStream(0, true));
+  EXPECT_TRUE(media_channel1_->IsStreamMuted(0));
+  EXPECT_FALSE(mute_callback_recved_);
+  EXPECT_TRUE(channel1_->MuteStream(0, false));
+  EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+  EXPECT_FALSE(mute_callback_recved_);
+}
+
+// Test that keyboard automute works correctly and signals upwards.
+TEST_F(VoiceChannelTest, TestKeyboardMute) {
+  CreateChannels(0, 0);
+  EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+  EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_NONE, error_);
+
+  cricket::VoiceMediaChannel::Error e =
+      cricket::VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED;
+
+  // Typing doesn't mute automatically unless typing monitor has been installed
+  media_channel1_->TriggerError(0, e);
+  // ProcessMessages(0) drains the posted error message so error_ updates.
+  talk_base::Thread::Current()->ProcessMessages(0);
+  EXPECT_EQ(e, error_);
+  EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+  EXPECT_FALSE(mute_callback_recved_);
+
+  // With a typing monitor installed, the same error now auto-mutes the
+  // stream and fires the mute callback.
+  cricket::TypingMonitorOptions o = {0};
+  o.mute_period = 1500;
+  channel1_->StartTypingMonitor(o);
+  media_channel1_->TriggerError(0, e);
+  talk_base::Thread::Current()->ProcessMessages(0);
+  EXPECT_TRUE(media_channel1_->IsStreamMuted(0));
+  EXPECT_TRUE(mute_callback_recved_);
+}
+
+// Test that PressDTMF properly forwards to the media channel.
+TEST_F(VoiceChannelTest, TestDtmf) {
+  CreateChannels(0, 0);
+  EXPECT_TRUE(SendInitiate());
+  EXPECT_TRUE(SendAccept());
+  EXPECT_EQ(0U, media_channel1_->dtmf_info_queue().size());
+
+  // Second argument is "playout": true adds DF_PLAY to the flags.
+  EXPECT_TRUE(channel1_->PressDTMF(1, true));
+  EXPECT_TRUE(channel1_->PressDTMF(8, false));
+
+  // PressDTMF uses ssrc 0 and a fixed 160ms duration.
+  ASSERT_EQ(2U, media_channel1_->dtmf_info_queue().size());
+  EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[0],
+                              0, 1, 160, cricket::DF_PLAY | cricket::DF_SEND));
+  EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[1],
+                              0, 8, 160, cricket::DF_SEND));
+}
+
+// Test that InsertDtmf properly forwards to the media channel.
+// Unlike PressDTMF, InsertDtmf passes ssrc, duration and flags explicitly.
+TEST_F(VoiceChannelTest, TestInsertDtmf) {
+  CreateChannels(0, 0);
+  EXPECT_TRUE(SendInitiate());
+  EXPECT_TRUE(SendAccept());
+  EXPECT_EQ(0U, media_channel1_->dtmf_info_queue().size());
+
+  EXPECT_TRUE(channel1_->InsertDtmf(1, 3, 100, cricket::DF_SEND));
+  EXPECT_TRUE(channel1_->InsertDtmf(2, 5, 110, cricket::DF_PLAY));
+  EXPECT_TRUE(channel1_->InsertDtmf(3, 7, 120,
+                                    cricket::DF_PLAY | cricket::DF_SEND));
+
+  // Each queued entry must echo the (ssrc, event, duration, flags) tuple.
+  ASSERT_EQ(3U, media_channel1_->dtmf_info_queue().size());
+  EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[0],
+                              1, 3, 100, cricket::DF_SEND));
+  EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[1],
+                              2, 5, 110, cricket::DF_PLAY));
+  EXPECT_TRUE(CompareDtmfInfo(media_channel1_->dtmf_info_queue()[2],
+                              3, 7, 120, cricket::DF_PLAY | cricket::DF_SEND));
+}
+
+// Thin forwarders to the remaining shared ChannelTest<VoiceTraits> tests.
+TEST_F(VoiceChannelTest, TestMediaSinks) {
+  Base::TestMediaSinks();
+}
+
+TEST_F(VoiceChannelTest, TestSetContentFailure) {
+  Base::TestSetContentFailure();
+}
+
+TEST_F(VoiceChannelTest, TestSendTwoOffers) {
+  Base::TestSendTwoOffers();
+}
+
+TEST_F(VoiceChannelTest, TestReceiveTwoOffers) {
+  Base::TestReceiveTwoOffers();
+}
+
+TEST_F(VoiceChannelTest, TestSendPrAnswer) {
+  Base::TestSendPrAnswer();
+}
+
+TEST_F(VoiceChannelTest, TestReceivePrAnswer) {
+  Base::TestReceivePrAnswer();
+}
+
+TEST_F(VoiceChannelTest, TestFlushRtcp) {
+  Base::TestFlushRtcp();
+}
+
+TEST_F(VoiceChannelTest, TestChangeStateError) {
+  Base::TestChangeStateError();
+}
+
+TEST_F(VoiceChannelTest, TestSrtpError) {
+  Base::TestSrtpError();
+}
+
+TEST_F(VoiceChannelTest, TestOnReadyToSend) {
+  Base::TestOnReadyToSend();
+}
+
+TEST_F(VoiceChannelTest, TestOnReadyToSendWithRtcpMux) {
+  Base::TestOnReadyToSendWithRtcpMux();
+}
+
+// Test that we can play a ringback tone properly.
+TEST_F(VoiceChannelTest, TestRingbackTone) {
+  CreateChannels(RTCP, RTCP);
+  EXPECT_FALSE(media_channel1_->ringback_tone_play());
+  // "RIFF" with length 4 stands in for real WAV tone data.
+  EXPECT_TRUE(channel1_->SetRingbackTone("RIFF", 4));
+  EXPECT_TRUE(SendInitiate());
+  EXPECT_TRUE(SendAccept());
+  // Play ringback tone, no loop.
+  // PlayRingbackTone(ssrc, play, loop).
+  EXPECT_TRUE(channel1_->PlayRingbackTone(0, true, false));
+  EXPECT_EQ(0U, media_channel1_->ringback_tone_ssrc());
+  EXPECT_TRUE(media_channel1_->ringback_tone_play());
+  EXPECT_FALSE(media_channel1_->ringback_tone_loop());
+  // Stop the ringback tone.
+  EXPECT_TRUE(channel1_->PlayRingbackTone(0, false, false));
+  EXPECT_FALSE(media_channel1_->ringback_tone_play());
+  // Add a stream.
+  EXPECT_TRUE(AddStream1(1));
+  // Play ringback tone, looping, on the new stream.
+  EXPECT_TRUE(channel1_->PlayRingbackTone(1, true, true));
+  EXPECT_EQ(1U, media_channel1_->ringback_tone_ssrc());
+  EXPECT_TRUE(media_channel1_->ringback_tone_play());
+  EXPECT_TRUE(media_channel1_->ringback_tone_loop());
+  // Stop the ringback tone.
+  EXPECT_TRUE(channel1_->PlayRingbackTone(1, false, false));
+  EXPECT_FALSE(media_channel1_->ringback_tone_play());
+}
+
+// Test that we can scale the output volume properly for 1:1 calls.
+// Ssrc 0 addresses the (single) default stream; ssrc 3 was never added and
+// therefore must be rejected.
+TEST_F(VoiceChannelTest, TestScaleVolume1to1Call) {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ double left, right;
+
+ // Default is (1.0, 1.0).
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ // invalid ssrc.
+ EXPECT_FALSE(media_channel1_->GetOutputScaling(3, &left, &right));
+
+ // Set scale to (1.5, 0.5).
+ EXPECT_TRUE(channel1_->SetOutputScaling(0, 1.5, 0.5));
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.5, left);
+ EXPECT_DOUBLE_EQ(0.5, right);
+
+ // Set scale to (0, 0), i.e. muted output.
+ EXPECT_TRUE(channel1_->SetOutputScaling(0, 0.0, 0.0));
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+}
+
+// Test that we can scale the output volume properly for multiway calls.
+// Scaling ssrc 1 must leave streams 0 and 2 untouched, while scaling ssrc 0
+// appears to act as "all streams" here — note the asymmetry with the 1:1 test.
+TEST_F(VoiceChannelTest, TestScaleVolumeMultiwayCall) {
+ CreateChannels(RTCP, RTCP);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+ EXPECT_TRUE(AddStream1(1));
+ EXPECT_TRUE(AddStream1(2));
+
+ double left, right;
+ // Default is (1.0, 1.0).
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ // invalid ssrc.
+ EXPECT_FALSE(media_channel1_->GetOutputScaling(3, &left, &right));
+
+ // Set scale to (1.5, 0.5) for ssrc = 1.
+ EXPECT_TRUE(channel1_->SetOutputScaling(1, 1.5, 0.5));
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(1.5, left);
+ EXPECT_DOUBLE_EQ(0.5, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1.0, left);
+ EXPECT_DOUBLE_EQ(1.0, right);
+
+ // Set scale to (0, 0) for all ssrcs.
+ EXPECT_TRUE(channel1_->SetOutputScaling(0, 0.0, 0.0));
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(1, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+ EXPECT_TRUE(media_channel1_->GetOutputScaling(2, &left, &right));
+ EXPECT_DOUBLE_EQ(0.0, left);
+ EXPECT_DOUBLE_EQ(0.0, right);
+}
+
+TEST_F(VoiceChannelTest, SendSsrcMuxToSsrcMux) {
+ Base::SendSsrcMuxToSsrcMux();
+}
+
+TEST_F(VoiceChannelTest, SendSsrcMuxToSsrcMuxWithRtcpMux) {
+ Base::SendSsrcMuxToSsrcMuxWithRtcpMux();
+}
+
+TEST_F(VoiceChannelTest, TestSetChannelOptions) {
+ // Unqualified call — presumably TestSetChannelOptions() is declared on the
+ // VoiceChannelTest fixture itself rather than on Base; verify in the header
+ // portion of this file.
+ TestSetChannelOptions();
+}
+
+// VideoChannelTest
+TEST_F(VideoChannelTest, TestInit) {
+ Base::TestInit();
+}
+
+TEST_F(VideoChannelTest, TestSetContents) {
+ Base::TestSetContents();
+}
+
+TEST_F(VideoChannelTest, TestSetContentsNullOffer) {
+ Base::TestSetContentsNullOffer();
+}
+
+TEST_F(VideoChannelTest, TestSetContentsRtcpMux) {
+ Base::TestSetContentsRtcpMux();
+}
+
+TEST_F(VideoChannelTest, TestSetContentsRtcpMuxWithPrAnswer) {
+ // NOTE(review): this invokes TestSetContentsRtcpMux(), identical to the
+ // test above, despite the "WithPrAnswer" name — looks like a copy-paste.
+ // Confirm whether Base provides a PrAnswer-specific variant to call here.
+ Base::TestSetContentsRtcpMux();
+}
+
+TEST_F(VideoChannelTest, TestSetContentsVideoOptions) {
+ Base::TestSetContentsVideoOptions();
+}
+
+TEST_F(VideoChannelTest, TestSetRemoteContentUpdate) {
+ Base::TestSetRemoteContentUpdate();
+}
+
+TEST_F(VideoChannelTest, TestStreams) {
+ Base::TestStreams();
+}
+
+// Verifies that window-capturer state changes are translated into screencast
+// window events (paused -> minimize, running -> restore, stopped -> close)
+// and forwarded through SignalScreencastWindowEvent.
+TEST_F(VideoChannelTest, TestScreencastEvents) {
+ const int kTimeoutMs = 500;
+ TestInit();
+ // Ownership of the factory is transferred to the channel.
+ FakeScreenCaptureFactory* screencapture_factory =
+ new FakeScreenCaptureFactory();
+ channel1_->SetScreenCaptureFactory(screencapture_factory);
+ cricket::ScreencastEventCatcher catcher;
+ channel1_->SignalScreencastWindowEvent.connect(
+ &catcher,
+ &cricket::ScreencastEventCatcher::OnEvent);
+ EXPECT_TRUE(channel1_->AddScreencast(0, ScreencastId(WindowId(0))) != NULL);
+ ASSERT_TRUE(screencapture_factory->window_capturer() != NULL);
+ EXPECT_EQ_WAIT(cricket::CS_STOPPED, screencapture_factory->capture_state(),
+ kTimeoutMs);
+ screencapture_factory->window_capturer()->SignalStateChange(
+ screencapture_factory->window_capturer(), cricket::CS_PAUSED);
+ EXPECT_EQ_WAIT(talk_base::WE_MINIMIZE, catcher.event(), kTimeoutMs);
+ screencapture_factory->window_capturer()->SignalStateChange(
+ screencapture_factory->window_capturer(), cricket::CS_RUNNING);
+ EXPECT_EQ_WAIT(talk_base::WE_RESTORE, catcher.event(), kTimeoutMs);
+ screencapture_factory->window_capturer()->SignalStateChange(
+ screencapture_factory->window_capturer(), cricket::CS_STOPPED);
+ EXPECT_EQ_WAIT(talk_base::WE_CLOSE, catcher.event(), kTimeoutMs);
+ // Removing the screencast must also tear down the capturer.
+ EXPECT_TRUE(channel1_->RemoveScreencast(0));
+ ASSERT_TRUE(screencapture_factory->window_capturer() == NULL);
+}
+
+// Thin forwarders to the shared ChannelTest<VideoTraits> implementations.
+TEST_F(VideoChannelTest, TestUpdateStreamsInLocalContent) {
+ Base::TestUpdateStreamsInLocalContent();
+}
+
+TEST_F(VideoChannelTest, TestUpdateRemoteStreamsInContent) {
+ Base::TestUpdateStreamsInRemoteContent();
+}
+
+TEST_F(VideoChannelTest, TestChangeStreamParamsInContent) {
+ Base::TestChangeStreamParamsInContent();
+}
+
+TEST_F(VideoChannelTest, TestPlayoutAndSendingStates) {
+ Base::TestPlayoutAndSendingStates();
+}
+
+TEST_F(VideoChannelTest, TestMuteStream) {
+ Base::TestMuteStream();
+}
+
+TEST_F(VideoChannelTest, TestMediaContentDirection) {
+ Base::TestMediaContentDirection();
+}
+
+TEST_F(VideoChannelTest, TestCallSetup) {
+ Base::TestCallSetup();
+}
+
+TEST_F(VideoChannelTest, TestCallTeardownRtcpMux) {
+ Base::TestCallTeardownRtcpMux();
+}
+
+TEST_F(VideoChannelTest, SendRtpToRtp) {
+ Base::SendRtpToRtp();
+}
+
+TEST_F(VideoChannelTest, SendNoRtcpToNoRtcp) {
+ Base::SendNoRtcpToNoRtcp();
+}
+
+TEST_F(VideoChannelTest, SendNoRtcpToRtcp) {
+ Base::SendNoRtcpToRtcp();
+}
+
+TEST_F(VideoChannelTest, SendRtcpToNoRtcp) {
+ Base::SendRtcpToNoRtcp();
+}
+
+TEST_F(VideoChannelTest, SendRtcpToRtcp) {
+ Base::SendRtcpToRtcp();
+}
+
+TEST_F(VideoChannelTest, SendRtcpMuxToRtcp) {
+ Base::SendRtcpMuxToRtcp();
+}
+
+TEST_F(VideoChannelTest, SendRtcpMuxToRtcpMux) {
+ Base::SendRtcpMuxToRtcpMux();
+}
+
+TEST_F(VideoChannelTest, SendEarlyRtcpMuxToRtcp) {
+ Base::SendEarlyRtcpMuxToRtcp();
+}
+
+TEST_F(VideoChannelTest, SendEarlyRtcpMuxToRtcpMux) {
+ Base::SendEarlyRtcpMuxToRtcpMux();
+}
+
+TEST_F(VideoChannelTest, SendSrtpToSrtp) {
+ Base::SendSrtpToSrtp();
+}
+
+TEST_F(VideoChannelTest, SendSrtpToRtp) {
+ // NOTE(review): despite the "SrtpToRtp" name this runs SendSrtpToSrtp(),
+ // same as the test above — possibly a copy-paste. Confirm whether Base
+ // offers a genuine SRTP-to-plain-RTP helper.
+ Base::SendSrtpToSrtp();
+}
+
+TEST_F(VideoChannelTest, SendDtlsSrtpToSrtp) {
+ MAYBE_SKIP_TEST(HaveDtlsSrtp);
+ Base::SendSrtpToSrtp(DTLS, 0);
+}
+
+TEST_F(VideoChannelTest, SendDtlsSrtpToDtlsSrtp) {
+ MAYBE_SKIP_TEST(HaveDtlsSrtp);
+ Base::SendSrtpToSrtp(DTLS, DTLS);
+}
+
+TEST_F(VideoChannelTest, SendDtlsSrtpToDtlsSrtpRtcpMux) {
+ MAYBE_SKIP_TEST(HaveDtlsSrtp);
+ Base::SendSrtpToSrtp(DTLS | RTCP_MUX, DTLS | RTCP_MUX);
+}
+
+TEST_F(VideoChannelTest, SendSrtcpMux) {
+ Base::SendSrtpToSrtp(RTCP_MUX, RTCP_MUX);
+}
+
+TEST_F(VideoChannelTest, SendEarlyMediaUsingRtcpMuxSrtp) {
+ Base::SendEarlyMediaUsingRtcpMuxSrtp();
+}
+
+TEST_F(VideoChannelTest, SendRtpToRtpOnThread) {
+ Base::SendRtpToRtpOnThread();
+}
+
+TEST_F(VideoChannelTest, SendSrtpToSrtpOnThread) {
+ Base::SendSrtpToSrtpOnThread();
+}
+
+TEST_F(VideoChannelTest, SendWithWritabilityLoss) {
+ Base::SendWithWritabilityLoss();
+}
+
+TEST_F(VideoChannelTest, TestMediaMonitor) {
+ Base::TestMediaMonitor();
+}
+
+TEST_F(VideoChannelTest, TestMediaSinks) {
+ Base::TestMediaSinks();
+}
+
+TEST_F(VideoChannelTest, TestSetContentFailure) {
+ Base::TestSetContentFailure();
+}
+
+TEST_F(VideoChannelTest, TestSendTwoOffers) {
+ Base::TestSendTwoOffers();
+}
+
+TEST_F(VideoChannelTest, TestReceiveTwoOffers) {
+ Base::TestReceiveTwoOffers();
+}
+
+TEST_F(VideoChannelTest, TestSendPrAnswer) {
+ Base::TestSendPrAnswer();
+}
+
+TEST_F(VideoChannelTest, TestReceivePrAnswer) {
+ Base::TestReceivePrAnswer();
+}
+
+TEST_F(VideoChannelTest, TestFlushRtcp) {
+ Base::TestFlushRtcp();
+}
+
+TEST_F(VideoChannelTest, SendSsrcMuxToSsrcMux) {
+ Base::SendSsrcMuxToSsrcMux();
+}
+
+TEST_F(VideoChannelTest, SendSsrcMuxToSsrcMuxWithRtcpMux) {
+ Base::SendSsrcMuxToSsrcMuxWithRtcpMux();
+}
+
+// TODO(gangji): Add VideoChannelTest.TestChangeStateError.
+
+TEST_F(VideoChannelTest, TestSrtpError) {
+ Base::TestSrtpError();
+}
+
+TEST_F(VideoChannelTest, TestOnReadyToSend) {
+ Base::TestOnReadyToSend();
+}
+
+TEST_F(VideoChannelTest, TestOnReadyToSendWithRtcpMux) {
+ Base::TestOnReadyToSendWithRtcpMux();
+}
+
+// Verifies that ApplyViewRequest reconfigures the send format of each local
+// stream: streams named in the request get the requested resolution/fps,
+// streams omitted from the request are shut down (0x0).
+TEST_F(VideoChannelTest, TestApplyViewRequest) {
+ CreateChannels(0, 0);
+ cricket::StreamParams stream2;
+ stream2.id = "stream2";
+ stream2.ssrcs.push_back(2222);
+ local_media_content1_.AddStream(stream2);
+
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+
+ // Initial send format — presumably the engine default (640x400@30);
+ // confirm against the fixture setup earlier in this file.
+ cricket::VideoFormat send_format;
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(kSsrc1, &send_format));
+ EXPECT_EQ(640, send_format.width);
+ EXPECT_EQ(400, send_format.height);
+ EXPECT_EQ(cricket::VideoFormat::FpsToInterval(30), send_format.interval);
+
+ cricket::ViewRequest request;
+ // stream1: 320x200x15; stream2: 0x0x0
+ request.static_video_views.push_back(cricket::StaticVideoView(
+ cricket::StreamSelector(kSsrc1), 320, 200, 15));
+ EXPECT_TRUE(channel1_->ApplyViewRequest(request));
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(kSsrc1, &send_format));
+ EXPECT_EQ(320, send_format.width);
+ EXPECT_EQ(200, send_format.height);
+ EXPECT_EQ(cricket::VideoFormat::FpsToInterval(15), send_format.interval);
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(2222, &send_format));
+ EXPECT_EQ(0, send_format.width);
+ EXPECT_EQ(0, send_format.height);
+
+ // stream1: 160x100x8; stream2: 0x0x0
+ request.static_video_views.clear();
+ request.static_video_views.push_back(cricket::StaticVideoView(
+ cricket::StreamSelector(kSsrc1), 160, 100, 8));
+ EXPECT_TRUE(channel1_->ApplyViewRequest(request));
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(kSsrc1, &send_format));
+ EXPECT_EQ(160, send_format.width);
+ EXPECT_EQ(100, send_format.height);
+ EXPECT_EQ(cricket::VideoFormat::FpsToInterval(8), send_format.interval);
+
+ // stream1: 0x0x0; stream2: 640x400x30 — selection by stream id, not ssrc.
+ request.static_video_views.clear();
+ request.static_video_views.push_back(cricket::StaticVideoView(
+ cricket::StreamSelector("", stream2.id), 640, 400, 30));
+ EXPECT_TRUE(channel1_->ApplyViewRequest(request));
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(kSsrc1, &send_format));
+ EXPECT_EQ(0, send_format.width);
+ EXPECT_EQ(0, send_format.height);
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(2222, &send_format));
+ EXPECT_EQ(640, send_format.width);
+ EXPECT_EQ(400, send_format.height);
+ EXPECT_EQ(cricket::VideoFormat::FpsToInterval(30), send_format.interval);
+
+ // stream1: 0x0x0; stream2: 0x0x0 — an empty request stops all streams.
+ request.static_video_views.clear();
+ EXPECT_TRUE(channel1_->ApplyViewRequest(request));
+ EXPECT_TRUE(media_channel1_->GetSendStreamFormat(kSsrc1, &send_format));
+ EXPECT_EQ(0, send_format.width);
+ EXPECT_EQ(0, send_format.height);
+}
+
+TEST_F(VideoChannelTest, TestSetChannelOptions) {
+ // Unqualified call — presumably defined on the VideoChannelTest fixture.
+ TestSetChannelOptions();
+}
+
+
+// DataChannelTest
+
+// Fixture for data-channel tests; parameterizes the shared ChannelTest with
+// DataTraits and data-specific reference RTP/RTCP packets.
+class DataChannelTest
+ : public ChannelTest<DataTraits> {
+ public:
+ typedef ChannelTest<DataTraits>
+ Base;
+ DataChannelTest() : Base(kDataPacket, sizeof(kDataPacket),
+ kRtcpReport, sizeof(kRtcpReport)) {
+ }
+};
+
+// Override to avoid engine channel parameter.
+// DataChannel is constructed directly from the fake media channel; the
+// MediaEngineInterface argument is accepted but unused for data.
+template<>
+cricket::DataChannel* ChannelTest<DataTraits>::CreateChannel(
+ talk_base::Thread* thread, cricket::MediaEngineInterface* engine,
+ cricket::FakeDataMediaChannel* ch, cricket::BaseSession* session,
+ bool rtcp) {
+ cricket::DataChannel* channel = new cricket::DataChannel(
+ thread, ch, session, cricket::CN_DATA, rtcp);
+ if (!channel->Init()) {
+ delete channel;
+ channel = NULL;
+ }
+ return channel;
+}
+
+// Builds a data content description; audio/video codec arguments are ignored.
+// SECURE adds an SDES crypto line with a random inline key.
+template<>
+void ChannelTest<DataTraits>::CreateContent(
+ int flags,
+ const cricket::AudioCodec& audio_codec,
+ const cricket::VideoCodec& video_codec,
+ cricket::DataContentDescription* data) {
+ data->AddCodec(kGoogleDataCodec);
+ data->set_rtcp_mux((flags & RTCP_MUX) != 0);
+ if (flags & SECURE) {
+ data->AddCrypto(cricket::CryptoParams(
+ 1, cricket::CS_AES_CM_128_HMAC_SHA1_32,
+ "inline:" + talk_base::CreateRandomString(40), ""));
+ }
+}
+
+template<>
+void ChannelTest<DataTraits>::CopyContent(
+ const cricket::DataContentDescription& source,
+ cricket::DataContentDescription* data) {
+ *data = source;
+}
+
+// Data codecs are matched by name only (no clockrate/channels for data).
+template<>
+bool ChannelTest<DataTraits>::CodecMatches(const cricket::DataCodec& c1,
+ const cricket::DataCodec& c2) {
+ return c1.name == c2.name;
+}
+
+template<>
+void ChannelTest<DataTraits>::AddLegacyStreamInContent(
+ uint32 ssrc, int flags, cricket::DataContentDescription* data) {
+ data->AddLegacyStream(ssrc);
+}
+
+// Thin forwarders to the shared ChannelTest<DataTraits> implementations.
+TEST_F(DataChannelTest, TestInit) {
+ Base::TestInit();
+ EXPECT_FALSE(media_channel1_->IsStreamMuted(0));
+}
+
+TEST_F(DataChannelTest, TestSetContents) {
+ Base::TestSetContents();
+}
+
+TEST_F(DataChannelTest, TestSetContentsNullOffer) {
+ Base::TestSetContentsNullOffer();
+}
+
+TEST_F(DataChannelTest, TestSetContentsRtcpMux) {
+ Base::TestSetContentsRtcpMux();
+}
+
+TEST_F(DataChannelTest, TestSetRemoteContentUpdate) {
+ Base::TestSetRemoteContentUpdate();
+}
+
+TEST_F(DataChannelTest, TestStreams) {
+ Base::TestStreams();
+}
+
+TEST_F(DataChannelTest, TestUpdateStreamsInLocalContent) {
+ Base::TestUpdateStreamsInLocalContent();
+}
+
+TEST_F(DataChannelTest, TestUpdateRemoteStreamsInContent) {
+ Base::TestUpdateStreamsInRemoteContent();
+}
+
+TEST_F(DataChannelTest, TestChangeStreamParamsInContent) {
+ Base::TestChangeStreamParamsInContent();
+}
+
+TEST_F(DataChannelTest, TestPlayoutAndSendingStates) {
+ Base::TestPlayoutAndSendingStates();
+}
+
+TEST_F(DataChannelTest, TestMediaContentDirection) {
+ Base::TestMediaContentDirection();
+}
+
+TEST_F(DataChannelTest, TestCallSetup) {
+ Base::TestCallSetup();
+}
+
+TEST_F(DataChannelTest, TestCallTeardownRtcpMux) {
+ Base::TestCallTeardownRtcpMux();
+}
+
+TEST_F(DataChannelTest, TestOnReadyToSend) {
+ Base::TestOnReadyToSend();
+}
+
+TEST_F(DataChannelTest, TestOnReadyToSendWithRtcpMux) {
+ Base::TestOnReadyToSendWithRtcpMux();
+}
+
+TEST_F(DataChannelTest, SendRtpToRtp) {
+ Base::SendRtpToRtp();
+}
+
+TEST_F(DataChannelTest, SendNoRtcpToNoRtcp) {
+ Base::SendNoRtcpToNoRtcp();
+}
+
+TEST_F(DataChannelTest, SendNoRtcpToRtcp) {
+ Base::SendNoRtcpToRtcp();
+}
+
+TEST_F(DataChannelTest, SendRtcpToNoRtcp) {
+ Base::SendRtcpToNoRtcp();
+}
+
+TEST_F(DataChannelTest, SendRtcpToRtcp) {
+ Base::SendRtcpToRtcp();
+}
+
+TEST_F(DataChannelTest, SendRtcpMuxToRtcp) {
+ Base::SendRtcpMuxToRtcp();
+}
+
+TEST_F(DataChannelTest, SendRtcpMuxToRtcpMux) {
+ Base::SendRtcpMuxToRtcpMux();
+}
+
+TEST_F(DataChannelTest, SendEarlyRtcpMuxToRtcp) {
+ Base::SendEarlyRtcpMuxToRtcp();
+}
+
+TEST_F(DataChannelTest, SendEarlyRtcpMuxToRtcpMux) {
+ Base::SendEarlyRtcpMuxToRtcpMux();
+}
+
+TEST_F(DataChannelTest, SendSrtpToSrtp) {
+ Base::SendSrtpToSrtp();
+}
+
+TEST_F(DataChannelTest, SendSrtpToRtp) {
+ // NOTE(review): runs SendSrtpToSrtp(), same as the test above, despite the
+ // "SrtpToRtp" name — mirrors the identical pattern in VideoChannelTest;
+ // likely copy-paste. Confirm intent before renaming or fixing.
+ Base::SendSrtpToSrtp();
+}
+
+TEST_F(DataChannelTest, SendSrtcpMux) {
+ Base::SendSrtpToSrtp(RTCP_MUX, RTCP_MUX);
+}
+
+TEST_F(DataChannelTest, SendRtpToRtpOnThread) {
+ Base::SendRtpToRtpOnThread();
+}
+
+TEST_F(DataChannelTest, SendSrtpToSrtpOnThread) {
+ Base::SendSrtpToSrtpOnThread();
+}
+
+TEST_F(DataChannelTest, SendWithWritabilityLoss) {
+ Base::SendWithWritabilityLoss();
+}
+
+TEST_F(DataChannelTest, TestMediaMonitor) {
+ Base::TestMediaMonitor();
+}
+
+// Sends a small payload through the fake data media channel and checks that
+// both the SendDataParams and the payload bytes are recorded verbatim.
+TEST_F(DataChannelTest, TestSendData) {
+ CreateChannels(0, 0);
+ EXPECT_TRUE(SendInitiate());
+ EXPECT_TRUE(SendAccept());
+
+ cricket::SendDataParams params;
+ params.ssrc = 42;
+ unsigned char data[] = {
+ 'f', 'o', 'o'
+ };
+ talk_base::Buffer payload(data, 3);
+ cricket::SendDataResult result;
+ ASSERT_TRUE(media_channel1_->SendData(params, payload, &result));
+ EXPECT_EQ(params.ssrc,
+ media_channel1_->last_sent_data_params().ssrc);
+ EXPECT_EQ("foo", media_channel1_->last_sent_data());
+}
+
+// TODO(pthatcher): TestSetReceiver?
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channelmanager.cc b/chromium/third_party/libjingle/source/talk/session/media/channelmanager.cc
new file mode 100644
index 00000000000..c16b066be11
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channelmanager.cc
@@ -0,0 +1,928 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/channelmanager.h"
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <algorithm>
+
+#include "talk/base/bind.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/sigslotrepeater.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/capturemanager.h"
+#include "talk/media/base/hybriddataengine.h"
+#include "talk/media/base/rtpdataengine.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/devices/devicemanager.h"
+#ifdef HAVE_SCTP
+#include "talk/media/sctp/sctpdataengine.h"
+#endif
+#include "talk/session/media/soundclip.h"
+#include "talk/session/media/srtpfilter.h"
+
+namespace cricket {
+
+// Message IDs for ChannelManager's cross-thread message dispatch.
+enum {
+ MSG_VIDEOCAPTURESTATE = 1,
+};
+
+using talk_base::Bind;
+
+// Sentinel meaning SetOutputVolume() has not been called yet.
+static const int kNotSetOutputVolume = -1;
+
+// Message payload carrying a capturer state change between threads.
+struct CaptureStateParams : public talk_base::MessageData {
+ CaptureStateParams(cricket::VideoCapturer* c, cricket::CaptureState s)
+ : capturer(c),
+ state(s) {}
+ cricket::VideoCapturer* capturer;
+ cricket::CaptureState state;
+};
+
+// Builds the data engine: RTP+SCTP hybrid when compiled with SCTP support,
+// plain RTP otherwise. Caller takes ownership.
+static DataEngineInterface* ConstructDataEngine() {
+#ifdef HAVE_SCTP
+ return new HybridDataEngine(new RtpDataEngine(), new SctpDataEngine());
+#else
+ return new RtpDataEngine();
+#endif
+}
+
+#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
+// Production constructor: builds default media/data engines, device manager
+// and capture manager for the given worker thread.
+ChannelManager::ChannelManager(talk_base::Thread* worker_thread) {
+ Construct(MediaEngineFactory::Create(),
+ ConstructDataEngine(),
+ cricket::DeviceManagerFactory::Create(),
+ new CaptureManager(),
+ worker_thread);
+}
+#endif
+
+// Fully-injected constructor (used by tests); takes ownership of all of the
+// engine/manager arguments.
+ChannelManager::ChannelManager(MediaEngineInterface* me,
+ DataEngineInterface* dme,
+ DeviceManagerInterface* dm,
+ CaptureManager* cm,
+ talk_base::Thread* worker_thread) {
+ Construct(me, dme, dm, cm, worker_thread);
+}
+
+// Convenience constructor: injected media engine and device manager, default
+// data engine and capture manager.
+ChannelManager::ChannelManager(MediaEngineInterface* me,
+ DeviceManagerInterface* dm,
+ talk_base::Thread* worker_thread) {
+ Construct(me,
+ ConstructDataEngine(),
+ dm,
+ new CaptureManager(),
+ worker_thread);
+}
+
+// Shared constructor body. Adopts ownership of the engines/managers, records
+// the construction thread as the "main" thread, sets all preferences to
+// defaults, and wires up device- and capture-state signal forwarding.
+void ChannelManager::Construct(MediaEngineInterface* me,
+ DataEngineInterface* dme,
+ DeviceManagerInterface* dm,
+ CaptureManager* cm,
+ talk_base::Thread* worker_thread) {
+ media_engine_.reset(me);
+ data_media_engine_.reset(dme);
+ device_manager_.reset(dm);
+ capture_manager_.reset(cm);
+ initialized_ = false;
+ main_thread_ = talk_base::Thread::Current();
+ worker_thread_ = worker_thread;
+ audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
+ audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
+ audio_options_ = MediaEngineInterface::DEFAULT_AUDIO_OPTIONS;
+ audio_delay_offset_ = MediaEngineInterface::kDefaultAudioDelayOffset;
+ audio_output_volume_ = kNotSetOutputVolume;
+ local_renderer_ = NULL;
+ capturing_ = false;
+ monitoring_ = false;
+ enable_rtx_ = false;
+
+ // Init the device manager immediately, and set up our default video device.
+ SignalDevicesChange.repeat(device_manager_->SignalDevicesChange);
+ device_manager_->Init();
+
+ // Camera is started asynchronously, request callbacks when startup
+ // completes to be able to forward them to the rendering manager.
+ media_engine_->SignalVideoCaptureStateChange().connect(
+ this, &ChannelManager::OnVideoCaptureStateChange);
+ capture_manager_->SignalCapturerStateChange.connect(
+ this, &ChannelManager::OnVideoCaptureStateChange);
+}
+
+ChannelManager::~ChannelManager() {
+ if (initialized_) {
+ Terminate();
+ // If srtp is initialized (done by the Channel) then we must call
+ // srtp_shutdown to free all crypto kernel lists. But we need to make sure
+ // shutdown always called at the end, after channels are destroyed.
+ // ChannelManager d'tor is always called last, it's safe place to call
+ // shutdown.
+ ShutdownSrtp();
+ }
+}
+
+// Enables/disables RTX codec advertisement. Only allowed before Init();
+// returns false (and logs) once initialized.
+bool ChannelManager::SetVideoRtxEnabled(bool enable) {
+ // To be safe, this call is only allowed before initialization. Apps like
+ // Flute only have a singleton ChannelManager and we don't want this flag to
+ // be toggled between calls or when there's concurrent calls. We expect apps
+ // to enable this at startup and retain that setting for the lifetime of the
+ // app.
+ if (!initialized_) {
+ enable_rtx_ = enable;
+ return true;
+ } else {
+ LOG(LS_WARNING) << "Cannot toggle rtx after initialization!";
+ return false;
+ }
+}
+
+// Capability bits are the intersection of engine and device capabilities.
+int ChannelManager::GetCapabilities() {
+ return media_engine_->GetCapabilities() & device_manager_->GetCapabilities();
+}
+
+// Copies the engine's supported audio codecs into |codecs| (cleared first).
+void ChannelManager::GetSupportedAudioCodecs(
+ std::vector<AudioCodec>* codecs) const {
+ codecs->clear();
+
+ for (std::vector<AudioCodec>::const_iterator it =
+ media_engine_->audio_codecs().begin();
+ it != media_engine_->audio_codecs().end(); ++it) {
+ codecs->push_back(*it);
+ }
+}
+
+void ChannelManager::GetSupportedAudioRtpHeaderExtensions(
+ RtpHeaderExtensions* ext) const {
+ *ext = media_engine_->audio_rtp_header_extensions();
+}
+
+// Copies the engine's supported video codecs into |codecs|, filtering out
+// RTX unless it was enabled via SetVideoRtxEnabled().
+void ChannelManager::GetSupportedVideoCodecs(
+ std::vector<VideoCodec>* codecs) const {
+ codecs->clear();
+
+ std::vector<VideoCodec>::const_iterator it;
+ for (it = media_engine_->video_codecs().begin();
+ it != media_engine_->video_codecs().end(); ++it) {
+ if (!enable_rtx_ && _stricmp(kRtxCodecName, it->name.c_str()) == 0) {
+ continue;
+ }
+ codecs->push_back(*it);
+ }
+}
+
+void ChannelManager::GetSupportedVideoRtpHeaderExtensions(
+ RtpHeaderExtensions* ext) const {
+ *ext = media_engine_->video_rtp_header_extensions();
+}
+
+void ChannelManager::GetSupportedDataCodecs(
+ std::vector<DataCodec>* codecs) const {
+ *codecs = data_media_engine_->data_codecs();
+}
+
+// One-time engine initialization. Requires a started worker thread. Applies
+// the stored device/volume/codec preferences; if a preferred device is
+// currently unplugged it falls back to the default device while preserving
+// the stored preference string for later. Returns true once initialized.
+bool ChannelManager::Init() {
+ ASSERT(!initialized_);
+ if (initialized_) {
+ return false;
+ }
+
+ ASSERT(worker_thread_ != NULL);
+ if (worker_thread_ && worker_thread_->started()) {
+ if (media_engine_->Init(worker_thread_)) {
+ initialized_ = true;
+
+ // Now that we're initialized, apply any stored preferences. A preferred
+ // device might have been unplugged. In this case, we fallback to the
+ // default device but keep the user preferences. The preferences are
+ // changed only when the Javascript FE changes them.
+ const std::string preferred_audio_in_device = audio_in_device_;
+ const std::string preferred_audio_out_device = audio_out_device_;
+ const std::string preferred_camera_device = camera_device_;
+ Device device;
+ if (!device_manager_->GetAudioInputDevice(audio_in_device_, &device)) {
+ LOG(LS_WARNING) << "The preferred microphone '" << audio_in_device_
+ << "' is unavailable. Fall back to the default.";
+ audio_in_device_ = DeviceManagerInterface::kDefaultDeviceName;
+ }
+ if (!device_manager_->GetAudioOutputDevice(audio_out_device_, &device)) {
+ LOG(LS_WARNING) << "The preferred speaker '" << audio_out_device_
+ << "' is unavailable. Fall back to the default.";
+ audio_out_device_ = DeviceManagerInterface::kDefaultDeviceName;
+ }
+ if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
+ if (!camera_device_.empty()) {
+ LOG(LS_WARNING) << "The preferred camera '" << camera_device_
+ << "' is unavailable. Fall back to the default.";
+ }
+ camera_device_ = DeviceManagerInterface::kDefaultDeviceName;
+ }
+
+ // Failures below are logged but do not fail Init().
+ if (!SetAudioOptions(audio_in_device_, audio_out_device_,
+ audio_options_, audio_delay_offset_)) {
+ LOG(LS_WARNING) << "Failed to SetAudioOptions with"
+ << " microphone: " << audio_in_device_
+ << " speaker: " << audio_out_device_
+ << " options: " << audio_options_
+ << " delay: " << audio_delay_offset_;
+ }
+
+ // If audio_output_volume_ has been set via SetOutputVolume(), set the
+ // audio output volume of the engine.
+ if (kNotSetOutputVolume != audio_output_volume_ &&
+ !SetOutputVolume(audio_output_volume_)) {
+ LOG(LS_WARNING) << "Failed to SetOutputVolume to "
+ << audio_output_volume_;
+ }
+ if (!SetCaptureDevice(camera_device_) && !camera_device_.empty()) {
+ LOG(LS_WARNING) << "Failed to SetCaptureDevice with camera: "
+ << camera_device_;
+ }
+
+ // Restore the user preferences.
+ audio_in_device_ = preferred_audio_in_device;
+ audio_out_device_ = preferred_audio_out_device;
+ camera_device_ = preferred_camera_device;
+
+ // Now apply the default video codec that has been set earlier.
+ if (default_video_encoder_config_.max_codec.id != 0) {
+ SetDefaultVideoEncoderConfig(default_video_encoder_config_);
+ }
+ // And the local renderer.
+ if (local_renderer_) {
+ SetLocalRenderer(local_renderer_);
+ }
+ }
+ }
+ return initialized_;
+}
+
+// Tears everything down: destroys all channels/soundclips on the worker
+// thread, then terminates the media engine. No-op if not initialized.
+void ChannelManager::Terminate() {
+ ASSERT(initialized_);
+ if (!initialized_) {
+ return;
+ }
+ worker_thread_->Invoke<void>(Bind(&ChannelManager::Terminate_w, this));
+ media_engine_->Terminate();
+ initialized_ = false;
+}
+
+// Worker-thread half of Terminate(): destroys channels in reverse creation
+// order and releases the capture device.
+void ChannelManager::Terminate_w() {
+ ASSERT(worker_thread_ == talk_base::Thread::Current());
+ // Need to destroy the voice/video channels
+ while (!video_channels_.empty()) {
+ DestroyVideoChannel_w(video_channels_.back());
+ }
+ while (!voice_channels_.empty()) {
+ DestroyVoiceChannel_w(voice_channels_.back());
+ }
+ while (!soundclips_.empty()) {
+ DestroySoundclip_w(soundclips_.back());
+ }
+ if (!SetCaptureDevice_w(NULL)) {
+ LOG(LS_WARNING) << "failed to delete video capturer";
+ }
+}
+
+VoiceChannel* ChannelManager::CreateVoiceChannel(
+ BaseSession* session, const std::string& content_name, bool rtcp) {
+ return worker_thread_->Invoke<VoiceChannel*>(
+ Bind(&ChannelManager::CreateVoiceChannel_w, this,
+ session, content_name, rtcp));
+}
+
+VoiceChannel* ChannelManager::CreateVoiceChannel_w(
+ BaseSession* session, const std::string& content_name, bool rtcp) {
+ // This is ok to alloc from a thread other than the worker thread
+ ASSERT(initialized_);
+ VoiceMediaChannel* media_channel = media_engine_->CreateChannel();
+ if (media_channel == NULL)
+ return NULL;
+
+ VoiceChannel* voice_channel = new VoiceChannel(
+ worker_thread_, media_engine_.get(), media_channel,
+ session, content_name, rtcp);
+ if (!voice_channel->Init()) {
+ delete voice_channel;
+ return NULL;
+ }
+ voice_channels_.push_back(voice_channel);
+ return voice_channel;
+}
+
+void ChannelManager::DestroyVoiceChannel(VoiceChannel* voice_channel) {
+ if (voice_channel) {
+ worker_thread_->Invoke<void>(
+ Bind(&ChannelManager::DestroyVoiceChannel_w, this, voice_channel));
+ }
+}
+
+void ChannelManager::DestroyVoiceChannel_w(VoiceChannel* voice_channel) {
+ // Destroy voice channel.
+ ASSERT(initialized_);
+ VoiceChannels::iterator it = std::find(voice_channels_.begin(),
+ voice_channels_.end(), voice_channel);
+ ASSERT(it != voice_channels_.end());
+ if (it == voice_channels_.end())
+ return;
+
+ voice_channels_.erase(it);
+ delete voice_channel;
+}
+
+VideoChannel* ChannelManager::CreateVideoChannel(
+ BaseSession* session, const std::string& content_name, bool rtcp,
+ VoiceChannel* voice_channel) {
+ return worker_thread_->Invoke<VideoChannel*>(
+ Bind(&ChannelManager::CreateVideoChannel_w, this, session,
+ content_name, rtcp, voice_channel));
+}
+
+VideoChannel* ChannelManager::CreateVideoChannel_w(
+ BaseSession* session, const std::string& content_name, bool rtcp,
+ VoiceChannel* voice_channel) {
+ // This is ok to alloc from a thread other than the worker thread
+ ASSERT(initialized_);
+ VideoMediaChannel* media_channel =
+ // voice_channel can be NULL in case of NullVoiceEngine.
+ media_engine_->CreateVideoChannel(voice_channel ?
+ voice_channel->media_channel() : NULL);
+ if (media_channel == NULL)
+ return NULL;
+
+ VideoChannel* video_channel = new VideoChannel(
+ worker_thread_, media_engine_.get(), media_channel,
+ session, content_name, rtcp, voice_channel);
+ if (!video_channel->Init()) {
+ delete video_channel;
+ return NULL;
+ }
+ video_channels_.push_back(video_channel);
+ return video_channel;
+}
+
+void ChannelManager::DestroyVideoChannel(VideoChannel* video_channel) {
+ if (video_channel) {
+ worker_thread_->Invoke<void>(
+ Bind(&ChannelManager::DestroyVideoChannel_w, this, video_channel));
+ }
+}
+
+void ChannelManager::DestroyVideoChannel_w(VideoChannel* video_channel) {
+ // Destroy video channel.
+ ASSERT(initialized_);
+ VideoChannels::iterator it = std::find(video_channels_.begin(),
+ video_channels_.end(), video_channel);
+ ASSERT(it != video_channels_.end());
+ if (it == video_channels_.end())
+ return;
+
+ video_channels_.erase(it);
+ delete video_channel;
+}
+
+DataChannel* ChannelManager::CreateDataChannel(
+ BaseSession* session, const std::string& content_name,
+ bool rtcp, DataChannelType channel_type) {
+ return worker_thread_->Invoke<DataChannel*>(
+ Bind(&ChannelManager::CreateDataChannel_w, this, session, content_name,
+ rtcp, channel_type));
+}
+
+// Worker-thread half of CreateDataChannel: asks the data engine for a media
+// channel of |data_channel_type|, wraps it in a DataChannel, initializes it,
+// and registers it in data_channels_. Returns NULL (after cleanup) if either
+// the engine-level create or DataChannel::Init() fails.
+DataChannel* ChannelManager::CreateDataChannel_w(
+    BaseSession* session, const std::string& content_name,
+    bool rtcp, DataChannelType data_channel_type) {
+  // This is ok to alloc from a thread other than the worker thread.
+  ASSERT(initialized_);
+  DataMediaChannel* media_channel = data_media_engine_->CreateChannel(
+      data_channel_type);
+  if (!media_channel) {
+    LOG(LS_WARNING) << "Failed to create data channel of type "
+                    << data_channel_type;
+    return NULL;
+  }
+
+  DataChannel* data_channel = new DataChannel(
+      worker_thread_, media_channel,
+      session, content_name, rtcp);
+  if (!data_channel->Init()) {
+    LOG(LS_WARNING) << "Failed to init data channel.";
+    delete data_channel;
+    return NULL;
+  }
+  data_channels_.push_back(data_channel);
+  return data_channel;
+}
+
+// Public entry point for tearing down a data channel. Marshals destruction
+// onto the worker thread; a NULL channel is a no-op.
+void ChannelManager::DestroyDataChannel(DataChannel* data_channel) {
+  if (data_channel) {
+    worker_thread_->Invoke<void>(
+        Bind(&ChannelManager::DestroyDataChannel_w, this, data_channel));
+  }
+}
+
+// Worker-thread half of DestroyDataChannel: unregisters the channel from
+// data_channels_ and deletes it. Unknown channels assert in debug builds and
+// are ignored in release builds.
+void ChannelManager::DestroyDataChannel_w(DataChannel* data_channel) {
+  // Destroy data channel.
+  ASSERT(initialized_);
+  DataChannels::iterator it = std::find(data_channels_.begin(),
+      data_channels_.end(), data_channel);
+  ASSERT(it != data_channels_.end());
+  if (it == data_channels_.end())
+    return;
+
+  data_channels_.erase(it);
+  delete data_channel;
+}
+
+// Creates a soundclip player on the worker thread. Returns NULL on failure;
+// on success the clip is owned by this manager until DestroySoundclip().
+Soundclip* ChannelManager::CreateSoundclip() {
+  return worker_thread_->Invoke<Soundclip*>(
+      Bind(&ChannelManager::CreateSoundclip_w, this));
+}
+
+// Worker-thread half of CreateSoundclip: asks the media engine for a
+// SoundclipMedia, wraps it in a Soundclip, and registers it in soundclips_.
+// Returns NULL if the engine cannot create the underlying media object.
+Soundclip* ChannelManager::CreateSoundclip_w() {
+  ASSERT(initialized_);
+  ASSERT(worker_thread_ == talk_base::Thread::Current());
+
+  SoundclipMedia* soundclip_media = media_engine_->CreateSoundclip();
+  if (!soundclip_media) {
+    return NULL;
+  }
+
+  Soundclip* soundclip = new Soundclip(worker_thread_, soundclip_media);
+  soundclips_.push_back(soundclip);
+  return soundclip;
+}
+
+// Public entry point for destroying a soundclip. Marshals destruction onto
+// the worker thread; a NULL soundclip is a no-op.
+void ChannelManager::DestroySoundclip(Soundclip* soundclip) {
+  if (soundclip) {
+    worker_thread_->Invoke<void>(
+        Bind(&ChannelManager::DestroySoundclip_w, this, soundclip));
+  }
+}
+
+// Worker-thread half of DestroySoundclip: unregisters the clip from
+// soundclips_ and deletes it. Unknown clips assert in debug builds and are
+// ignored in release builds.
+void ChannelManager::DestroySoundclip_w(Soundclip* soundclip) {
+  // Destroy soundclip.
+  ASSERT(initialized_);
+  Soundclips::iterator it = std::find(soundclips_.begin(),
+      soundclips_.end(), soundclip);
+  ASSERT(it != soundclips_.end());
+  if (it == soundclips_.end())
+    return;
+
+  soundclips_.erase(it);
+  delete soundclip;
+}
+
+// Returns the cached audio device names and option flags last accepted by
+// SetAudioOptions. Any out-parameter may be NULL to skip it. Always succeeds.
+bool ChannelManager::GetAudioOptions(std::string* in_name,
+                                     std::string* out_name, int* opts) {
+  if (in_name)
+    *in_name = audio_in_device_;
+  if (out_name)
+    *out_name = audio_out_device_;
+  if (opts)
+    *opts = audio_options_;
+  return true;
+}
+
+// Three-argument convenience overload: applies the new devices/options while
+// keeping the currently configured audio delay offset.
+bool ChannelManager::SetAudioOptions(const std::string& in_name,
+                                     const std::string& out_name, int opts) {
+  return SetAudioOptions(in_name, out_name, opts, audio_delay_offset_);
+}
+
+// Resolves the requested input/output device names via the device manager,
+// pushes the settings to the media engine on the worker thread (only if the
+// engine is already initialized), and caches the values for GetAudioOptions
+// on success. Returns false if either device lookup or the engine call fails.
+bool ChannelManager::SetAudioOptions(const std::string& in_name,
+                                     const std::string& out_name, int opts,
+                                     int delay_offset) {
+  // Get device ids from DeviceManager.
+  Device in_dev, out_dev;
+  if (!device_manager_->GetAudioInputDevice(in_name, &in_dev)) {
+    LOG(LS_WARNING) << "Failed to GetAudioInputDevice: " << in_name;
+    return false;
+  }
+  if (!device_manager_->GetAudioOutputDevice(out_name, &out_dev)) {
+    LOG(LS_WARNING) << "Failed to GetAudioOutputDevice: " << out_name;
+    return false;
+  }
+
+  // If we're initialized, pass the settings to the media engine.
+  bool ret = true;
+  if (initialized_) {
+    ret = worker_thread_->Invoke<bool>(
+        Bind(&ChannelManager::SetAudioOptions_w, this,
+             opts, delay_offset, &in_dev, &out_dev));
+  }
+
+  // If all worked well, save the values for use in GetAudioOptions.
+  if (ret) {
+    audio_options_ = opts;
+    audio_in_device_ = in_name;
+    audio_out_device_ = out_name;
+    audio_delay_offset_ = delay_offset;
+  }
+  return ret;
+}
+
+// Worker-thread half of SetAudioOptions: applies option flags, the delay
+// offset, and the sound devices to the media engine, short-circuiting on the
+// first failure. Returns true only if all three engine calls succeed.
+bool ChannelManager::SetAudioOptions_w(int opts, int delay_offset,
+    const Device* in_dev, const Device* out_dev) {
+  ASSERT(worker_thread_ == talk_base::Thread::Current());
+  ASSERT(initialized_);
+
+  // Set audio options
+  bool ret = media_engine_->SetAudioOptions(opts);
+
+  if (ret) {
+    ret = media_engine_->SetAudioDelayOffset(delay_offset);
+  }
+
+  // Set the audio devices
+  if (ret) {
+    ret = media_engine_->SetSoundDevices(in_dev, out_dev);
+  }
+
+  return ret;
+}
+
+// Queries the media engine for the current speaker volume on the worker
+// thread. Fails (returns false) if the engine has not been initialized.
+bool ChannelManager::GetOutputVolume(int* level) {
+  if (!initialized_) {
+    return false;
+  }
+  return worker_thread_->Invoke<bool>(
+      Bind(&MediaEngineInterface::GetOutputVolume, media_engine_.get(), level));
+}
+
+// Sets the speaker volume. |level| must be in [0, 255]; out-of-range values
+// fail. If the engine is initialized the value is also pushed to it on the
+// worker thread. On success the value is cached in audio_output_volume_.
+bool ChannelManager::SetOutputVolume(int level) {
+  bool ret = level >= 0 && level <= 255;
+  if (initialized_) {
+    ret &= worker_thread_->Invoke<bool>(
+        Bind(&MediaEngineInterface::SetOutputVolume,
+             media_engine_.get(), level));
+  }
+
+  if (ret) {
+    audio_output_volume_ = level;
+  }
+
+  return ret;
+}
+
+// Returns true if |capturer| corresponds to the named capture device, i.e.
+// the device manager resolves |capturer_name| to a device whose id matches
+// the capturer's id. NULL capturers and unresolvable names return false.
+bool ChannelManager::IsSameCapturer(const std::string& capturer_name,
+                                    VideoCapturer* capturer) {
+  if (capturer == NULL) {
+    return false;
+  }
+  Device device;
+  if (!device_manager_->GetVideoCaptureDevice(capturer_name, &device)) {
+    return false;
+  }
+  return capturer->GetId() == device.id;
+}
+
+// Looks up the currently selected capture device name (initializing it to
+// the default if unset, via GetCaptureDevice) and resolves it to a Device
+// through the device manager. Returns false if either step fails.
+bool ChannelManager::GetVideoCaptureDevice(Device* device) {
+  std::string device_name;
+  if (!GetCaptureDevice(&device_name)) {
+    return false;
+  }
+  return device_manager_->GetVideoCaptureDevice(device_name, device);
+}
+
+// Returns the name of the currently selected camera in |*cam_name|. If no
+// camera has been selected yet, camera_device_ is lazily initialized to the
+// device manager's default camera; returns false if no default exists.
+bool ChannelManager::GetCaptureDevice(std::string* cam_name) {
+  if (camera_device_.empty()) {
+    // Initialize camera_device_ with default.
+    Device device;
+    if (!device_manager_->GetVideoCaptureDevice(
+        DeviceManagerInterface::kDefaultDeviceName, &device)) {
+      LOG(LS_WARNING) << "Device manager can't find default camera: " <<
+          DeviceManagerInterface::kDefaultDeviceName;
+      return false;
+    }
+    camera_device_ = device.name;
+  }
+  *cam_name = camera_device_;
+  return true;
+}
+
+// Selects the camera named |cam_name|. Resolves the name via the device
+// manager, and (if the engine is running) applies it on the worker thread.
+// On success camera_device_ is updated; on failure camera_device_ is left
+// alone unless it was empty, in which case it falls back to the default
+// camera name so later calls still have a usable value.
+bool ChannelManager::SetCaptureDevice(const std::string& cam_name) {
+  Device device;
+  bool ret = true;
+  if (!device_manager_->GetVideoCaptureDevice(cam_name, &device)) {
+    if (!cam_name.empty()) {
+      LOG(LS_WARNING) << "Device manager can't find camera: " << cam_name;
+    }
+    ret = false;
+  }
+
+  // If we're running, tell the media engine about it.
+  if (initialized_ && ret) {
+    ret = worker_thread_->Invoke<bool>(
+        Bind(&ChannelManager::SetCaptureDevice_w, this, &device));
+  }
+
+  // If everything worked, retain the name of the selected camera.
+  if (ret) {
+    camera_device_ = device.name;
+  } else if (camera_device_.empty()) {
+    // When video option setting fails, we still want camera_device_ to be in a
+    // good state, so we initialize it with default if it's empty.
+    Device default_device;
+    if (!device_manager_->GetVideoCaptureDevice(
+        DeviceManagerInterface::kDefaultDeviceName, &default_device)) {
+      LOG(LS_WARNING) << "Device manager can't find default camera: " <<
+          DeviceManagerInterface::kDefaultDeviceName;
+    }
+    camera_device_ = default_device.name;
+  }
+
+  return ret;
+}
+
+// Creates a capturer for the camera previously chosen via SetCaptureDevice
+// (camera_device_). Returns NULL if the device can no longer be resolved;
+// the caller owns the returned capturer.
+VideoCapturer* ChannelManager::CreateVideoCapturer() {
+  Device device;
+  if (!device_manager_->GetVideoCaptureDevice(camera_device_, &device)) {
+    if (!camera_device_.empty()) {
+      LOG(LS_WARNING) << "Device manager can't find camera: " << camera_device_;
+    }
+    return NULL;
+  }
+  return device_manager_->CreateVideoCapturer(device);
+}
+
+// Worker-thread half of SetCaptureDevice: records the selected device name
+// in video_device_name_ (clearing it for a NULL device). Always succeeds;
+// the engine is not contacted here.
+bool ChannelManager::SetCaptureDevice_w(const Device* cam_device) {
+  ASSERT(worker_thread_ == talk_base::Thread::Current());
+  ASSERT(initialized_);
+
+  if (!cam_device) {
+    video_device_name_.clear();
+    return true;
+  }
+  video_device_name_ = cam_device->name;
+  return true;
+}
+
+// Sets the default video encoder configuration. If the engine is running the
+// config is applied on the worker thread first; it is cached in
+// default_video_encoder_config_ only when that call (if any) succeeds.
+bool ChannelManager::SetDefaultVideoEncoderConfig(const VideoEncoderConfig& c) {
+  bool ret = true;
+  if (initialized_) {
+    ret = worker_thread_->Invoke<bool>(
+        Bind(&MediaEngineInterface::SetDefaultVideoEncoderConfig,
+             media_engine_.get(), c));
+  }
+  if (ret) {
+    default_video_encoder_config_ = c;
+  }
+  return ret;
+}
+
+// Enables/disables local microphone monitoring via the media engine on the
+// worker thread. Requires the engine to be initialized; caches the state in
+// monitoring_ on success.
+bool ChannelManager::SetLocalMonitor(bool enable) {
+  bool ret = initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&MediaEngineInterface::SetLocalMonitor,
+           media_engine_.get(), enable));
+  if (ret) {
+    monitoring_ = enable;
+  }
+  return ret;
+}
+
+// Sets the renderer used to display the local camera preview. Applied to the
+// engine on the worker thread when initialized; cached in local_renderer_ on
+// success. The renderer is not owned by this class.
+bool ChannelManager::SetLocalRenderer(VideoRenderer* renderer) {
+  bool ret = true;
+  if (initialized_) {
+    ret = worker_thread_->Invoke<bool>(
+        Bind(&MediaEngineInterface::SetLocalRenderer,
+             media_engine_.get(), renderer));
+  }
+  if (ret) {
+    local_renderer_ = renderer;
+  }
+  return ret;
+}
+
+// Configures voice-engine logging. Once initialized the call must happen on
+// the worker thread; before Init() it is safe to call the engine directly.
+void ChannelManager::SetVoiceLogging(int level, const char* filter) {
+  if (initialized_) {
+    worker_thread_->Invoke<void>(
+        Bind(&MediaEngineInterface::SetVoiceLogging,
+             media_engine_.get(), level, filter));
+  } else {
+    media_engine_->SetVoiceLogging(level, filter);
+  }
+}
+
+// Configures video-engine logging. Mirrors SetVoiceLogging: marshalled to
+// the worker thread after Init(), called directly before it.
+void ChannelManager::SetVideoLogging(int level, const char* filter) {
+  if (initialized_) {
+    worker_thread_->Invoke<void>(
+        Bind(&MediaEngineInterface::SetVideoLogging,
+             media_engine_.get(), level, filter));
+  } else {
+    media_engine_->SetVideoLogging(level, filter);
+  }
+}
+
+// TODO(janahan): For now pass this request through the mediaengine to the
+// voice and video engines to do the real work. Once the capturer refactoring
+// is done, we will access the capturer using the ssrc (similar to how the
+// renderer is accessed today) and register with it directly.
+// Attaches |processor| to |capturer| via the capture manager on the worker
+// thread. Fails if the manager has not been initialized.
+bool ChannelManager::RegisterVideoProcessor(VideoCapturer* capturer,
+                                            VideoProcessor* processor) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&ChannelManager::RegisterVideoProcessor_w, this,
+           capturer, processor));
+}
+
+// Worker-thread half of RegisterVideoProcessor.
+bool ChannelManager::RegisterVideoProcessor_w(VideoCapturer* capturer,
+                                              VideoProcessor* processor) {
+  return capture_manager_->AddVideoProcessor(capturer, processor);
+}
+
+// Detaches |processor| from |capturer| via the capture manager on the worker
+// thread. Fails if the manager has not been initialized.
+bool ChannelManager::UnregisterVideoProcessor(VideoCapturer* capturer,
+                                              VideoProcessor* processor) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&ChannelManager::UnregisterVideoProcessor_w, this,
+           capturer, processor));
+}
+
+// Worker-thread half of UnregisterVideoProcessor.
+bool ChannelManager::UnregisterVideoProcessor_w(VideoCapturer* capturer,
+                                                VideoProcessor* processor) {
+  return capture_manager_->RemoveVideoProcessor(capturer, processor);
+}
+
+// Registers a voice processor for the given stream |ssrc| and |direction|
+// directly with the media engine, on the worker thread. Fails if the manager
+// has not been initialized.
+bool ChannelManager::RegisterVoiceProcessor(
+    uint32 ssrc,
+    VoiceProcessor* processor,
+    MediaProcessorDirection direction) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&MediaEngineInterface::RegisterVoiceProcessor, media_engine_.get(),
+           ssrc, processor, direction));
+}
+
+// Unregisters a voice processor previously added with
+// RegisterVoiceProcessor, on the worker thread. Fails if the manager has not
+// been initialized.
+bool ChannelManager::UnregisterVoiceProcessor(
+    uint32 ssrc,
+    VoiceProcessor* processor,
+    MediaProcessorDirection direction) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&MediaEngineInterface::UnregisterVoiceProcessor,
+           media_engine_.get(), ssrc, processor, direction));
+}
+
+// The following are done in the new "CaptureManager" style that
+// all local video capturers, processors, and managers should move
+// to.
+// TODO(pthatcher): Add more of the CaptureManager interface.
+// Starts |capturer| at |video_format| through the CaptureManager on the
+// worker thread. The format acts as the handle used later by Stop/Restart.
+bool ChannelManager::StartVideoCapture(
+    VideoCapturer* capturer, const VideoFormat& video_format) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&CaptureManager::StartVideoCapture,
+           capture_manager_.get(), capturer, video_format));
+}
+
+// Mutes (|muted| == true: emit black frames then pause) or unmutes the
+// capturer, on the worker thread. Returns false only if the manager is not
+// initialized; the Invoke itself is fire-and-forget (void).
+bool ChannelManager::MuteToBlackThenPause(
+    VideoCapturer* video_capturer, bool muted) {
+  if (!initialized_) {
+    return false;
+  }
+  worker_thread_->Invoke<void>(
+      Bind(&VideoCapturer::MuteToBlackThenPause, video_capturer, muted));
+  return true;
+}
+
+// Stops the capture started with the matching |video_format| through the
+// CaptureManager, on the worker thread.
+bool ChannelManager::StopVideoCapture(
+    VideoCapturer* capturer, const VideoFormat& video_format) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&CaptureManager::StopVideoCapture,
+           capture_manager_.get(), capturer, video_format));
+}
+
+// Switches a running capturer from |previous_format| to |desired_format|
+// through the CaptureManager on the worker thread, using |options| to
+// control the restart behavior.
+bool ChannelManager::RestartVideoCapture(
+    VideoCapturer* video_capturer,
+    const VideoFormat& previous_format,
+    const VideoFormat& desired_format,
+    CaptureManager::RestartOptions options) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&CaptureManager::RestartVideoCapture, capture_manager_.get(),
+           video_capturer, previous_format, desired_format, options));
+}
+
+// Connects |renderer| to |capturer| through the CaptureManager on the worker
+// thread. Fails if the manager has not been initialized.
+bool ChannelManager::AddVideoRenderer(
+    VideoCapturer* capturer, VideoRenderer* renderer) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&CaptureManager::AddVideoRenderer,
+           capture_manager_.get(), capturer, renderer));
+}
+
+// Disconnects |renderer| from |capturer| through the CaptureManager on the
+// worker thread. Fails if the manager has not been initialized.
+bool ChannelManager::RemoveVideoRenderer(
+    VideoCapturer* capturer, VideoRenderer* renderer) {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&CaptureManager::RemoveVideoRenderer,
+           capture_manager_.get(), capturer, renderer));
+}
+
+// Returns true if any video channel is currently screencasting. The check is
+// performed on the worker thread; false if not initialized.
+bool ChannelManager::IsScreencastRunning() const {
+  return initialized_ && worker_thread_->Invoke<bool>(
+      Bind(&ChannelManager::IsScreencastRunning_w, this));
+}
+
+// Worker-thread half of IsScreencastRunning: linear scan over all video
+// channels, returning true on the first one that reports IsScreencasting().
+bool ChannelManager::IsScreencastRunning_w() const {
+  VideoChannels::const_iterator it = video_channels_.begin();
+  for ( ; it != video_channels_.end(); ++it) {
+    if ((*it) && (*it)->IsScreencasting()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Capture-state callback (may fire on the worker thread): updates the
+// capturing_ flag and reposts the event to the main thread, where OnMessage
+// re-emits it as SignalVideoCaptureStateChange. The params object is owned
+// by the message until OnMessage deletes it.
+void ChannelManager::OnVideoCaptureStateChange(VideoCapturer* capturer,
+                                               CaptureState result) {
+  // TODO(whyuan): Check capturer and signal failure only for camera video, not
+  // screencast.
+  capturing_ = result == CS_RUNNING;
+  main_thread_->Post(this, MSG_VIDEOCAPTURESTATE,
+                     new CaptureStateParams(capturer, result));
+}
+
+// Main-thread message handler. Currently only handles MSG_VIDEOCAPTURESTATE,
+// forwarding the queued capture-state change to listeners and freeing the
+// heap-allocated params posted by OnVideoCaptureStateChange.
+void ChannelManager::OnMessage(talk_base::Message* message) {
+  switch (message->message_id) {
+    case MSG_VIDEOCAPTURESTATE: {
+      CaptureStateParams* data =
+          static_cast<CaptureStateParams*>(message->pdata);
+      SignalVideoCaptureStateChange(data->capturer, data->state);
+      delete data;
+      break;
+    }
+  }
+}
+
+
+// File-local helper: replaces the contents of |names| with the name of every
+// device in |devs|, preserving order.
+static void GetDeviceNames(const std::vector<Device>& devs,
+                           std::vector<std::string>* names) {
+  names->clear();
+  for (size_t i = 0; i < devs.size(); ++i) {
+    names->push_back(devs[i].name);
+  }
+}
+
+// Fills |names| with the names of available audio input devices. |names| is
+// cleared regardless; it is populated only if the device-manager query
+// succeeds.
+bool ChannelManager::GetAudioInputDevices(std::vector<std::string>* names) {
+  names->clear();
+  std::vector<Device> devs;
+  bool ret = device_manager_->GetAudioInputDevices(&devs);
+  if (ret)
+    GetDeviceNames(devs, names);
+
+  return ret;
+}
+
+// Fills |names| with the names of available audio output devices. Same
+// contract as GetAudioInputDevices.
+bool ChannelManager::GetAudioOutputDevices(std::vector<std::string>* names) {
+  names->clear();
+  std::vector<Device> devs;
+  bool ret = device_manager_->GetAudioOutputDevices(&devs);
+  if (ret)
+    GetDeviceNames(devs, names);
+
+  return ret;
+}
+
+// Fills |names| with the names of available video capture devices. Same
+// contract as GetAudioInputDevices.
+bool ChannelManager::GetVideoCaptureDevices(std::vector<std::string>* names) {
+  names->clear();
+  std::vector<Device> devs;
+  bool ret = device_manager_->GetVideoCaptureDevices(&devs);
+  if (ret)
+    GetDeviceNames(devs, names);
+
+  return ret;
+}
+
+// Caps the capture format for the device identified by |usb_id|; simply
+// forwarded to the device manager.
+void ChannelManager::SetVideoCaptureDeviceMaxFormat(
+    const std::string& usb_id,
+    const VideoFormat& max_format) {
+  device_manager_->SetVideoCaptureDeviceMaxFormat(usb_id, max_format);
+}
+
+// Returns the media engine's preferred initial capture format, queried on
+// the worker thread.
+VideoFormat ChannelManager::GetStartCaptureFormat() {
+  return worker_thread_->Invoke<VideoFormat>(
+      Bind(&MediaEngineInterface::GetStartCaptureFormat, media_engine_.get()));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channelmanager.h b/chromium/third_party/libjingle/source/talk/session/media/channelmanager.h
new file mode 100644
index 00000000000..b1967bfcd50
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channelmanager.h
@@ -0,0 +1,311 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_CHANNELMANAGER_H_
+#define TALK_SESSION_MEDIA_CHANNELMANAGER_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/sigslotrepeater.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/capturemanager.h"
+#include "talk/media/base/mediaengine.h"
+#include "talk/p2p/base/session.h"
+#include "talk/session/media/voicechannel.h"
+
+namespace cricket {
+
+class Soundclip;
+class VideoProcessor;
+class VoiceChannel;
+class VoiceProcessor;
+
+// ChannelManager allows the MediaEngine to run on a separate thread, and takes
+// care of marshalling calls between threads. It also creates and keeps track of
+// voice and video channels; by doing so, it can temporarily pause all the
+// channels when a new audio or video device is chosen. The voice and video
+// channels are stored in separate vectors, to easily allow operations on just
+// voice or just video channels.
+// ChannelManager also allows the application to discover what devices it has
+// using device manager.
+class ChannelManager : public talk_base::MessageHandler,
+                       public sigslot::has_slots<> {
+ public:
+#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
+  // Creates the channel manager, and specifies the worker thread to use.
+  explicit ChannelManager(talk_base::Thread* worker);
+#endif
+
+  // For testing purposes. Allows the media engine and data media
+  // engine and dev manager to be mocks. The ChannelManager takes
+  // ownership of these objects.
+  ChannelManager(MediaEngineInterface* me,
+                 DataEngineInterface* dme,
+                 DeviceManagerInterface* dm,
+                 CaptureManager* cm,
+                 talk_base::Thread* worker);
+  // Same as above, but gives an easier default DataEngine.
+  ChannelManager(MediaEngineInterface* me,
+                 DeviceManagerInterface* dm,
+                 talk_base::Thread* worker);
+  ~ChannelManager();
+
+  // Accessors for the worker thread, allowing it to be set after construction,
+  // but before Init. set_worker_thread will return false if called after Init.
+  talk_base::Thread* worker_thread() const { return worker_thread_; }
+  bool set_worker_thread(talk_base::Thread* thread) {
+    if (initialized_) return false;
+    worker_thread_ = thread;
+    return true;
+  }
+
+  // Gets capabilities. Can be called prior to starting the media engine.
+  int GetCapabilities();
+
+  // Retrieves the list of supported audio & video codec types.
+  // Can be called before starting the media engine.
+  void GetSupportedAudioCodecs(std::vector<AudioCodec>* codecs) const;
+  void GetSupportedAudioRtpHeaderExtensions(RtpHeaderExtensions* ext) const;
+  void GetSupportedVideoCodecs(std::vector<VideoCodec>* codecs) const;
+  void GetSupportedVideoRtpHeaderExtensions(RtpHeaderExtensions* ext) const;
+  void GetSupportedDataCodecs(std::vector<DataCodec>* codecs) const;
+
+  // Indicates whether the media engine is started.
+  bool initialized() const { return initialized_; }
+  // Starts up the media engine.
+  bool Init();
+  // Shuts down the media engine.
+  void Terminate();
+
+  // The operations below all occur on the worker thread.
+
+  // Creates a voice channel, to be associated with the specified session.
+  VoiceChannel* CreateVoiceChannel(
+      BaseSession* session, const std::string& content_name, bool rtcp);
+  // Destroys a voice channel created with the Create API.
+  void DestroyVoiceChannel(VoiceChannel* voice_channel);
+  // Creates a video channel, synced with the specified voice channel, and
+  // associated with the specified session.
+  VideoChannel* CreateVideoChannel(
+      BaseSession* session, const std::string& content_name, bool rtcp,
+      VoiceChannel* voice_channel);
+  // Destroys a video channel created with the Create API.
+  void DestroyVideoChannel(VideoChannel* video_channel);
+  // Creates a data channel of the given type, associated with the specified
+  // session.
+  DataChannel* CreateDataChannel(
+      BaseSession* session, const std::string& content_name,
+      bool rtcp, DataChannelType data_channel_type);
+  // Destroys a data channel created with the Create API.
+  void DestroyDataChannel(DataChannel* data_channel);
+
+  // Creates a soundclip.
+  Soundclip* CreateSoundclip();
+  // Destroys a soundclip created with the Create API.
+  void DestroySoundclip(Soundclip* soundclip);
+
+  // Indicates whether any channels exist.
+  bool has_channels() const {
+    return (!voice_channels_.empty() || !video_channels_.empty() ||
+            !soundclips_.empty());
+  }
+
+  // Configures the audio and video devices. A null pointer can be passed to
+  // GetAudioOptions() for any parameter of no interest.
+  bool GetAudioOptions(std::string* wave_in_device,
+                       std::string* wave_out_device, int* opts);
+  bool SetAudioOptions(const std::string& wave_in_device,
+                       const std::string& wave_out_device, int opts);
+  bool GetOutputVolume(int* level);
+  bool SetOutputVolume(int level);
+  bool IsSameCapturer(const std::string& capturer_name,
+                      VideoCapturer* capturer);
+  // TODO(noahric): Nearly everything called "device" in this API is actually a
+  // device name, so this should really be GetCaptureDeviceName, and the
+  // next method should be GetCaptureDevice.
+  bool GetCaptureDevice(std::string* cam_device);
+  // Gets the current capture Device.
+  bool GetVideoCaptureDevice(Device* device);
+  // Create capturer based on what has been set in SetCaptureDevice().
+  VideoCapturer* CreateVideoCapturer();
+  bool SetCaptureDevice(const std::string& cam_device);
+  bool SetDefaultVideoEncoderConfig(const VideoEncoderConfig& config);
+  // RTX will be enabled/disabled in engines that support it. The supporting
+  // engines will start offering an RTX codec. Must be called before Init().
+  bool SetVideoRtxEnabled(bool enable);
+
+  // Starts/stops the local microphone and enables polling of the input level.
+  bool SetLocalMonitor(bool enable);
+  bool monitoring() const { return monitoring_; }
+  // Sets the local renderer where to renderer the local camera.
+  bool SetLocalRenderer(VideoRenderer* renderer);
+  // Sets the externally provided video capturer. The ssrc is the ssrc of the
+  // (video) stream for which the video capturer should be set.
+  bool SetVideoCapturer(VideoCapturer* capturer);
+  bool capturing() const { return capturing_; }
+
+  // Configures the logging output of the mediaengine(s).
+  void SetVoiceLogging(int level, const char* filter);
+  void SetVideoLogging(int level, const char* filter);
+
+  // The channel manager handles the Tx side for Video processing,
+  // as well as Tx and Rx side for Voice processing.
+  // (The Rx Video processing will go through the simplerenderingmanager,
+  // to be implemented).
+  bool RegisterVideoProcessor(VideoCapturer* capturer,
+                              VideoProcessor* processor);
+  bool UnregisterVideoProcessor(VideoCapturer* capturer,
+                                VideoProcessor* processor);
+  bool RegisterVoiceProcessor(uint32 ssrc,
+                              VoiceProcessor* processor,
+                              MediaProcessorDirection direction);
+  bool UnregisterVoiceProcessor(uint32 ssrc,
+                                VoiceProcessor* processor,
+                                MediaProcessorDirection direction);
+  // The following are done in the new "CaptureManager" style that
+  // all local video capturers, processors, and managers should move to.
+  // TODO(pthatcher): Make methods nicer by having start return a handle that
+  // can be used for stop and restart, rather than needing to pass around
+  // formats as a pseudo-handle.
+  bool StartVideoCapture(VideoCapturer* video_capturer,
+                         const VideoFormat& video_format);
+  // When muting, produce black frames then pause the camera.
+  // When unmuting, start the camera. Camera starts unmuted.
+  bool MuteToBlackThenPause(VideoCapturer* video_capturer, bool muted);
+  bool StopVideoCapture(VideoCapturer* video_capturer,
+                        const VideoFormat& video_format);
+  bool RestartVideoCapture(VideoCapturer* video_capturer,
+                           const VideoFormat& previous_format,
+                           const VideoFormat& desired_format,
+                           CaptureManager::RestartOptions options);
+
+  bool AddVideoRenderer(VideoCapturer* capturer, VideoRenderer* renderer);
+  bool RemoveVideoRenderer(VideoCapturer* capturer, VideoRenderer* renderer);
+  bool IsScreencastRunning() const;
+
+  // The operations below occur on the main thread.
+
+  bool GetAudioInputDevices(std::vector<std::string>* names);
+  bool GetAudioOutputDevices(std::vector<std::string>* names);
+  bool GetVideoCaptureDevices(std::vector<std::string>* names);
+  void SetVideoCaptureDeviceMaxFormat(const std::string& usb_id,
+                                      const VideoFormat& max_format);
+
+  sigslot::repeater0<> SignalDevicesChange;
+  sigslot::signal2<VideoCapturer*, CaptureState> SignalVideoCaptureStateChange;
+
+  // Returns the current selected device. Note: Subtly different from
+  // GetCaptureDevice(). See member video_device_ for more details.
+  // This API is mainly a hook used by unittests.
+  const std::string& video_device_name() const { return video_device_name_; }
+
+  // TODO(hellner): Remove this function once the engine capturer has been
+  // removed.
+  VideoFormat GetStartCaptureFormat();
+ protected:
+  // Adds non-transient parameters which can only be changed through the
+  // options store.
+  bool SetAudioOptions(const std::string& wave_in_device,
+                       const std::string& wave_out_device, int opts,
+                       int delay_offset);
+  int audio_delay_offset() const { return audio_delay_offset_; }
+
+ private:
+  typedef std::vector<VoiceChannel*> VoiceChannels;
+  typedef std::vector<VideoChannel*> VideoChannels;
+  typedef std::vector<DataChannel*> DataChannels;
+  typedef std::vector<Soundclip*> Soundclips;
+
+  // Shared constructor body; takes ownership of the engines and managers.
+  void Construct(MediaEngineInterface* me,
+                 DataEngineInterface* dme,
+                 DeviceManagerInterface* dm,
+                 CaptureManager* cm,
+                 talk_base::Thread* worker_thread);
+  void Terminate_w();
+  // Worker-thread (_w) counterparts of the public channel/clip APIs above.
+  VoiceChannel* CreateVoiceChannel_w(
+      BaseSession* session, const std::string& content_name, bool rtcp);
+  void DestroyVoiceChannel_w(VoiceChannel* voice_channel);
+  VideoChannel* CreateVideoChannel_w(
+      BaseSession* session, const std::string& content_name, bool rtcp,
+      VoiceChannel* voice_channel);
+  void DestroyVideoChannel_w(VideoChannel* video_channel);
+  DataChannel* CreateDataChannel_w(
+      BaseSession* session, const std::string& content_name,
+      bool rtcp, DataChannelType data_channel_type);
+  void DestroyDataChannel_w(DataChannel* data_channel);
+  Soundclip* CreateSoundclip_w();
+  void DestroySoundclip_w(Soundclip* soundclip);
+  bool SetAudioOptions_w(int opts, int delay_offset, const Device* in_dev,
+                         const Device* out_dev);
+  bool SetCaptureDevice_w(const Device* cam_device);
+  void OnVideoCaptureStateChange(VideoCapturer* capturer,
+                                 CaptureState result);
+  bool RegisterVideoProcessor_w(VideoCapturer* capturer,
+                                VideoProcessor* processor);
+  bool UnregisterVideoProcessor_w(VideoCapturer* capturer,
+                                  VideoProcessor* processor);
+  bool IsScreencastRunning_w() const;
+  virtual void OnMessage(talk_base::Message *message);
+
+  // Owned engine/manager backends (scoped_ptr => deleted with this object).
+  talk_base::scoped_ptr<MediaEngineInterface> media_engine_;
+  talk_base::scoped_ptr<DataEngineInterface> data_media_engine_;
+  talk_base::scoped_ptr<DeviceManagerInterface> device_manager_;
+  talk_base::scoped_ptr<CaptureManager> capture_manager_;
+  bool initialized_;
+  talk_base::Thread* main_thread_;
+  talk_base::Thread* worker_thread_;
+
+  // Channels/clips created through the Create* APIs, owned until Destroy*.
+  VoiceChannels voice_channels_;
+  VideoChannels video_channels_;
+  DataChannels data_channels_;
+  Soundclips soundclips_;
+
+  // Cached settings mirrored back by the Get* accessors.
+  std::string audio_in_device_;
+  std::string audio_out_device_;
+  int audio_options_;
+  int audio_delay_offset_;
+  int audio_output_volume_;
+  std::string camera_device_;
+  VideoEncoderConfig default_video_encoder_config_;
+  VideoRenderer* local_renderer_;
+  bool enable_rtx_;
+
+  bool capturing_;
+  bool monitoring_;
+
+  // String containing currently set device. Note that this string is subtly
+  // different from camera_device_. E.g. camera_device_ will list unplugged
+  // but selected devices while this string will be empty or contain current
+  // selected device.
+  // TODO(hellner): refactor the code such that there is no need to keep two
+  // strings for video devices that have subtle differences in behavior.
+  std::string video_device_name_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_CHANNELMANAGER_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/channelmanager_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/channelmanager_unittest.cc
new file mode 100644
index 00000000000..32321ebcd01
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/channelmanager_unittest.cc
@@ -0,0 +1,596 @@
+// libjingle
+// Copyright 2008 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/fakecapturemanager.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakemediaprocessor.h"
+#include "talk/media/base/nullvideorenderer.h"
+#include "talk/media/devices/fakedevicemanager.h"
+#include "talk/media/base/testutils.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channelmanager.h"
+
+namespace cricket {
+
+// Codecs used to populate the FakeMediaEngine. The values are arbitrary
+// for these tests; the video list deliberately includes an "rtx" entry so
+// SetVideoRtxEnabled has an RTX codec to toggle.
+static const AudioCodec kAudioCodecs[] = {
+ AudioCodec(97, "voice", 1, 2, 3, 0),
+ AudioCodec(110, "CELT", 32000, 48000, 2, 0),
+ AudioCodec(111, "OPUS", 48000, 32000, 2, 0),
+};
+
+static const VideoCodec kVideoCodecs[] = {
+ VideoCodec(99, "H264", 100, 200, 300, 0),
+ VideoCodec(100, "VP8", 100, 200, 300, 0),
+ VideoCodec(96, "rtx", 100, 200, 300, 0),
+};
+
+class ChannelManagerTest : public testing::Test {
+ protected:
+ // Test fixture that wires a ChannelManager up to fake engines/managers.
+ // Every raw pointer member is initialized here; fdme_ and session_ were
+ // previously missing from the initializer list and started out
+ // uninitialized.
+ ChannelManagerTest()
+ : fme_(NULL), fdme_(NULL), fdm_(NULL), fcm_(NULL), cm_(NULL),
+ session_(NULL) {
+ }
+
+ virtual void SetUp() {
+ fme_ = new cricket::FakeMediaEngine();
+ fme_->SetAudioCodecs(MAKE_VECTOR(kAudioCodecs));
+ fme_->SetVideoCodecs(MAKE_VECTOR(kVideoCodecs));
+ fdme_ = new cricket::FakeDataEngine();
+ fdm_ = new cricket::FakeDeviceManager();
+ fcm_ = new cricket::FakeCaptureManager();
+ cm_ = new cricket::ChannelManager(
+ fme_, fdme_, fdm_, fcm_, talk_base::Thread::Current());
+ session_ = new cricket::FakeSession(true);
+
+ // Populate the fake device manager with two devices of each kind so
+ // tests can exercise device selection and fallback.
+ std::vector<std::string> in_device_list, out_device_list, vid_device_list;
+ in_device_list.push_back("audio-in1");
+ in_device_list.push_back("audio-in2");
+ out_device_list.push_back("audio-out1");
+ out_device_list.push_back("audio-out2");
+ vid_device_list.push_back("video-in1");
+ vid_device_list.push_back("video-in2");
+ fdm_->SetAudioInputDevices(in_device_list);
+ fdm_->SetAudioOutputDevices(out_device_list);
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+ }
+
+ virtual void TearDown() {
+ delete session_;
+ session_ = NULL;
+ delete cm_;
+ cm_ = NULL;
+ // The engines/managers are presumably owned (and deleted) by cm_ —
+ // that is why they are only NULLed here, never deleted directly.
+ fdm_ = NULL;
+ fcm_ = NULL;
+ fdme_ = NULL;
+ fme_ = NULL;
+ }
+
+ talk_base::Thread worker_;
+ cricket::FakeMediaEngine* fme_;
+ cricket::FakeDataEngine* fdme_;
+ cricket::FakeDeviceManager* fdm_;
+ cricket::FakeCaptureManager* fcm_;
+ cricket::ChannelManager* cm_;
+ cricket::FakeSession* session_;
+};
+
+// Test that we startup/shutdown properly.
+TEST_F(ChannelManagerTest, StartupShutdown) {
+ EXPECT_FALSE(cm_->initialized());
+ EXPECT_EQ(talk_base::Thread::Current(), cm_->worker_thread());
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->initialized());
+ cm_->Terminate();
+ EXPECT_FALSE(cm_->initialized());
+}
+
+// Test that we startup/shutdown properly with a worker thread.
+TEST_F(ChannelManagerTest, StartupShutdownOnThread) {
+ worker_.Start();
+ EXPECT_FALSE(cm_->initialized());
+ EXPECT_EQ(talk_base::Thread::Current(), cm_->worker_thread());
+ EXPECT_TRUE(cm_->set_worker_thread(&worker_));
+ EXPECT_EQ(&worker_, cm_->worker_thread());
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->initialized());
+ // Setting the worker thread while initialized should fail.
+ EXPECT_FALSE(cm_->set_worker_thread(talk_base::Thread::Current()));
+ cm_->Terminate();
+ EXPECT_FALSE(cm_->initialized());
+}
+
+// Test that we fail to startup if we're given an unstarted thread.
+TEST_F(ChannelManagerTest, StartupShutdownOnUnstartedThread) {
+ // worker_ was never Start()ed, so Init() must refuse to run on it.
+ EXPECT_TRUE(cm_->set_worker_thread(&worker_));
+ EXPECT_FALSE(cm_->Init());
+ EXPECT_FALSE(cm_->initialized());
+}
+
+// Test that we can create and destroy a voice and video channel.
+TEST_F(ChannelManagerTest, CreateDestroyChannels) {
+ EXPECT_TRUE(cm_->Init());
+ cricket::VoiceChannel* voice_channel = cm_->CreateVoiceChannel(
+ session_, cricket::CN_AUDIO, false);
+ EXPECT_TRUE(voice_channel != NULL);
+ cricket::VideoChannel* video_channel =
+ cm_->CreateVideoChannel(session_, cricket::CN_VIDEO,
+ false, voice_channel);
+ EXPECT_TRUE(video_channel != NULL);
+ cricket::DataChannel* data_channel =
+ cm_->CreateDataChannel(session_, cricket::CN_DATA,
+ false, cricket::DCT_RTP);
+ EXPECT_TRUE(data_channel != NULL);
+ // The video channel was created against voice_channel, so destroy it
+ // before the voice channel.
+ cm_->DestroyVideoChannel(video_channel);
+ cm_->DestroyVoiceChannel(voice_channel);
+ cm_->DestroyDataChannel(data_channel);
+ cm_->Terminate();
+}
+
+// Test that we can create and destroy a voice and video channel with a worker.
+TEST_F(ChannelManagerTest, CreateDestroyChannelsOnThread) {
+ worker_.Start();
+ EXPECT_TRUE(cm_->set_worker_thread(&worker_));
+ EXPECT_TRUE(cm_->Init());
+ cricket::VoiceChannel* voice_channel = cm_->CreateVoiceChannel(
+ session_, cricket::CN_AUDIO, false);
+ EXPECT_TRUE(voice_channel != NULL);
+ cricket::VideoChannel* video_channel =
+ cm_->CreateVideoChannel(session_, cricket::CN_VIDEO,
+ false, voice_channel);
+ EXPECT_TRUE(video_channel != NULL);
+ cricket::DataChannel* data_channel =
+ cm_->CreateDataChannel(session_, cricket::CN_DATA,
+ false, cricket::DCT_RTP);
+ EXPECT_TRUE(data_channel != NULL);
+ cm_->DestroyVideoChannel(video_channel);
+ cm_->DestroyVoiceChannel(voice_channel);
+ cm_->DestroyDataChannel(data_channel);
+ cm_->Terminate();
+}
+
+// Test that we fail to create a voice/video channel if the session is unable
+// to create a cricket::TransportChannel
+TEST_F(ChannelManagerTest, NoTransportChannelTest) {
+ EXPECT_TRUE(cm_->Init());
+ session_->set_fail_channel_creation(true);
+ // The test is useless unless the session does not fail creating
+ // cricket::TransportChannel.
+ ASSERT_TRUE(session_->CreateChannel(
+ "audio", "rtp", cricket::ICE_CANDIDATE_COMPONENT_RTP) == NULL);
+
+ // With no transport channel available, every Create*Channel call must
+ // return NULL rather than a half-constructed channel.
+ cricket::VoiceChannel* voice_channel = cm_->CreateVoiceChannel(
+ session_, cricket::CN_AUDIO, false);
+ EXPECT_TRUE(voice_channel == NULL);
+ cricket::VideoChannel* video_channel =
+ cm_->CreateVideoChannel(session_, cricket::CN_VIDEO,
+ false, voice_channel);
+ EXPECT_TRUE(video_channel == NULL);
+ cricket::DataChannel* data_channel =
+ cm_->CreateDataChannel(session_, cricket::CN_DATA,
+ false, cricket::DCT_RTP);
+ EXPECT_TRUE(data_channel == NULL);
+ cm_->Terminate();
+}
+
+// Test that SetDefaultVideoEncoderConfig passes through the right values.
+TEST_F(ChannelManagerTest, SetDefaultVideoEncoderConfig) {
+ cricket::VideoCodec codec(96, "G264", 1280, 720, 60, 0);
+ cricket::VideoEncoderConfig config(codec, 1, 2);
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->SetDefaultVideoEncoderConfig(config));
+ EXPECT_EQ(config, fme_->default_video_encoder_config());
+}
+
+// Test that a default video encoder config set before Init is applied to the
+// engine at Init time.
+TEST_F(ChannelManagerTest, SetDefaultVideoCodecBeforeInit) {
+ cricket::VideoCodec codec(96, "G264", 1280, 720, 60, 0);
+ cricket::VideoEncoderConfig config(codec, 1, 2);
+ EXPECT_TRUE(cm_->SetDefaultVideoEncoderConfig(config));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ(config, fme_->default_video_encoder_config());
+}
+
+TEST_F(ChannelManagerTest, SetAudioOptionsBeforeInit) {
+ // Test that values that we set before Init are applied.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in1", "audio-out1", 0x2));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ("audio-in1", fme_->audio_in_device());
+ EXPECT_EQ("audio-out1", fme_->audio_out_device());
+ EXPECT_EQ(0x2, fme_->audio_options());
+ // The delay offset was never set, so the engine default must remain.
+ // Compare against the named constant rather than a hard-coded 0 (the
+ // previous extra EXPECT_EQ(0, ...) duplicated this check with a literal
+ // and would contradict it if the default ever changed).
+ EXPECT_EQ(cricket::MediaEngineInterface::kDefaultAudioDelayOffset,
+ fme_->audio_delay_offset());
+}
+
+TEST_F(ChannelManagerTest, GetAudioOptionsBeforeInit) {
+ std::string audio_in, audio_out;
+ int opts;
+ // Test that GetAudioOptions works before Init.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in2", "audio-out2", 0x1));
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, &opts));
+ EXPECT_EQ("audio-in2", audio_in);
+ EXPECT_EQ("audio-out2", audio_out);
+ EXPECT_EQ(0x1, opts);
+ // Test that options set before Init can be gotten after Init.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in1", "audio-out1", 0x2));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, &opts));
+ EXPECT_EQ("audio-in1", audio_in);
+ EXPECT_EQ("audio-out1", audio_out);
+ EXPECT_EQ(0x2, opts);
+}
+
+// Each out-parameter of GetAudioOptions may independently be NULL; the
+// remaining ones must still be filled in.
+TEST_F(ChannelManagerTest, GetAudioOptionsWithNullParameters) {
+ std::string audio_in, audio_out;
+ int opts;
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in2", "audio-out2", 0x1));
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, NULL, NULL));
+ EXPECT_EQ("audio-in2", audio_in);
+ EXPECT_TRUE(cm_->GetAudioOptions(NULL, &audio_out, NULL));
+ EXPECT_EQ("audio-out2", audio_out);
+ EXPECT_TRUE(cm_->GetAudioOptions(NULL, NULL, &opts));
+ EXPECT_EQ(0x1, opts);
+}
+
+TEST_F(ChannelManagerTest, SetAudioOptions) {
+ // Test initial state.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ(std::string(cricket::DeviceManagerInterface::kDefaultDeviceName),
+ fme_->audio_in_device());
+ EXPECT_EQ(std::string(cricket::DeviceManagerInterface::kDefaultDeviceName),
+ fme_->audio_out_device());
+ EXPECT_EQ(cricket::MediaEngineInterface::DEFAULT_AUDIO_OPTIONS,
+ fme_->audio_options());
+ EXPECT_EQ(cricket::MediaEngineInterface::kDefaultAudioDelayOffset,
+ fme_->audio_delay_offset());
+ // Test setting defaults.
+ EXPECT_TRUE(cm_->SetAudioOptions("", "",
+ cricket::MediaEngineInterface::DEFAULT_AUDIO_OPTIONS));
+ EXPECT_EQ("", fme_->audio_in_device());
+ EXPECT_EQ("", fme_->audio_out_device());
+ EXPECT_EQ(cricket::MediaEngineInterface::DEFAULT_AUDIO_OPTIONS,
+ fme_->audio_options());
+ EXPECT_EQ(cricket::MediaEngineInterface::kDefaultAudioDelayOffset,
+ fme_->audio_delay_offset());
+ // Test setting specific values.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in1", "audio-out1", 0x2));
+ EXPECT_EQ("audio-in1", fme_->audio_in_device());
+ EXPECT_EQ("audio-out1", fme_->audio_out_device());
+ EXPECT_EQ(0x2, fme_->audio_options());
+ EXPECT_EQ(cricket::MediaEngineInterface::kDefaultAudioDelayOffset,
+ fme_->audio_delay_offset());
+ // Test setting bad values: "audio-in9" is not a known input device.
+ EXPECT_FALSE(cm_->SetAudioOptions("audio-in9", "audio-out2", 0x1));
+}
+
+TEST_F(ChannelManagerTest, GetAudioOptions) {
+ std::string audio_in, audio_out;
+ int opts;
+ // Test initial state.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, &opts));
+ EXPECT_EQ(std::string(cricket::DeviceManagerInterface::kDefaultDeviceName),
+ audio_in);
+ EXPECT_EQ(std::string(cricket::DeviceManagerInterface::kDefaultDeviceName),
+ audio_out);
+ EXPECT_EQ(cricket::MediaEngineInterface::DEFAULT_AUDIO_OPTIONS, opts);
+ // Test that we get back specific values that we set.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in1", "audio-out1", 0x2));
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, &opts));
+ EXPECT_EQ("audio-in1", audio_in);
+ EXPECT_EQ("audio-out1", audio_out);
+ EXPECT_EQ(0x2, opts);
+}
+
+TEST_F(ChannelManagerTest, SetCaptureDeviceBeforeInit) {
+ // Test that values that we set before Init are applied.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in2"));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ("video-in2", cm_->video_device_name());
+}
+
+TEST_F(ChannelManagerTest, GetCaptureDeviceBeforeInit) {
+ std::string video_in;
+ // Test that GetCaptureDevice works before Init.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in1"));
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+ // Test that options set before Init can be gotten after Init.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in2"));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in2", video_in);
+}
+
+TEST_F(ChannelManagerTest, SetCaptureDevice) {
+ // Test setting defaults.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->SetCaptureDevice("")); // will use DeviceManager default
+ EXPECT_EQ("video-in1", cm_->video_device_name());
+ // Test setting specific values.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in2"));
+ EXPECT_EQ("video-in2", cm_->video_device_name());
+ // TODO(juberti): Add test for invalid value here.
+}
+
+// Test unplugging and plugging back the preferred devices. When the preferred
+// device is unplugged, we fall back to the default device. When the preferred
+// device is plugged back, we use it.
+TEST_F(ChannelManagerTest, SetAudioOptionsUnplugPlug) {
+ // Set preferences "audio-in1" and "audio-out1" before init.
+ EXPECT_TRUE(cm_->SetAudioOptions("audio-in1", "audio-out1", 0x2));
+ // Unplug device "audio-in1" and "audio-out1".
+ std::vector<std::string> in_device_list, out_device_list;
+ in_device_list.push_back("audio-in2");
+ out_device_list.push_back("audio-out2");
+ fdm_->SetAudioInputDevices(in_device_list);
+ fdm_->SetAudioOutputDevices(out_device_list);
+ // Init should fall back to default devices.
+ EXPECT_TRUE(cm_->Init());
+ // The media engine should use the default.
+ EXPECT_EQ("", fme_->audio_in_device());
+ EXPECT_EQ("", fme_->audio_out_device());
+ // The channel manager keeps the preferences "audio-in1" and "audio-out1".
+ std::string audio_in, audio_out;
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, NULL));
+ EXPECT_EQ("audio-in1", audio_in);
+ EXPECT_EQ("audio-out1", audio_out);
+ cm_->Terminate();
+
+ // Plug devices "audio-in1" and "audio-out1" back.
+ in_device_list.push_back("audio-in1");
+ out_device_list.push_back("audio-out1");
+ fdm_->SetAudioInputDevices(in_device_list);
+ fdm_->SetAudioOutputDevices(out_device_list);
+ // Init again. The preferences, "audio-in1" and "audio-out1", are used.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ("audio-in1", fme_->audio_in_device());
+ EXPECT_EQ("audio-out1", fme_->audio_out_device());
+ EXPECT_TRUE(cm_->GetAudioOptions(&audio_in, &audio_out, NULL));
+ EXPECT_EQ("audio-in1", audio_in);
+ EXPECT_EQ("audio-out1", audio_out);
+}
+
+// We have one camera. Unplug it, fall back to no camera.
+TEST_F(ChannelManagerTest, SetCaptureDeviceUnplugPlugOneCamera) {
+ // Set preferences "video-in1" before init.
+ std::vector<std::string> vid_device_list;
+ vid_device_list.push_back("video-in1");
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in1"));
+
+ // Unplug "video-in1".
+ vid_device_list.clear();
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+
+ // Init should fall back to no camera.
+ EXPECT_TRUE(cm_->Init());
+ // The media engine should use no camera.
+ EXPECT_EQ("", cm_->video_device_name());
+ // The channel manager keeps the user preference "video-in1".
+ std::string video_in;
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+ cm_->Terminate();
+
+ // Plug device "video-in1" back.
+ vid_device_list.push_back("video-in1");
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+ // Init again. The user preferred device, "video-in1", is used.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ("video-in1", cm_->video_device_name());
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+}
+
+// We have multiple cameras. Unplug the preferred, fall back to another camera.
+TEST_F(ChannelManagerTest, SetCaptureDeviceUnplugPlugTwoDevices) {
+ // Set video device to "video-in1" before init.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in1"));
+ // Unplug device "video-in1".
+ std::vector<std::string> vid_device_list;
+ vid_device_list.push_back("video-in2");
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+ // Init should fall back to default device "video-in2".
+ EXPECT_TRUE(cm_->Init());
+ // The media engine should use the default device "video-in2".
+ EXPECT_EQ("video-in2", cm_->video_device_name());
+ // The channel manager keeps the user preference "video-in1".
+ std::string video_in;
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+ cm_->Terminate();
+
+ // Plug device "video-in1" back.
+ vid_device_list.push_back("video-in1");
+ fdm_->SetVideoCaptureDevices(vid_device_list);
+ // Init again. The user preferred device, "video-in1", is used.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ("video-in1", cm_->video_device_name());
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+}
+
+TEST_F(ChannelManagerTest, GetCaptureDevice) {
+ std::string video_in;
+ // Test setting/getting defaults.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->SetCaptureDevice(""));
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in1", video_in);
+ // Test setting/getting specific values.
+ EXPECT_TRUE(cm_->SetCaptureDevice("video-in2"));
+ EXPECT_TRUE(cm_->GetCaptureDevice(&video_in));
+ EXPECT_EQ("video-in2", video_in);
+}
+
+TEST_F(ChannelManagerTest, GetSetOutputVolumeBeforeInit) {
+ int level;
+ // Before init, SetOutputVolume() remembers the volume but does not change the
+ // volume of the engine. GetOutputVolume() should fail.
+ EXPECT_EQ(-1, fme_->output_volume());
+ EXPECT_FALSE(cm_->GetOutputVolume(&level));
+ EXPECT_FALSE(cm_->SetOutputVolume(-1)); // Invalid volume.
+ EXPECT_TRUE(cm_->SetOutputVolume(99));
+ EXPECT_EQ(-1, fme_->output_volume());
+
+ // Init() will apply the remembered volume.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->GetOutputVolume(&level));
+ EXPECT_EQ(99, level);
+ EXPECT_EQ(level, fme_->output_volume());
+
+ EXPECT_TRUE(cm_->SetOutputVolume(60));
+ EXPECT_TRUE(cm_->GetOutputVolume(&level));
+ EXPECT_EQ(60, level);
+ EXPECT_EQ(level, fme_->output_volume());
+}
+
+TEST_F(ChannelManagerTest, GetSetOutputVolume) {
+ int level;
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->GetOutputVolume(&level));
+ EXPECT_EQ(level, fme_->output_volume());
+
+ EXPECT_FALSE(cm_->SetOutputVolume(-1)); // Invalid volume.
+ EXPECT_TRUE(cm_->SetOutputVolume(60));
+ EXPECT_EQ(60, fme_->output_volume());
+ EXPECT_TRUE(cm_->GetOutputVolume(&level));
+ EXPECT_EQ(60, level);
+}
+
+// Test that a value set before Init is applied properly.
+TEST_F(ChannelManagerTest, SetLocalRendererBeforeInit) {
+ cricket::NullVideoRenderer renderer;
+ EXPECT_TRUE(cm_->SetLocalRenderer(&renderer));
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ(&renderer, fme_->local_renderer());
+}
+
+// Test that a value set after init is passed through properly.
+TEST_F(ChannelManagerTest, SetLocalRenderer) {
+ cricket::NullVideoRenderer renderer;
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_TRUE(cm_->SetLocalRenderer(&renderer));
+ EXPECT_EQ(&renderer, fme_->local_renderer());
+}
+
+// Test that logging options set before Init are applied properly,
+// and retained even after Init.
+TEST_F(ChannelManagerTest, SetLoggingBeforeInit) {
+ cm_->SetVoiceLogging(talk_base::LS_INFO, "test-voice");
+ cm_->SetVideoLogging(talk_base::LS_VERBOSE, "test-video");
+ EXPECT_EQ(talk_base::LS_INFO, fme_->voice_loglevel());
+ EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
+ EXPECT_EQ(talk_base::LS_VERBOSE, fme_->video_loglevel());
+ EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_EQ(talk_base::LS_INFO, fme_->voice_loglevel());
+ EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
+ EXPECT_EQ(talk_base::LS_VERBOSE, fme_->video_loglevel());
+ EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
+}
+
+// Test that logging options set after Init are applied properly.
+TEST_F(ChannelManagerTest, SetLogging) {
+ EXPECT_TRUE(cm_->Init());
+ cm_->SetVoiceLogging(talk_base::LS_INFO, "test-voice");
+ cm_->SetVideoLogging(talk_base::LS_VERBOSE, "test-video");
+ EXPECT_EQ(talk_base::LS_INFO, fme_->voice_loglevel());
+ EXPECT_STREQ("test-voice", fme_->voice_logfilter().c_str());
+ EXPECT_EQ(talk_base::LS_VERBOSE, fme_->video_loglevel());
+ EXPECT_STREQ("test-video", fme_->video_logfilter().c_str());
+}
+
+// Test that the Video/Voice Processors register and unregister
+TEST_F(ChannelManagerTest, RegisterProcessors) {
+ cricket::FakeMediaProcessor fmp;
+ EXPECT_TRUE(cm_->Init());
+ // Nothing is registered right after Init. (This pre-check previously
+ // appeared three times verbatim; once is sufficient.)
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
+
+ // Register/unregister on the RX direction only.
+ EXPECT_TRUE(cm_->RegisterVoiceProcessor(1,
+ &fmp,
+ cricket::MPD_RX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
+ EXPECT_TRUE(fme_->voice_processor_registered(cricket::MPD_RX));
+
+ EXPECT_TRUE(cm_->UnregisterVoiceProcessor(1,
+ &fmp,
+ cricket::MPD_RX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
+
+ // Register/unregister on the TX direction only.
+ EXPECT_TRUE(cm_->RegisterVoiceProcessor(1,
+ &fmp,
+ cricket::MPD_TX));
+ EXPECT_TRUE(fme_->voice_processor_registered(cricket::MPD_TX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
+
+ EXPECT_TRUE(cm_->UnregisterVoiceProcessor(1,
+ &fmp,
+ cricket::MPD_TX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_TX));
+ EXPECT_FALSE(fme_->voice_processor_registered(cricket::MPD_RX));
+}
+
+// Test that RTX support can only be toggled while the manager is not
+// initialized (before Init, or between Terminate and the next Init).
+TEST_F(ChannelManagerTest, SetVideoRtxEnabled) {
+ std::vector<VideoCodec> codecs;
+ const VideoCodec rtx_codec(96, "rtx", 0, 0, 0, 0);
+
+ // By default RTX is disabled.
+ cm_->GetSupportedVideoCodecs(&codecs);
+ EXPECT_FALSE(ContainsMatchingCodec(codecs, rtx_codec));
+
+ // Enable and check.
+ EXPECT_TRUE(cm_->SetVideoRtxEnabled(true));
+ cm_->GetSupportedVideoCodecs(&codecs);
+ EXPECT_TRUE(ContainsMatchingCodec(codecs, rtx_codec));
+
+ // Disable and check.
+ EXPECT_TRUE(cm_->SetVideoRtxEnabled(false));
+ cm_->GetSupportedVideoCodecs(&codecs);
+ EXPECT_FALSE(ContainsMatchingCodec(codecs, rtx_codec));
+
+ // Cannot toggle rtx after initialization.
+ EXPECT_TRUE(cm_->Init());
+ EXPECT_FALSE(cm_->SetVideoRtxEnabled(true));
+ EXPECT_FALSE(cm_->SetVideoRtxEnabled(false));
+
+ // Can set again after terminate.
+ cm_->Terminate();
+ EXPECT_TRUE(cm_->SetVideoRtxEnabled(true));
+ cm_->GetSupportedVideoCodecs(&codecs);
+ EXPECT_TRUE(ContainsMatchingCodec(codecs, rtx_codec));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.cc b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.cc
new file mode 100644
index 00000000000..1f3e0938fb9
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.cc
@@ -0,0 +1,208 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/currentspeakermonitor.h"
+
+#include "talk/base/logging.h"
+#include "talk/session/media/call.h"
+
+namespace cricket {
+
+namespace {
+const int kMaxAudioLevel = 9;
+// To avoid overswitching, we disable switching for a period of time after a
+// switch is done.
+const int kDefaultMinTimeBetweenSwitches = 1000;
+}
+
+// The monitor observes |call|/|session| but does not own them; callers must
+// keep both alive for the monitor's lifetime.
+CurrentSpeakerMonitor::CurrentSpeakerMonitor(Call* call, BaseSession* session)
+ : started_(false),
+ call_(call),
+ session_(session),
+ current_speaker_ssrc_(0),
+ earliest_permitted_switch_time_(0),
+ min_time_between_switches_(kDefaultMinTimeBetweenSwitches) {
+}
+
+// Stop() drops the signal connections made by Start() so no callback can
+// fire into a destroyed object.
+CurrentSpeakerMonitor::~CurrentSpeakerMonitor() {
+ Stop();
+}
+
+// Begin listening to the call's audio-monitor and stream-update signals.
+// Idempotent: calling Start() while already running is a no-op.
+void CurrentSpeakerMonitor::Start() {
+ if (started_) {
+ return;
+ }
+ call_->SignalAudioMonitor.connect(
+ this, &CurrentSpeakerMonitor::OnAudioMonitor);
+ call_->SignalMediaStreamsUpdate.connect(
+ this, &CurrentSpeakerMonitor::OnMediaStreamsUpdate);
+ started_ = true;
+}
+
+// Disconnect from the call's signals and reset all per-stream state so a
+// subsequent Start() begins fresh. No-op if not started.
+void CurrentSpeakerMonitor::Stop() {
+ if (!started_) {
+ return;
+ }
+ call_->SignalAudioMonitor.disconnect(this);
+ call_->SignalMediaStreamsUpdate.disconnect(this);
+ started_ = false;
+ ssrc_to_speaking_state_map_.clear();
+ current_speaker_ssrc_ = 0;
+ earliest_permitted_switch_time_ = 0;
+}
+
+// Adjusts the hold-off period applied after each speaker switch; see
+// kDefaultMinTimeBetweenSwitches for the default.
+void CurrentSpeakerMonitor::set_min_time_between_switches(
+ uint32 min_time_between_switches) {
+ min_time_between_switches_ = min_time_between_switches;
+}
+
+// Runs one step of the per-SSRC speaking-state machine using the latest
+// audio-level report, picks the loudest confirmed speaker, and fires
+// SignalUpdate when the current speaker changes (subject to the hold-off
+// period in min_time_between_switches_).
+void CurrentSpeakerMonitor::OnAudioMonitor(Call* call, const AudioInfo& info) {
+ std::map<uint32, int> active_ssrc_to_level_map;
+ cricket::AudioInfo::StreamList::const_iterator stream_list_it;
+ for (stream_list_it = info.active_streams.begin();
+ stream_list_it != info.active_streams.end(); ++stream_list_it) {
+ uint32 ssrc = stream_list_it->first;
+ active_ssrc_to_level_map[ssrc] = stream_list_it->second;
+
+ // It's possible we haven't yet added this source to our map. If so,
+ // add it now with a "not speaking" state.
+ if (ssrc_to_speaking_state_map_.find(ssrc) ==
+ ssrc_to_speaking_state_map_.end()) {
+ ssrc_to_speaking_state_map_[ssrc] = SS_NOT_SPEAKING;
+ }
+ }
+
+ int max_level = 0;
+ uint32 loudest_speaker_ssrc = 0;
+
+ // Update the speaking states of all participants based on the new audio
+ // level information. Also retain loudest speaker.
+ std::map<uint32, SpeakingState>::iterator state_it;
+ for (state_it = ssrc_to_speaking_state_map_.begin();
+ state_it != ssrc_to_speaking_state_map_.end(); ++state_it) {
+ bool is_previous_speaker = current_speaker_ssrc_ == state_it->first;
+
+ // This uses a state machine in order to gradually identify
+ // members as having started or stopped speaking. Matches the
+ // algorithm used by the hangouts js code.
+
+ std::map<uint32, int>::const_iterator level_it =
+ active_ssrc_to_level_map.find(state_it->first);
+ // Note that the stream map only contains streams with non-zero audio
+ // levels.
+ int level = (level_it != active_ssrc_to_level_map.end()) ?
+ level_it->second : 0;
+ switch (state_it->second) {
+ case SS_NOT_SPEAKING:
+ if (level > 0) {
+ // Reset level because we don't think they're really speaking yet;
+ // a second consecutive non-zero report is required (via
+ // SS_MIGHT_BE_SPEAKING) before they can win the comparison below.
+ level = 0;
+ state_it->second = SS_MIGHT_BE_SPEAKING;
+ } else {
+ // State unchanged.
+ }
+ break;
+ case SS_MIGHT_BE_SPEAKING:
+ if (level > 0) {
+ state_it->second = SS_SPEAKING;
+ } else {
+ state_it->second = SS_NOT_SPEAKING;
+ }
+ break;
+ case SS_SPEAKING:
+ if (level > 0) {
+ // State unchanged.
+ } else {
+ state_it->second = SS_WAS_SPEAKING_RECENTLY1;
+ if (is_previous_speaker) {
+ // Assume this is an inter-word silence and assign him the highest
+ // volume.
+ level = kMaxAudioLevel;
+ }
+ }
+ break;
+ case SS_WAS_SPEAKING_RECENTLY1:
+ if (level > 0) {
+ state_it->second = SS_SPEAKING;
+ } else {
+ state_it->second = SS_WAS_SPEAKING_RECENTLY2;
+ if (is_previous_speaker) {
+ // Assume this is an inter-word silence and assign him the highest
+ // volume.
+ level = kMaxAudioLevel;
+ }
+ }
+ break;
+ case SS_WAS_SPEAKING_RECENTLY2:
+ if (level > 0) {
+ state_it->second = SS_SPEAKING;
+ } else {
+ state_it->second = SS_NOT_SPEAKING;
+ }
+ break;
+ }
+
+ if (level > max_level) {
+ loudest_speaker_ssrc = state_it->first;
+ max_level = level;
+ } else if (level > 0 && level == max_level && is_previous_speaker) {
+ // Favor continuity of loudest speakers if audio levels are equal.
+ loudest_speaker_ssrc = state_it->first;
+ }
+ }
+
+ // We avoid over-switching by disabling switching for a period of time after
+ // a switch is done.
+ // NOTE(review): uint32 millisecond timestamps wrap; assumes wraparound is
+ // benign over a call's lifetime — confirm.
+ uint32 now = talk_base::Time();
+ if (earliest_permitted_switch_time_ <= now &&
+ current_speaker_ssrc_ != loudest_speaker_ssrc) {
+ current_speaker_ssrc_ = loudest_speaker_ssrc;
+ LOG(LS_INFO) << "Current speaker changed to " << current_speaker_ssrc_;
+ earliest_permitted_switch_time_ = now + min_time_between_switches_;
+ SignalUpdate(this, current_speaker_ssrc_);
+ }
+}
+
+// Keeps the speaking-state map in sync with the set of remote streams for
+// the monitored call/session.
+void CurrentSpeakerMonitor::OnMediaStreamsUpdate(Call* call,
+ Session* session,
+ const MediaStreams& added,
+ const MediaStreams& removed) {
+ // Only react to updates for the call and session we are monitoring.
+ if (call == call_ && session == session_) {
+ // This monitor keys speaking state by *audio* SSRC (the keys come from
+ // AudioInfo in OnAudioMonitor), so membership must be synced from the
+ // audio streams — the previous code iterated the video streams, which
+ // left the map out of sync with the SSRCs actually reported.
+ for (std::vector<cricket::StreamParams>::const_iterator
+ it = removed.audio().begin(); it != removed.audio().end(); ++it) {
+ ssrc_to_speaking_state_map_.erase(it->first_ssrc());
+ }
+
+ for (std::vector<cricket::StreamParams>::const_iterator
+ it = added.audio().begin(); it != added.audio().end(); ++it) {
+ ssrc_to_speaking_state_map_[it->first_ssrc()] = SS_NOT_SPEAKING;
+ }
+ }
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.h b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.h
new file mode 100644
index 00000000000..1781db58c49
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor.h
@@ -0,0 +1,100 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// CurrentSpeakerMonitor monitors the audio levels for a session and determines
+// which participant is currently speaking.
+
+#ifndef TALK_SESSION_MEDIA_CURRENTSPEAKERMONITOR_H_
+#define TALK_SESSION_MEDIA_CURRENTSPEAKERMONITOR_H_
+
+#include <map>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/sigslot.h"
+
+namespace cricket {
+
+class BaseSession;
+class Call;
+class Session;
+struct AudioInfo;
+struct MediaStreams;
+
+// Note that the call's audio monitor must be started before this is started.
+// It's recommended that the audio monitor be started with a 100 ms period.
+class CurrentSpeakerMonitor : public sigslot::has_slots<> {
+ public:
+ CurrentSpeakerMonitor(Call* call, BaseSession* session);
+ ~CurrentSpeakerMonitor();
+
+ BaseSession* session() const { return session_; }
+
+ void Start();
+ void Stop();
+
+ // Used by tests. Note that the actual minimum time between switches
+ // enforced by the monitor will be the given value plus or minus the
+ // resolution of the system clock.
+ void set_min_time_between_switches(uint32 min_time_between_switches);
+
+  // This is fired when the current speaker changes, and provides its audio
+ // SSRC. This only fires after the audio monitor on the underlying Call has
+ // been started.
+ sigslot::signal2<CurrentSpeakerMonitor*, uint32> SignalUpdate;
+
+ private:
+ void OnAudioMonitor(Call* call, const AudioInfo& info);
+ void OnMediaStreamsUpdate(Call* call,
+ Session* session,
+ const MediaStreams& added,
+ const MediaStreams& removed);
+
+ // These are states that a participant will pass through so that we gradually
+ // recognize that they have started and stopped speaking. This avoids
+ // "twitchiness".
+ enum SpeakingState {
+ SS_NOT_SPEAKING,
+ SS_MIGHT_BE_SPEAKING,
+ SS_SPEAKING,
+ SS_WAS_SPEAKING_RECENTLY1,
+ SS_WAS_SPEAKING_RECENTLY2
+ };
+
+ bool started_;
+ Call* call_;
+ BaseSession* session_;
+ std::map<uint32, SpeakingState> ssrc_to_speaking_state_map_;
+ uint32 current_speaker_ssrc_;
+ // To prevent overswitching, switching is disabled for some time after a
+ // switch is made. This gives us the earliest time a switch is permitted.
+ uint32 earliest_permitted_switch_time_;
+ uint32 min_time_between_switches_;
+};
+
+}
+
+#endif // TALK_SESSION_MEDIA_CURRENTSPEAKERMONITOR_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor_unittest.cc
new file mode 100644
index 00000000000..1306f894043
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/currentspeakermonitor_unittest.cc
@@ -0,0 +1,232 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/gunit.h"
+#include "talk/base/thread.h"
+#include "talk/session/media/call.h"
+#include "talk/session/media/currentspeakermonitor.h"
+
+namespace cricket {
+
+static const uint32 kSsrc1 = 1001;
+static const uint32 kSsrc2 = 1002;
+static const uint32 kMinTimeBetweenSwitches = 10;
+// Due to limited system clock resolution, the CurrentSpeakerMonitor may
+// actually require more or less time between switches than that specified
+// in the call to set_min_time_between_switches. To be safe, we sleep for
+// 90 ms more than the min time between switches before checking for a switch.
+// I am assuming system clocks do not have a coarser resolution than 90 ms.
+static const uint32 kSleepTimeBetweenSwitches = 100;
+
+class MockCall : public Call {
+ public:
+ MockCall() : Call(NULL) {}
+
+ void EmitAudioMonitor(const AudioInfo& info) {
+ SignalAudioMonitor(this, info);
+ }
+};
+
+class CurrentSpeakerMonitorTest : public testing::Test,
+ public sigslot::has_slots<> {
+ public:
+ CurrentSpeakerMonitorTest() {
+ call_ = new MockCall();
+ monitor_ = new CurrentSpeakerMonitor(call_, NULL);
+    // Shrink the minimum time between switches to 10 ms so we don't have to
+ // slow down our tests.
+ monitor_->set_min_time_between_switches(kMinTimeBetweenSwitches);
+ monitor_->SignalUpdate.connect(this, &CurrentSpeakerMonitorTest::OnUpdate);
+ current_speaker_ = 0;
+ num_changes_ = 0;
+ monitor_->Start();
+ }
+
+ ~CurrentSpeakerMonitorTest() {
+ delete monitor_;
+ delete call_;
+ }
+
+ protected:
+ MockCall* call_;
+ CurrentSpeakerMonitor* monitor_;
+ int num_changes_;
+ uint32 current_speaker_;
+
+ void OnUpdate(CurrentSpeakerMonitor* monitor, uint32 current_speaker) {
+ current_speaker_ = current_speaker;
+ num_changes_++;
+ }
+};
+
+static void InitAudioInfo(AudioInfo* info, int input_level, int output_level) {
+ info->input_level = input_level;
+ info->output_level = output_level;
+}
+
+TEST_F(CurrentSpeakerMonitorTest, NoActiveStreams) {
+ AudioInfo info;
+ InitAudioInfo(&info, 0, 0);
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, 0U);
+ EXPECT_EQ(num_changes_, 0);
+}
+
+TEST_F(CurrentSpeakerMonitorTest, MultipleActiveStreams) {
+ AudioInfo info;
+ InitAudioInfo(&info, 0, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ // No speaker recognized because the initial sample is treated as possibly
+ // just noise and disregarded.
+ EXPECT_EQ(current_speaker_, 0U);
+ EXPECT_EQ(num_changes_, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+}
+
+TEST_F(CurrentSpeakerMonitorTest, RapidSpeakerChange) {
+ AudioInfo info;
+ InitAudioInfo(&info, 0, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, 0U);
+ EXPECT_EQ(num_changes_, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 9));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 1));
+ call_->EmitAudioMonitor(info);
+
+ // We expect no speaker change because of the rapid change.
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+}
+
+TEST_F(CurrentSpeakerMonitorTest, SpeakerChange) {
+ AudioInfo info;
+ InitAudioInfo(&info, 0, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, 0U);
+ EXPECT_EQ(num_changes_, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ // Wait so the changes don't come so rapidly.
+ talk_base::Thread::SleepMs(kSleepTimeBetweenSwitches);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 9));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 1));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc1);
+ EXPECT_EQ(num_changes_, 2);
+}
+
+TEST_F(CurrentSpeakerMonitorTest, InterwordSilence) {
+ AudioInfo info;
+ InitAudioInfo(&info, 0, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, 0U);
+ EXPECT_EQ(num_changes_, 0);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 7));
+ call_->EmitAudioMonitor(info);
+
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ // Wait so the changes don't come so rapidly.
+ talk_base::Thread::SleepMs(kSleepTimeBetweenSwitches);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 0));
+ call_->EmitAudioMonitor(info);
+
+ // Current speaker shouldn't have changed because we treat this as an inter-
+ // word silence.
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 0));
+ call_->EmitAudioMonitor(info);
+
+ // Current speaker shouldn't have changed because we treat this as an inter-
+ // word silence.
+ EXPECT_EQ(current_speaker_, kSsrc2);
+ EXPECT_EQ(num_changes_, 1);
+
+ info.active_streams.push_back(std::make_pair(kSsrc1, 3));
+ info.active_streams.push_back(std::make_pair(kSsrc2, 0));
+ call_->EmitAudioMonitor(info);
+
+ // At this point, we should have concluded that SSRC2 stopped speaking.
+ EXPECT_EQ(current_speaker_, kSsrc1);
+ EXPECT_EQ(num_changes_, 2);
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediamessages.cc b/chromium/third_party/libjingle/source/talk/session/media/mediamessages.cc
new file mode 100644
index 00000000000..6b5d03cf95d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediamessages.cc
@@ -0,0 +1,394 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Documentation is in mediamessages.h.
+ */
+
+#include "talk/session/media/mediamessages.h"
+
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/parsing.h"
+#include "talk/session/media/mediasessionclient.h"
+#include "talk/xmllite/xmlelement.h"
+
+namespace cricket {
+
+namespace {
+
+// NOTE: There is no check here for duplicate streams, so check before
+// adding.
+void AddStream(std::vector<StreamParams>* streams, const StreamParams& stream) {
+ streams->push_back(stream);
+}
+
+bool ParseSsrc(const std::string& string, uint32* ssrc) {
+ return talk_base::FromString(string, ssrc);
+}
+
+bool ParseSsrc(const buzz::XmlElement* element, uint32* ssrc) {
+ if (element == NULL) {
+ return false;
+ }
+ return ParseSsrc(element->BodyText(), ssrc);
+}
+
+// Builds a <view> element according to the following spec:
+// goto/jinglemuc
+buzz::XmlElement* CreateViewElem(const std::string& name,
+ const std::string& type) {
+ buzz::XmlElement* view_elem =
+ new buzz::XmlElement(QN_JINGLE_DRAFT_VIEW, true);
+ view_elem->AddAttr(QN_NAME, name);
+ view_elem->SetAttr(QN_TYPE, type);
+ return view_elem;
+}
+
+buzz::XmlElement* CreateVideoViewElem(const std::string& content_name,
+ const std::string& type) {
+ return CreateViewElem(content_name, type);
+}
+
+buzz::XmlElement* CreateNoneVideoViewElem(const std::string& content_name) {
+ return CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_NONE);
+}
+
+buzz::XmlElement* CreateStaticVideoViewElem(const std::string& content_name,
+ const StaticVideoView& view) {
+ buzz::XmlElement* view_elem =
+ CreateVideoViewElem(content_name, STR_JINGLE_DRAFT_VIEW_TYPE_STATIC);
+ AddXmlAttr(view_elem, QN_SSRC, view.selector.ssrc);
+
+ buzz::XmlElement* params_elem = new buzz::XmlElement(QN_JINGLE_DRAFT_PARAMS);
+ AddXmlAttr(params_elem, QN_WIDTH, view.width);
+ AddXmlAttr(params_elem, QN_HEIGHT, view.height);
+ AddXmlAttr(params_elem, QN_FRAMERATE, view.framerate);
+ AddXmlAttr(params_elem, QN_PREFERENCE, view.preference);
+ view_elem->AddElement(params_elem);
+
+ return view_elem;
+}
+
+} // namespace
+
+bool MediaStreams::GetAudioStream(
+ const StreamSelector& selector, StreamParams* stream) {
+ return GetStream(audio_, selector, stream);
+}
+
+bool MediaStreams::GetVideoStream(
+ const StreamSelector& selector, StreamParams* stream) {
+ return GetStream(video_, selector, stream);
+}
+
+bool MediaStreams::GetDataStream(
+ const StreamSelector& selector, StreamParams* stream) {
+ return GetStream(data_, selector, stream);
+}
+
+void MediaStreams::CopyFrom(const MediaStreams& streams) {
+ audio_ = streams.audio_;
+ video_ = streams.video_;
+ data_ = streams.data_;
+}
+
+void MediaStreams::AddAudioStream(const StreamParams& stream) {
+ AddStream(&audio_, stream);
+}
+
+void MediaStreams::AddVideoStream(const StreamParams& stream) {
+ AddStream(&video_, stream);
+}
+
+void MediaStreams::AddDataStream(const StreamParams& stream) {
+ AddStream(&data_, stream);
+}
+
+bool MediaStreams::RemoveAudioStream(
+ const StreamSelector& selector) {
+ return RemoveStream(&audio_, selector);
+}
+
+bool MediaStreams::RemoveVideoStream(
+ const StreamSelector& selector) {
+ return RemoveStream(&video_, selector);
+}
+
+bool MediaStreams::RemoveDataStream(
+ const StreamSelector& selector) {
+ return RemoveStream(&data_, selector);
+}
+
+bool IsJingleViewRequest(const buzz::XmlElement* action_elem) {
+ return action_elem->FirstNamed(QN_JINGLE_DRAFT_VIEW) != NULL;
+}
+
+bool ParseStaticVideoView(const buzz::XmlElement* view_elem,
+ StaticVideoView* view,
+ ParseError* error) {
+ uint32 ssrc;
+ if (!ParseSsrc(view_elem->Attr(QN_SSRC), &ssrc)) {
+ return BadParse("Invalid or missing view ssrc.", error);
+ }
+ view->selector = StreamSelector(ssrc);
+
+ const buzz::XmlElement* params_elem =
+ view_elem->FirstNamed(QN_JINGLE_DRAFT_PARAMS);
+ if (params_elem) {
+ view->width = GetXmlAttr(params_elem, QN_WIDTH, 0);
+ view->height = GetXmlAttr(params_elem, QN_HEIGHT, 0);
+ view->framerate = GetXmlAttr(params_elem, QN_FRAMERATE, 0);
+ view->preference = GetXmlAttr(params_elem, QN_PREFERENCE, 0);
+ } else {
+ return BadParse("Missing view params.", error);
+ }
+
+ return true;
+}
+
+bool ParseJingleViewRequest(const buzz::XmlElement* action_elem,
+ ViewRequest* view_request,
+ ParseError* error) {
+ for (const buzz::XmlElement* view_elem =
+ action_elem->FirstNamed(QN_JINGLE_DRAFT_VIEW);
+ view_elem != NULL;
+ view_elem = view_elem->NextNamed(QN_JINGLE_DRAFT_VIEW)) {
+ std::string type = view_elem->Attr(QN_TYPE);
+ if (STR_JINGLE_DRAFT_VIEW_TYPE_NONE == type) {
+ view_request->static_video_views.clear();
+ return true;
+ } else if (STR_JINGLE_DRAFT_VIEW_TYPE_STATIC == type) {
+ StaticVideoView static_video_view(StreamSelector(0), 0, 0, 0);
+ if (!ParseStaticVideoView(view_elem, &static_video_view, error)) {
+ return false;
+ }
+ view_request->static_video_views.push_back(static_video_view);
+ } else {
+ LOG(LS_INFO) << "Ingnoring unknown view type: " << type;
+ }
+ }
+ return true;
+}
+
+bool WriteJingleViewRequest(const std::string& content_name,
+ const ViewRequest& request,
+ XmlElements* elems,
+ WriteError* error) {
+ if (request.static_video_views.empty()) {
+ elems->push_back(CreateNoneVideoViewElem(content_name));
+ } else {
+ for (StaticVideoViews::const_iterator view =
+ request.static_video_views.begin();
+ view != request.static_video_views.end(); ++view) {
+ elems->push_back(CreateStaticVideoViewElem(content_name, *view));
+ }
+ }
+ return true;
+}
+
+bool ParseSsrcAsLegacyStream(const buzz::XmlElement* desc_elem,
+ std::vector<StreamParams>* streams,
+ ParseError* error) {
+ const std::string ssrc_str = desc_elem->Attr(QN_SSRC);
+ if (!ssrc_str.empty()) {
+ uint32 ssrc;
+ if (!ParseSsrc(ssrc_str, &ssrc)) {
+ return BadParse("Missing or invalid ssrc.", error);
+ }
+
+ streams->push_back(StreamParams::CreateLegacy(ssrc));
+ }
+ return true;
+}
+
+bool ParseSsrcs(const buzz::XmlElement* parent_elem,
+ std::vector<uint32>* ssrcs,
+ ParseError* error) {
+ for (const buzz::XmlElement* ssrc_elem =
+ parent_elem->FirstNamed(QN_JINGLE_DRAFT_SSRC);
+ ssrc_elem != NULL;
+ ssrc_elem = ssrc_elem->NextNamed(QN_JINGLE_DRAFT_SSRC)) {
+ uint32 ssrc;
+ if (!ParseSsrc(ssrc_elem->BodyText(), &ssrc)) {
+ return BadParse("Missing or invalid ssrc.", error);
+ }
+
+ ssrcs->push_back(ssrc);
+ }
+ return true;
+}
+
+bool ParseSsrcGroups(const buzz::XmlElement* parent_elem,
+ std::vector<SsrcGroup>* ssrc_groups,
+ ParseError* error) {
+ for (const buzz::XmlElement* group_elem =
+ parent_elem->FirstNamed(QN_JINGLE_DRAFT_SSRC_GROUP);
+ group_elem != NULL;
+ group_elem = group_elem->NextNamed(QN_JINGLE_DRAFT_SSRC_GROUP)) {
+ std::string semantics = group_elem->Attr(QN_SEMANTICS);
+ std::vector<uint32> ssrcs;
+ if (!ParseSsrcs(group_elem, &ssrcs, error)) {
+ return false;
+ }
+ ssrc_groups->push_back(SsrcGroup(semantics, ssrcs));
+ }
+ return true;
+}
+
+bool ParseJingleStream(const buzz::XmlElement* stream_elem,
+ std::vector<StreamParams>* streams,
+ ParseError* error) {
+ StreamParams stream;
+ // We treat the nick as a stream groupid.
+ stream.groupid = stream_elem->Attr(QN_NICK);
+ stream.id = stream_elem->Attr(QN_NAME);
+ stream.type = stream_elem->Attr(QN_TYPE);
+ stream.display = stream_elem->Attr(QN_DISPLAY);
+ stream.cname = stream_elem->Attr(QN_CNAME);
+ if (!ParseSsrcs(stream_elem, &(stream.ssrcs), error)) {
+ return false;
+ }
+ std::vector<SsrcGroup> ssrc_groups;
+ if (!ParseSsrcGroups(stream_elem, &(stream.ssrc_groups), error)) {
+ return false;
+ }
+ streams->push_back(stream);
+ return true;
+}
+
+bool ParseJingleRtpHeaderExtensions(const buzz::XmlElement* parent_elem,
+ std::vector<RtpHeaderExtension>* hdrexts,
+ ParseError* error) {
+ for (const buzz::XmlElement* hdrext_elem =
+ parent_elem->FirstNamed(QN_JINGLE_RTP_HDREXT);
+ hdrext_elem != NULL;
+ hdrext_elem = hdrext_elem->NextNamed(QN_JINGLE_RTP_HDREXT)) {
+ std::string uri = hdrext_elem->Attr(QN_URI);
+ int id = GetXmlAttr(hdrext_elem, QN_ID, 0);
+ if (id <= 0) {
+ return BadParse("Invalid RTP header extension id.", error);
+ }
+ hdrexts->push_back(RtpHeaderExtension(uri, id));
+ }
+ return true;
+}
+
+bool HasJingleStreams(const buzz::XmlElement* desc_elem) {
+ const buzz::XmlElement* streams_elem =
+ desc_elem->FirstNamed(QN_JINGLE_DRAFT_STREAMS);
+ return (streams_elem != NULL);
+}
+
+bool ParseJingleStreams(const buzz::XmlElement* desc_elem,
+ std::vector<StreamParams>* streams,
+ ParseError* error) {
+ const buzz::XmlElement* streams_elem =
+ desc_elem->FirstNamed(QN_JINGLE_DRAFT_STREAMS);
+ if (streams_elem == NULL) {
+ return BadParse("Missing streams element.", error);
+ }
+ for (const buzz::XmlElement* stream_elem =
+ streams_elem->FirstNamed(QN_JINGLE_DRAFT_STREAM);
+ stream_elem != NULL;
+ stream_elem = stream_elem->NextNamed(QN_JINGLE_DRAFT_STREAM)) {
+ if (!ParseJingleStream(stream_elem, streams, error)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void WriteSsrcs(const std::vector<uint32>& ssrcs,
+ buzz::XmlElement* parent_elem) {
+ for (std::vector<uint32>::const_iterator ssrc = ssrcs.begin();
+ ssrc != ssrcs.end(); ++ssrc) {
+ buzz::XmlElement* ssrc_elem =
+ new buzz::XmlElement(QN_JINGLE_DRAFT_SSRC, false);
+ SetXmlBody(ssrc_elem, *ssrc);
+
+ parent_elem->AddElement(ssrc_elem);
+ }
+}
+
+void WriteSsrcGroups(const std::vector<SsrcGroup>& groups,
+ buzz::XmlElement* parent_elem) {
+ for (std::vector<SsrcGroup>::const_iterator group = groups.begin();
+ group != groups.end(); ++group) {
+ buzz::XmlElement* group_elem =
+ new buzz::XmlElement(QN_JINGLE_DRAFT_SSRC_GROUP, false);
+ AddXmlAttrIfNonEmpty(group_elem, QN_SEMANTICS, group->semantics);
+ WriteSsrcs(group->ssrcs, group_elem);
+
+ parent_elem->AddElement(group_elem);
+ }
+}
+
+void WriteJingleStream(const StreamParams& stream,
+ buzz::XmlElement* parent_elem) {
+ buzz::XmlElement* stream_elem =
+ new buzz::XmlElement(QN_JINGLE_DRAFT_STREAM, false);
+ // We treat the nick as a stream groupid.
+ AddXmlAttrIfNonEmpty(stream_elem, QN_NICK, stream.groupid);
+ AddXmlAttrIfNonEmpty(stream_elem, QN_NAME, stream.id);
+ AddXmlAttrIfNonEmpty(stream_elem, QN_TYPE, stream.type);
+ AddXmlAttrIfNonEmpty(stream_elem, QN_DISPLAY, stream.display);
+ AddXmlAttrIfNonEmpty(stream_elem, QN_CNAME, stream.cname);
+ WriteSsrcs(stream.ssrcs, stream_elem);
+ WriteSsrcGroups(stream.ssrc_groups, stream_elem);
+
+ parent_elem->AddElement(stream_elem);
+}
+
+void WriteJingleStreams(const std::vector<StreamParams>& streams,
+ buzz::XmlElement* parent_elem) {
+ buzz::XmlElement* streams_elem =
+ new buzz::XmlElement(QN_JINGLE_DRAFT_STREAMS, true);
+ for (std::vector<StreamParams>::const_iterator stream = streams.begin();
+ stream != streams.end(); ++stream) {
+ WriteJingleStream(*stream, streams_elem);
+ }
+
+ parent_elem->AddElement(streams_elem);
+}
+
+void WriteJingleRtpHeaderExtensions(
+ const std::vector<RtpHeaderExtension>& hdrexts,
+ buzz::XmlElement* parent_elem) {
+ for (std::vector<RtpHeaderExtension>::const_iterator hdrext = hdrexts.begin();
+ hdrext != hdrexts.end(); ++hdrext) {
+ buzz::XmlElement* hdrext_elem =
+ new buzz::XmlElement(QN_JINGLE_RTP_HDREXT, false);
+ AddXmlAttr(hdrext_elem, QN_URI, hdrext->uri);
+ AddXmlAttr(hdrext_elem, QN_ID, hdrext->id);
+ parent_elem->AddElement(hdrext_elem);
+ }
+}
+
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediamessages.h b/chromium/third_party/libjingle/source/talk/session/media/mediamessages.h
new file mode 100644
index 00000000000..dcb48a85a3d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediamessages.h
@@ -0,0 +1,169 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * A collection of functions and types for serializing and
+ * deserializing Jingle session messages related to media.
+ * Specifically, the <notify> and <view> messages. They are not yet
+ * standardized, but their current documentation can be found at:
+ * goto/jinglemuc
+ */
+
+#ifndef TALK_SESSION_MEDIA_MEDIAMESSAGES_H_
+#define TALK_SESSION_MEDIA_MEDIAMESSAGES_H_
+
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/media/base/mediachannel.h" // For RtpHeaderExtension
+#include "talk/media/base/streamparams.h"
+#include "talk/p2p/base/parsing.h"
+#include "talk/p2p/base/sessiondescription.h"
+
+namespace cricket {
+
+// A collection of audio, video, and data streams. Most of the
+// methods are merely for convenience. Many of these methods are keyed
+// by ssrc, which is the source identifier in the RTP spec
+// (http://tools.ietf.org/html/rfc3550).
+struct MediaStreams {
+ public:
+ MediaStreams() {}
+ void CopyFrom(const MediaStreams& sources);
+
+ bool empty() const {
+ return audio_.empty() && video_.empty() && data_.empty();
+ }
+
+ std::vector<StreamParams>* mutable_audio() { return &audio_; }
+ std::vector<StreamParams>* mutable_video() { return &video_; }
+ std::vector<StreamParams>* mutable_data() { return &data_; }
+ const std::vector<StreamParams>& audio() const { return audio_; }
+ const std::vector<StreamParams>& video() const { return video_; }
+ const std::vector<StreamParams>& data() const { return data_; }
+
+ // Gets a stream, returning true if found.
+ bool GetAudioStream(
+ const StreamSelector& selector, StreamParams* stream);
+ bool GetVideoStream(
+ const StreamSelector& selector, StreamParams* stream);
+ bool GetDataStream(
+ const StreamSelector& selector, StreamParams* stream);
+ // Adds a stream.
+ void AddAudioStream(const StreamParams& stream);
+ void AddVideoStream(const StreamParams& stream);
+ void AddDataStream(const StreamParams& stream);
+ // Removes a stream, returning true if found and removed.
+ bool RemoveAudioStream(const StreamSelector& selector);
+ bool RemoveVideoStream(const StreamSelector& selector);
+ bool RemoveDataStream(const StreamSelector& selector);
+
+ private:
+ std::vector<StreamParams> audio_;
+ std::vector<StreamParams> video_;
+ std::vector<StreamParams> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreams);
+};
+
+// In a <view> message, there are a number of views specified. This
+// represents one such view. We currently only support "static"
+// views.
+struct StaticVideoView {
+ StaticVideoView(const StreamSelector& selector,
+ int width, int height, int framerate)
+ : selector(selector),
+ width(width),
+ height(height),
+ framerate(framerate),
+ preference(0) {
+ }
+
+ StreamSelector selector;
+ int width;
+ int height;
+ int framerate;
+ int preference;
+};
+
+typedef std::vector<StaticVideoView> StaticVideoViews;
+
+// Represents a whole view request message, which contains many views.
+struct ViewRequest {
+ StaticVideoViews static_video_views;
+};
+
+// If the parent element (usually <jingle>) is a jingle view.
+bool IsJingleViewRequest(const buzz::XmlElement* action_elem);
+
+// Parses a view request from the parent element (usually
+// <jingle>). If it fails, it returns false and fills an error
+// message.
+bool ParseJingleViewRequest(const buzz::XmlElement* action_elem,
+ ViewRequest* view_request,
+ ParseError* error);
+
+// Serializes a view request to XML. If it fails, returns false and
+// fills in an error message.
+bool WriteJingleViewRequest(const std::string& content_name,
+ const ViewRequest& view,
+ XmlElements* elems,
+ WriteError* error);
+
+// TODO(pthatcher): Get rid of legacy source notify and replace with
+// description-info as soon as reflector is capable of sending it.
+bool IsSourcesNotify(const buzz::XmlElement* action_elem);
+
+// If the given elem has <streams>.
+bool HasJingleStreams(const buzz::XmlElement* desc_elem);
+
+// Parses streams from a jingle <description>. If it fails, returns
+// false and fills an error message.
+bool ParseJingleStreams(const buzz::XmlElement* desc_elem,
+ std::vector<StreamParams>* streams,
+ ParseError* error);
+
+// Write a <streams> element to the parent_elem.
+void WriteJingleStreams(const std::vector<StreamParams>& streams,
+ buzz::XmlElement* parent_elem);
+
+// Parses rtp header extensions from a jingle <description>. If it
+// fails, returns false and fills an error message.
+bool ParseJingleRtpHeaderExtensions(
+ const buzz::XmlElement* desc_elem,
+ std::vector<RtpHeaderExtension>* hdrexts,
+ ParseError* error);
+
+// Writes <rtp-hdrext> elements to the parent_elem.
+void WriteJingleRtpHeaderExtensions(
+ const std::vector<RtpHeaderExtension>& hdrexts,
+ buzz::XmlElement* parent_elem);
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIAMESSAGES_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediamessages_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/mediamessages_unittest.cc
new file mode 100644
index 00000000000..c7c81c3d2e6
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediamessages_unittest.cc
@@ -0,0 +1,363 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/mediamessages.h"
+
+#include <string>
+#include <vector>
+
+#include "talk/base/gunit.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/session/media/mediasessionclient.h"
+#include "talk/xmllite/xmlelement.h"
+
+// Unit tests for mediamessages.cc.
+
+namespace cricket {
+
+namespace {
+
+static const char kViewVideoNoneXml[] =
+ "<view xmlns='google:jingle'"
+ " name='video1'"
+ " type='none'"
+ "/>";
+
// Test fixture for the mediamessages.cc unit tests.  Provides helpers that
// build the expected XML strings and cricket structures the tests compare
// against.
class MediaMessagesTest : public testing::Test {
 public:
  // CreateMediaSessionDescription uses a static variable cricket::NS_JINGLE_RTP
  // defined in another file and cannot be used to initialize another static
  // variable (http://www.parashift.com/c++-faq-lite/ctors.html#faq-10.14)
  MediaMessagesTest()
      : remote_description_(CreateMediaSessionDescription("audio1", "video1")) {
  }

 protected:
  // Returns the XML of a static 640x480@30fps <view> request for |ssrc|.
  static std::string ViewVideoStaticVgaXml(const std::string& ssrc) {
    return "<view xmlns='google:jingle'"
           " name='video1'"
           " type='static'"
           " ssrc='" + ssrc + "'"
           ">"
           "<params"
           " width='640'"
           " height='480'"
           " framerate='30'"
           " preference='0'"
           " />"
           "</view>";
  }

  // Builds a StreamParams carrying two ssrcs grouped under |semantics|.
  static cricket::StreamParams CreateStream(const std::string& nick,
                                            const std::string& name,
                                            uint32 ssrc1,
                                            uint32 ssrc2,
                                            const std::string& semantics,
                                            const std::string& type,
                                            const std::string& display) {
    StreamParams stream;
    stream.groupid = nick;
    stream.id = name;
    stream.ssrcs.push_back(ssrc1);
    stream.ssrcs.push_back(ssrc2);
    stream.ssrc_groups.push_back(
        cricket::SsrcGroup(semantics, stream.ssrcs));
    stream.type = type;
    stream.display = display;
    return stream;
  }

  // Wraps two serialized <stream> elements in a <streams> element.
  static std::string StreamsXml(const std::string& stream1,
                                const std::string& stream2) {
    return "<streams xmlns='google:jingle'>"
        + stream1
        + stream2 +
        "</streams>";
  }

  // Returns the XML for a single <stream> element with two ssrcs and one
  // ssrc-group.
  static std::string StreamXml(const std::string& nick,
                               const std::string& name,
                               const std::string& ssrc1,
                               const std::string& ssrc2,
                               const std::string& semantics,
                               const std::string& type,
                               const std::string& display) {
    return "<stream"
           " nick='" + nick + "'"
           " name='" + name + "'"
           " type='" + type + "'"
           " display='" + display + "'"
           ">"
           "<ssrc>" + ssrc1 + "</ssrc>"
           "<ssrc>" + ssrc2 + "</ssrc>"
           "<ssrc-group"
           " semantics='" + semantics + "'"
           ">"
           "<ssrc>" + ssrc1 + "</ssrc>"
           "<ssrc>" + ssrc2 + "</ssrc>"
           "</ssrc-group>"
           "</stream>";
  }

  // Wraps two serialized <rtp-hdrext> elements in an <rtp:description>.
  static std::string HeaderExtensionsXml(const std::string& hdrext1,
                                         const std::string& hdrext2) {
    return "<rtp:description xmlns:rtp=\"urn:xmpp:jingle:apps:rtp:1\">"
        + hdrext1
        + hdrext2 +
        "</rtp:description>";
  }

  // Returns the XML for a single <rtp-hdrext> element.
  static std::string HeaderExtensionXml(const std::string& uri,
                                        const std::string& id) {
    return "<rtp:rtp-hdrext"
           " uri='" + uri + "'"
           " id='" + id + "'"
           "/>";
  }

  // Builds a two-content (audio + video) session description.  The caller
  // takes ownership of the returned object.
  static cricket::SessionDescription* CreateMediaSessionDescription(
      const std::string& audio_content_name,
      const std::string& video_content_name) {
    cricket::SessionDescription* desc = new cricket::SessionDescription();
    desc->AddContent(audio_content_name, cricket::NS_JINGLE_RTP,
                     new cricket::AudioContentDescription());
    desc->AddContent(video_content_name, cricket::NS_JINGLE_RTP,
                     new cricket::VideoContentDescription());
    return desc;
  }

  // Deletes every element owned by |elements| and empties the vector.
  // Returns the number of elements that were deleted.
  size_t ClearXmlElements(cricket::XmlElements* elements) {
    size_t size = elements->size();
    for (size_t i = 0; i < size; i++) {
      delete elements->at(i);
    }
    elements->clear();
    return size;
  }

  talk_base::scoped_ptr<cricket::SessionDescription> remote_description_;
};
+
+} // anonymous namespace
+
// Test serializing/deserializing an empty <view> message.
TEST_F(MediaMessagesTest, ViewNoneToFromXml) {
  // Ownership of expected_view_elem passes to action_elem via AddElement.
  buzz::XmlElement* expected_view_elem =
      buzz::XmlElement::ForStr(kViewVideoNoneXml);
  talk_base::scoped_ptr<buzz::XmlElement> action_elem(
      new buzz::XmlElement(QN_JINGLE));

  EXPECT_FALSE(cricket::IsJingleViewRequest(action_elem.get()));
  action_elem->AddElement(expected_view_elem);
  EXPECT_TRUE(cricket::IsJingleViewRequest(action_elem.get()));

  cricket::ViewRequest view_request;
  cricket::XmlElements actual_view_elems;
  cricket::WriteError error;

  // An empty ViewRequest should serialize as a single type='none' view.
  ASSERT_TRUE(cricket::WriteJingleViewRequest(
      "video1", view_request, &actual_view_elems, &error));

  ASSERT_EQ(1U, actual_view_elems.size());
  EXPECT_EQ(expected_view_elem->Str(), actual_view_elems[0]->Str());
  ClearXmlElements(&actual_view_elems);

  cricket::ParseError parse_error;
  EXPECT_TRUE(cricket::IsJingleViewRequest(action_elem.get()));
  ASSERT_TRUE(cricket::ParseJingleViewRequest(
      action_elem.get(), &view_request, &parse_error));
  EXPECT_EQ(0U, view_request.static_video_views.size());
}
+
// Test serializing/deserializing a simple vga <view> message.
TEST_F(MediaMessagesTest, ViewVgaToFromXml) {
  talk_base::scoped_ptr<buzz::XmlElement> action_elem(
      new buzz::XmlElement(QN_JINGLE));
  // action_elem takes ownership of both view elements via AddElement.
  buzz::XmlElement* expected_view_elem1 =
      buzz::XmlElement::ForStr(ViewVideoStaticVgaXml("1234"));
  buzz::XmlElement* expected_view_elem2 =
      buzz::XmlElement::ForStr(ViewVideoStaticVgaXml("2468"));
  action_elem->AddElement(expected_view_elem1);
  action_elem->AddElement(expected_view_elem2);

  cricket::ViewRequest view_request;
  cricket::XmlElements actual_view_elems;
  cricket::WriteError error;

  view_request.static_video_views.push_back(cricket::StaticVideoView(
      cricket::StreamSelector(1234), 640, 480, 30));
  view_request.static_video_views.push_back(cricket::StaticVideoView(
      cricket::StreamSelector(2468), 640, 480, 30));

  // Each StaticVideoView should serialize to its own <view> element.
  ASSERT_TRUE(cricket::WriteJingleViewRequest(
      "video1", view_request, &actual_view_elems, &error));

  ASSERT_EQ(2U, actual_view_elems.size());
  EXPECT_EQ(expected_view_elem1->Str(), actual_view_elems[0]->Str());
  EXPECT_EQ(expected_view_elem2->Str(), actual_view_elems[1]->Str());
  ClearXmlElements(&actual_view_elems);

  view_request.static_video_views.clear();
  cricket::ParseError parse_error;
  EXPECT_TRUE(cricket::IsJingleViewRequest(action_elem.get()));
  ASSERT_TRUE(cricket::ParseJingleViewRequest(
      action_elem.get(), &view_request, &parse_error));
  EXPECT_EQ(2U, view_request.static_video_views.size());
  EXPECT_EQ(1234U, view_request.static_video_views[0].selector.ssrc);
  EXPECT_EQ(640, view_request.static_video_views[0].width);
  EXPECT_EQ(480, view_request.static_video_views[0].height);
  EXPECT_EQ(30, view_request.static_video_views[0].framerate);
  EXPECT_EQ(2468U, view_request.static_video_views[1].selector.ssrc);
}
+
+// Test deserializing bad view XML.
+TEST_F(MediaMessagesTest, ParseBadViewXml) {
+ talk_base::scoped_ptr<buzz::XmlElement> action_elem(
+ new buzz::XmlElement(QN_JINGLE));
+ buzz::XmlElement* view_elem =
+ buzz::XmlElement::ForStr(ViewVideoStaticVgaXml("not-an-ssrc"));
+ action_elem->AddElement(view_elem);
+
+ cricket::ViewRequest view_request;
+ cricket::ParseError parse_error;
+ ASSERT_FALSE(cricket::ParseJingleViewRequest(
+ action_elem.get(), &view_request, &parse_error));
+}
+
+
// Test serializing/deserializing typical streams xml.
TEST_F(MediaMessagesTest, StreamsToFromXml) {
  talk_base::scoped_ptr<buzz::XmlElement> expected_streams_elem(
      buzz::XmlElement::ForStr(
          StreamsXml(
              StreamXml("nick1", "stream1", "101", "102",
                        "semantics1", "type1", "display1"),
              StreamXml("nick2", "stream2", "201", "202",
                        "semantics2", "type2", "display2"))));

  std::vector<cricket::StreamParams> expected_streams;
  expected_streams.push_back(CreateStream("nick1", "stream1", 101U, 102U,
                                          "semantics1", "type1", "display1"));
  expected_streams.push_back(CreateStream("nick2", "stream2", 201U, 202U,
                                          "semantics2", "type2", "display2"));

  // Serialize: a <streams> element is written under the description elem.
  talk_base::scoped_ptr<buzz::XmlElement> actual_desc_elem(
      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT));
  cricket::WriteJingleStreams(expected_streams, actual_desc_elem.get());

  const buzz::XmlElement* actual_streams_elem =
      actual_desc_elem->FirstNamed(QN_JINGLE_DRAFT_STREAMS);
  ASSERT_TRUE(actual_streams_elem != NULL);
  EXPECT_EQ(expected_streams_elem->Str(), actual_streams_elem->Str());

  // Deserialize: parsing the hand-written XML recovers the stream params.
  talk_base::scoped_ptr<buzz::XmlElement> expected_desc_elem(
      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT));
  expected_desc_elem->AddElement(new buzz::XmlElement(
      *expected_streams_elem));
  std::vector<cricket::StreamParams> actual_streams;
  cricket::ParseError parse_error;

  EXPECT_TRUE(cricket::HasJingleStreams(expected_desc_elem.get()));
  ASSERT_TRUE(cricket::ParseJingleStreams(
      expected_desc_elem.get(), &actual_streams, &parse_error));
  EXPECT_EQ(2U, actual_streams.size());
  EXPECT_EQ(expected_streams[0], actual_streams[0]);
  EXPECT_EQ(expected_streams[1], actual_streams[1]);
}
+
+// Test deserializing bad streams xml.
+TEST_F(MediaMessagesTest, StreamsFromBadXml) {
+ talk_base::scoped_ptr<buzz::XmlElement> streams_elem(
+ buzz::XmlElement::ForStr(
+ StreamsXml(
+ StreamXml("nick1", "name1", "101", "not-an-ssrc",
+ "semantics1", "type1", "display1"),
+ StreamXml("nick2", "name2", "202", "not-an-ssrc",
+ "semantics2", "type2", "display2"))));
+ talk_base::scoped_ptr<buzz::XmlElement> desc_elem(
+ new buzz::XmlElement(QN_JINGLE_RTP_CONTENT));
+ desc_elem->AddElement(new buzz::XmlElement(*streams_elem));
+
+ std::vector<cricket::StreamParams> actual_streams;
+ cricket::ParseError parse_error;
+ ASSERT_FALSE(cricket::ParseJingleStreams(
+ desc_elem.get(), &actual_streams, &parse_error));
+}
+
// Test serializing/deserializing typical RTP Header Extension xml.
TEST_F(MediaMessagesTest, HeaderExtensionsToFromXml) {
  talk_base::scoped_ptr<buzz::XmlElement> expected_desc_elem(
      buzz::XmlElement::ForStr(
          HeaderExtensionsXml(
              HeaderExtensionXml("abc", "123"),
              HeaderExtensionXml("def", "456"))));

  std::vector<cricket::RtpHeaderExtension> expected_hdrexts;
  expected_hdrexts.push_back(RtpHeaderExtension("abc", 123));
  expected_hdrexts.push_back(RtpHeaderExtension("def", 456));

  // Serialize and compare against the hand-written XML.
  talk_base::scoped_ptr<buzz::XmlElement> actual_desc_elem(
      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT));
  cricket::WriteJingleRtpHeaderExtensions(expected_hdrexts,
                                          actual_desc_elem.get());

  ASSERT_TRUE(actual_desc_elem != NULL);
  EXPECT_EQ(expected_desc_elem->Str(), actual_desc_elem->Str());

  // Parse the hand-written XML and compare against the original structs.
  std::vector<cricket::RtpHeaderExtension> actual_hdrexts;
  cricket::ParseError parse_error;
  ASSERT_TRUE(cricket::ParseJingleRtpHeaderExtensions(
      expected_desc_elem.get(), &actual_hdrexts, &parse_error));
  EXPECT_EQ(2U, actual_hdrexts.size());
  EXPECT_EQ(expected_hdrexts[0], actual_hdrexts[0]);
  EXPECT_EQ(expected_hdrexts[1], actual_hdrexts[1]);
}
+
// Test deserializing bad RTP header extension xml.
TEST_F(MediaMessagesTest, HeaderExtensionsFromBadXml) {
  std::vector<cricket::RtpHeaderExtension> actual_hdrexts;
  cricket::ParseError parse_error;

  // A non-numeric id must be rejected.
  talk_base::scoped_ptr<buzz::XmlElement> desc_elem(
      buzz::XmlElement::ForStr(
          HeaderExtensionsXml(
              HeaderExtensionXml("abc", "123"),
              HeaderExtensionXml("def", "not-an-id"))));
  ASSERT_FALSE(cricket::ParseJingleRtpHeaderExtensions(
      desc_elem.get(), &actual_hdrexts, &parse_error));

  // A negative id must be rejected.
  desc_elem.reset(
      buzz::XmlElement::ForStr(
          HeaderExtensionsXml(
              HeaderExtensionXml("abc", "123"),
              HeaderExtensionXml("def", "-1"))));
  ASSERT_FALSE(cricket::ParseJingleRtpHeaderExtensions(
      desc_elem.get(), &actual_hdrexts, &parse_error));
}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.cc b/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.cc
new file mode 100644
index 00000000000..844180eb87a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.cc
@@ -0,0 +1,108 @@
+/*
+ * libjingle
+ * Copyright 2005 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/common.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/mediamonitor.h"
+
+namespace cricket {
+
+enum {
+ MSG_MONITOR_POLL = 1,
+ MSG_MONITOR_START = 2,
+ MSG_MONITOR_STOP = 3,
+ MSG_MONITOR_SIGNAL = 4
+};
+
// Constructs a monitor that polls on |worker_thread| and reports results
// on |monitor_thread|.  Polling does not begin until Start() is called.
MediaMonitor::MediaMonitor(talk_base::Thread* worker_thread,
                           talk_base::Thread* monitor_thread)
    : worker_thread_(worker_thread),
      monitor_thread_(monitor_thread), monitoring_(false), rate_(0) {
}
+
// Stops monitoring and removes any messages still queued for this handler
// on both threads, so no callback can fire on a destroyed object.
MediaMonitor::~MediaMonitor() {
  monitoring_ = false;
  monitor_thread_->Clear(this);
  worker_thread_->Clear(this);
}
+
+void MediaMonitor::Start(uint32 milliseconds) {
+ rate_ = milliseconds;
+ if (rate_ < 100)
+ rate_ = 100;
+ worker_thread_->Post(this, MSG_MONITOR_START);
+}
+
// Stops polling.  The stop itself happens asynchronously on the worker
// thread via MSG_MONITOR_STOP.
// NOTE(review): rate_ is cleared here on the caller's thread while the
// worker may still be in a poll cycle; presumably benign because the
// pending MSG_MONITOR_STOP clears the poll timer -- confirm.
void MediaMonitor::Stop() {
  worker_thread_->Post(this, MSG_MONITOR_STOP);
  rate_ = 0;
}
+
// Dispatches the monitor's messages.  Start/stop/poll arrive on the
// worker thread; the signal message arrives on the monitor thread (see
// the ASSERTs below).  crit_ is held for the duration of every message.
void MediaMonitor::OnMessage(talk_base::Message* message) {
  talk_base::CritScope cs(&crit_);

  switch (message->message_id) {
    case MSG_MONITOR_START:
      ASSERT(talk_base::Thread::Current() == worker_thread_);
      if (!monitoring_) {
        monitoring_ = true;
        PollMediaChannel();
      }
      break;

    case MSG_MONITOR_STOP:
      ASSERT(talk_base::Thread::Current() == worker_thread_);
      if (monitoring_) {
        monitoring_ = false;
        // Drops the pending MSG_MONITOR_POLL (and anything else queued
        // for this handler on the worker thread).
        worker_thread_->Clear(this);
      }
      break;

    case MSG_MONITOR_POLL:
      ASSERT(talk_base::Thread::Current() == worker_thread_);
      PollMediaChannel();
      break;

    case MSG_MONITOR_SIGNAL:
      ASSERT(talk_base::Thread::Current() == monitor_thread_);
      Update();
      break;
  }
}
+
// Collects stats from the media channel, signals the monitor thread, and
// schedules the next poll on the worker thread.
// NOTE(review): this is reached from OnMessage with crit_ already held;
// taking CritScope again assumes the critical section is re-entrant --
// confirm against talk_base::CriticalSection.
void MediaMonitor::PollMediaChannel() {
  talk_base::CritScope cs(&crit_);
  ASSERT(talk_base::Thread::Current() == worker_thread_);

  GetStats();

  // Signal the monitoring thread, start another poll timer
  monitor_thread_->Post(this, MSG_MONITOR_SIGNAL);
  worker_thread_->PostDelayed(rate_, this, MSG_MONITOR_POLL);
}
+
+}  // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.h b/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.h
new file mode 100644
index 00000000000..a9ce8895905
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediamonitor.h
@@ -0,0 +1,98 @@
+/*
+ * libjingle
+ * Copyright 2005 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Class to collect statistics from a media channel
+
+#ifndef TALK_SESSION_MEDIA_MEDIAMONITOR_H_
+#define TALK_SESSION_MEDIA_MEDIAMONITOR_H_
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/mediachannel.h"
+
+namespace cricket {
+
// The base MediaMonitor class, independent of voice and video.
// Subclasses gather stats on the worker thread (GetStats) and report them
// on the monitor thread (Update); see mediamonitor.cc for the message
// flow between the two threads.
class MediaMonitor : public talk_base::MessageHandler,
    public sigslot::has_slots<> {
 public:
  MediaMonitor(talk_base::Thread* worker_thread,
               talk_base::Thread* monitor_thread);
  ~MediaMonitor();

  // Begins polling every |milliseconds| (clamped to a minimum in the
  // implementation).  Stop() cancels polling.  Both are asynchronous.
  void Start(uint32 milliseconds);
  void Stop();

 protected:
  void OnMessage(talk_base::Message *message);
  void PollMediaChannel();
  // Collects stats; runs on the worker thread with crit_ held.
  virtual void GetStats() = 0;
  // Reports stats; runs on the monitor thread with crit_ held.
  virtual void Update() = 0;

  talk_base::CriticalSection crit_;
  talk_base::Thread* worker_thread_;
  talk_base::Thread* monitor_thread_;
  bool monitoring_;
  uint32 rate_;  // Poll period in milliseconds; 0 when stopped.
};
+
// Templatized MediaMonitor that can deal with different kinds of media.
// MC is the media channel type (e.g. VoiceMediaChannel) and MI the
// matching info/stats type (e.g. VoiceMediaInfo).
template<class MC, class MI>
class MediaMonitorT : public MediaMonitor {
 public:
  MediaMonitorT(MC* media_channel, talk_base::Thread* worker_thread,
                talk_base::Thread* monitor_thread)
      : MediaMonitor(worker_thread, monitor_thread),
        media_channel_(media_channel) {}
  // Fired on the monitor thread with a snapshot of the latest stats.
  sigslot::signal2<MC*, const MI&> SignalUpdate;

 protected:
  // These routines assume the crit_ lock is held by the calling thread.
  virtual void GetStats() {
    media_info_.Clear();
    media_channel_->GetStats(&media_info_);
  }
  virtual void Update() {
    // Copy the stats, then drop crit_ while invoking the slots so that a
    // slow listener cannot block the worker thread's polling; the lock is
    // re-acquired before returning to OnMessage's CritScope.
    MI stats(media_info_);
    crit_.Leave();
    SignalUpdate(media_channel_, stats);
    crit_.Enter();
  }

 private:
  MC* media_channel_;
  MI media_info_;
};
+
+typedef MediaMonitorT<VoiceMediaChannel, VoiceMediaInfo> VoiceMediaMonitor;
+typedef MediaMonitorT<VideoMediaChannel, VideoMediaInfo> VideoMediaMonitor;
+typedef MediaMonitorT<DataMediaChannel, DataMediaInfo> DataMediaMonitor;
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIAMONITOR_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.cc b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.cc
new file mode 100644
index 00000000000..0aed63a2ca7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.cc
@@ -0,0 +1,224 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/mediarecorder.h"
+
+#include <limits.h>
+
+#include <string>
+
+#include "talk/base/fileutils.h"
+#include "talk/base/logging.h"
+#include "talk/base/pathutils.h"
+#include "talk/media/base/rtpdump.h"
+
+
+namespace cricket {
+
+///////////////////////////////////////////////////////////////////////////
+// Implementation of RtpDumpSink.
+///////////////////////////////////////////////////////////////////////////
+RtpDumpSink::RtpDumpSink(talk_base::StreamInterface* stream)
+ : max_size_(INT_MAX),
+ recording_(false),
+ packet_filter_(PF_NONE) {
+ stream_.reset(stream);
+}
+
RtpDumpSink::~RtpDumpSink() {}

// Caps the total size of the dump; packets that would push the dump past
// |size| bytes are dropped by OnPacket.
void RtpDumpSink::SetMaxSize(size_t size) {
  talk_base::CritScope cs(&critical_section_);
  max_size_ = size;
}
+
// Turns recording on or off.  The dump writer is created lazily on the
// first enable; disabling flushes buffered output.  Returns false only
// when recording is requested but no output stream was supplied.
bool RtpDumpSink::Enable(bool enable) {
  talk_base::CritScope cs(&critical_section_);

  recording_ = enable;

  // Create a file and the RTP writer if we have not done yet.
  if (recording_ && !writer_) {
    if (!stream_) {
      return false;
    }
    writer_.reset(new RtpDumpWriter(stream_.get()));
    writer_->set_packet_filter(packet_filter_);
  } else if (!recording_ && stream_) {
    stream_->Flush();
  }
  return true;
}
+
// Sink callback: writes one packet to the dump while recording, as long
// as the dump stays under max_size_.  RTCP packets are currently dropped.
void RtpDumpSink::OnPacket(const void* data, size_t size, bool rtcp) {
  talk_base::CritScope cs(&critical_section_);

  if (recording_ && writer_) {
    size_t current_size;
    // Silently skip the packet once the size cap would be exceeded.
    if (writer_->GetDumpSize(&current_size) &&
        current_size + RtpDumpPacket::kHeaderLength + size <= max_size_) {
      if (!rtcp) {
        writer_->WriteRtpPacket(data, size);
      } else {
        // TODO(whyuan): Enable recording RTCP.
      }
    }
  }
}
+
// Sets the packet filter and propagates it to the writer if one exists.
void RtpDumpSink::set_packet_filter(int filter) {
  talk_base::CritScope cs(&critical_section_);
  packet_filter_ = filter;
  if (writer_) {
    writer_->set_packet_filter(packet_filter_);
  }
}

// Flushes buffered dump data to the underlying stream.
void RtpDumpSink::Flush() {
  talk_base::CritScope cs(&critical_section_);
  if (stream_) {
    stream_->Flush();
  }
}
+
+///////////////////////////////////////////////////////////////////////////
+// Implementation of MediaRecorder.
+///////////////////////////////////////////////////////////////////////////
MediaRecorder::MediaRecorder() {}

// Deletes all sink pairs.  Sinks are not unregistered from their channels
// here; presumably callers invoke RemoveChannel() first -- confirm
// against callers.
MediaRecorder::~MediaRecorder() {
  talk_base::CritScope cs(&critical_section_);
  std::map<BaseChannel*, SinkPair*>::iterator itr;
  for (itr = sinks_.begin(); itr != sinks_.end(); ++itr) {
    delete itr->second;
  }
}
+
// Registers a voice channel for recording.  The streams are handed to
// RtpDumpSinks, which take ownership.  Returns false if |channel| is NULL
// or was already added.
bool MediaRecorder::AddChannel(VoiceChannel* channel,
                               talk_base::StreamInterface* send_stream,
                               talk_base::StreamInterface* recv_stream,
                               int filter) {
  return InternalAddChannel(channel, false, send_stream, recv_stream,
                            filter);
}
// Video-channel overload; identical except video_channel is true.
bool MediaRecorder::AddChannel(VideoChannel* channel,
                               talk_base::StreamInterface* send_stream,
                               talk_base::StreamInterface* recv_stream,
                               int filter) {
  return InternalAddChannel(channel, true, send_stream, recv_stream,
                            filter);
}
+
// Shared implementation of the AddChannel overloads.  Creates a send/recv
// RtpDumpSink pair for |channel| and stores it in sinks_.  Recording does
// not begin until EnableChannel() is called.
bool MediaRecorder::InternalAddChannel(BaseChannel* channel,
                                       bool video_channel,
                                       talk_base::StreamInterface* send_stream,
                                       talk_base::StreamInterface* recv_stream,
                                       int filter) {
  if (!channel) {
    return false;
  }

  talk_base::CritScope cs(&critical_section_);
  if (sinks_.end() != sinks_.find(channel)) {
    return false;  // The channel was added already.
  }

  SinkPair* sink_pair = new SinkPair;
  sink_pair->video_channel = video_channel;
  sink_pair->filter = filter;
  // Each RtpDumpSink takes ownership of its stream.
  sink_pair->send_sink.reset(new RtpDumpSink(send_stream));
  sink_pair->send_sink->set_packet_filter(filter);
  sink_pair->recv_sink.reset(new RtpDumpSink(recv_stream));
  sink_pair->recv_sink->set_packet_filter(filter);
  sinks_[channel] = sink_pair;

  return true;
}
+
// Unregisters the channel's sinks from the channel and deletes them.
// No-op if the channel was never added.
void MediaRecorder::RemoveChannel(BaseChannel* channel,
                                  SinkType type) {
  talk_base::CritScope cs(&critical_section_);
  std::map<BaseChannel*, SinkPair*>::iterator itr = sinks_.find(channel);
  if (sinks_.end() != itr) {
    channel->UnregisterSendSink(itr->second->send_sink.get(), type);
    channel->UnregisterRecvSink(itr->second->recv_sink.get(), type);
    delete itr->second;
    sinks_.erase(itr);
  }
}
+
// Enables or disables recording of the channel's send and receive
// directions independently.  Returns false if the channel was never
// added.  When a video channel records RTP, a full intra frame is also
// requested so the dump starts with decodable video.
bool MediaRecorder::EnableChannel(
    BaseChannel* channel, bool enable_send, bool enable_recv,
    SinkType type) {
  talk_base::CritScope cs(&critical_section_);
  std::map<BaseChannel*, SinkPair*>::iterator itr = sinks_.find(channel);
  if (sinks_.end() == itr) {
    return false;
  }

  SinkPair* sink_pair = itr->second;
  RtpDumpSink* sink = sink_pair->send_sink.get();
  sink->Enable(enable_send);
  if (enable_send) {
    channel->RegisterSendSink(sink, &RtpDumpSink::OnPacket, type);
  } else {
    channel->UnregisterSendSink(sink, type);
  }

  sink = sink_pair->recv_sink.get();
  sink->Enable(enable_recv);
  if (enable_recv) {
    channel->RegisterRecvSink(sink, &RtpDumpSink::OnPacket, type);
  } else {
    channel->UnregisterRecvSink(sink, type);
  }

  if (sink_pair->video_channel &&
      (sink_pair->filter & PF_RTPPACKET) == PF_RTPPACKET) {
    // Request a full intra frame.
    // The cast is safe: video_channel was recorded at AddChannel time.
    VideoChannel* video_channel = static_cast<VideoChannel*>(channel);
    if (enable_send) {
      video_channel->SendIntraFrame();
    }
    if (enable_recv) {
      video_channel->RequestIntraFrame();
    }
  }

  return true;
}
+
+void MediaRecorder::FlushSinks() {
+ talk_base::CritScope cs(&critical_section_);
+ std::map<BaseChannel*, SinkPair*>::iterator itr;
+ for (itr = sinks_.begin(); itr != sinks_.end(); ++itr) {
+ itr->second->send_sink->Flush();
+ itr->second->recv_sink->Flush();
+ }
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.h b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.h
new file mode 100644
index 00000000000..df22e984dc9
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder.h
@@ -0,0 +1,119 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_MEDIARECORDER_H_
+#define TALK_SESSION_MEDIA_MEDIARECORDER_H_
+
+#include <map>
+#include <string>
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslot.h"
+#include "talk/session/media/channel.h"
+#include "talk/session/media/mediasink.h"
+
+namespace talk_base {
+class Pathname;
+class FileStream;
+}
+
+namespace cricket {
+
+class BaseChannel;
+class VideoChannel;
+class VoiceChannel;
+class RtpDumpWriter;
+
// RtpDumpSink implements MediaSinkInterface by dumping the RTP/RTCP packets to
// a file.
class RtpDumpSink : public MediaSinkInterface, public sigslot::has_slots<> {
 public:
  // Takes ownership of stream.
  explicit RtpDumpSink(talk_base::StreamInterface* stream);
  virtual ~RtpDumpSink();

  // Caps the total dump size; packets beyond the cap are dropped.
  virtual void SetMaxSize(size_t size);
  // Starts/stops recording.  Returns false if no stream was supplied.
  virtual bool Enable(bool enable);
  // NOTE(review): reads recording_ without holding critical_section_;
  // presumably callers tolerate a momentarily stale value -- confirm.
  virtual bool IsEnabled() const { return recording_; }
  // Writes one RTP packet to the dump (RTCP is currently ignored).
  virtual void OnPacket(const void* data, size_t size, bool rtcp);
  virtual void set_packet_filter(int filter);
  int packet_filter() const { return packet_filter_; }
  // Flushes buffered data to the underlying stream.
  void Flush();

 private:
  size_t max_size_;    // Upper bound on total dump size, in bytes.
  bool recording_;
  int packet_filter_;
  talk_base::scoped_ptr<talk_base::StreamInterface> stream_;  // Owned.
  talk_base::scoped_ptr<RtpDumpWriter> writer_;  // Created lazily.
  talk_base::CriticalSection critical_section_;

  DISALLOW_COPY_AND_ASSIGN(RtpDumpSink);
};
+
// Records the RTP/RTCP traffic of registered voice/video channels into
// per-channel send and receive dump streams.
class MediaRecorder {
 public:
  MediaRecorder();
  virtual ~MediaRecorder();

  // Registers a channel.  The streams are handed to internal sinks, which
  // take ownership.  Returns false if the channel is NULL or already
  // registered.
  bool AddChannel(VoiceChannel* channel,
                  talk_base::StreamInterface* send_stream,
                  talk_base::StreamInterface* recv_stream,
                  int filter);
  bool AddChannel(VideoChannel* channel,
                  talk_base::StreamInterface* send_stream,
                  talk_base::StreamInterface* recv_stream,
                  int filter);
  // Unregisters and destroys the channel's sinks.
  void RemoveChannel(BaseChannel* channel, SinkType type);
  // Enables/disables recording per direction; returns false if the
  // channel is unknown.
  bool EnableChannel(BaseChannel* channel, bool enable_send, bool enable_recv,
                     SinkType type);
  // Flushes all sinks' buffered data to their streams.
  void FlushSinks();

 private:
  // Send/recv dump sinks created for one registered channel.
  struct SinkPair {
    bool video_channel;  // True when the channel is a VideoChannel.
    int filter;
    talk_base::scoped_ptr<RtpDumpSink> send_sink;
    talk_base::scoped_ptr<RtpDumpSink> recv_sink;
  };

  bool InternalAddChannel(BaseChannel* channel,
                          bool video_channel,
                          talk_base::StreamInterface* send_stream,
                          talk_base::StreamInterface* recv_stream,
                          int filter);

  std::map<BaseChannel*, SinkPair*> sinks_;  // Owns the SinkPair values.
  talk_base::CriticalSection critical_section_;

  DISALLOW_COPY_AND_ASSIGN(MediaRecorder);
};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIARECORDER_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediarecorder_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder_unittest.cc
new file mode 100644
index 00000000000..5155e6dd3a1
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediarecorder_unittest.cc
@@ -0,0 +1,358 @@
+// libjingle
+// Copyright 2010 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string>
+
+#include "talk/base/bytebuffer.h"
+#include "talk/base/fileutils.h"
+#include "talk/base/gunit.h"
+#include "talk/base/pathutils.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/rtpdump.h"
+#include "talk/media/base/testutils.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channel.h"
+#include "talk/session/media/mediarecorder.h"
+
+namespace cricket {
+
+// Opens |path| for binary writing and returns the resulting stream.
+talk_base::StreamInterface* Open(const std::string& path) {
+  talk_base::Pathname pathname(path);
+  return talk_base::Filesystem::OpenFile(pathname, "wb");
+}
+
+/////////////////////////////////////////////////////////////////////////
+// Test RtpDumpSink
+/////////////////////////////////////////////////////////////////////////
+// Fixture: feeds serialized RTP test packets into an RtpDumpSink that writes
+// to a temporary rtpdump file, then reads the file back for verification.
+class RtpDumpSinkTest : public testing::Test {
+ public:
+  virtual void SetUp() {
+    EXPECT_TRUE(talk_base::Filesystem::GetTemporaryFolder(path_, true, NULL));
+    path_.SetFilename("sink-test.rtpdump");
+    sink_.reset(new RtpDumpSink(Open(path_.pathname())));
+
+    // Pre-serialize the raw test packets once so each test can replay them.
+    for (int i = 0; i < ARRAY_SIZE(rtp_buf_); ++i) {
+      RtpTestUtility::kTestRawRtpPackets[i].WriteToByteBuffer(
+          RtpTestUtility::kDefaultSsrc, &rtp_buf_[i]);
+    }
+  }
+
+  virtual void TearDown() {
+    stream_.reset();
+    EXPECT_TRUE(talk_base::Filesystem::DeleteFile(path_));
+  }
+
+ protected:
+  // Serializes |raw| and hands it to the sink as a non-RTCP packet.
+  void OnRtpPacket(const RawRtpPacket& raw) {
+    talk_base::ByteBuffer buf;
+    raw.WriteToByteBuffer(RtpTestUtility::kDefaultSsrc, &buf);
+    sink_->OnPacket(buf.Data(), buf.Length(), false);
+  }
+
+  // Reads the next packet back from the dump file. The first call destroys
+  // the sink (closing the file) and reopens the file for reading, so no
+  // further writes are possible afterwards.
+  talk_base::StreamResult ReadPacket(RtpDumpPacket* packet) {
+    if (!stream_.get()) {
+      sink_.reset(); // This will close the file. So we can read it.
+      stream_.reset(talk_base::Filesystem::OpenFile(path_, "rb"));
+      reader_.reset(new RtpDumpReader(stream_.get()));
+    }
+    return reader_->ReadPacket(packet);
+  }
+
+  talk_base::Pathname path_;
+  talk_base::scoped_ptr<RtpDumpSink> sink_;
+  talk_base::ByteBuffer rtp_buf_[3];
+  talk_base::scoped_ptr<talk_base::StreamInterface> stream_;
+  talk_base::scoped_ptr<RtpDumpReader> reader_;
+};
+
+// Verifies that Enable()/IsEnabled() gate recording: only packets delivered
+// while the sink is enabled end up in the dump file.
+TEST_F(RtpDumpSinkTest, TestRtpDumpSink) {
+  // By default, the sink is disabled. The 1st packet is not written.
+  EXPECT_FALSE(sink_->IsEnabled());
+  sink_->set_packet_filter(PF_ALL);
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[0]);
+
+  // Enable the sink. The 2nd packet is written.
+  EXPECT_TRUE(sink_->Enable(true));
+  EXPECT_TRUE(sink_->IsEnabled());
+  EXPECT_TRUE(talk_base::Filesystem::IsFile(path_.pathname()));
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[1]);
+
+  // Disable the sink. The 3rd packet is not written.
+  EXPECT_TRUE(sink_->Enable(false));
+  EXPECT_FALSE(sink_->IsEnabled());
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[2]);
+
+  // Read the recorded file and verify it contains only the 2nd packet.
+  RtpDumpPacket packet;
+  EXPECT_EQ(talk_base::SR_SUCCESS, ReadPacket(&packet));
+  EXPECT_TRUE(RtpTestUtility::VerifyPacket(
+      &packet, &RtpTestUtility::kTestRawRtpPackets[1], false));
+  EXPECT_EQ(talk_base::SR_EOS, ReadPacket(&packet));
+}
+
+// Verifies SetMaxSize(): once the dump file reaches the limit, further
+// packets are dropped instead of written.
+TEST_F(RtpDumpSinkTest, TestRtpDumpSinkMaxSize) {
+  EXPECT_TRUE(sink_->Enable(true));
+  sink_->set_packet_filter(PF_ALL);
+  // Size the limit to hold exactly the file header plus one dumped packet.
+  sink_->SetMaxSize(strlen(RtpDumpFileHeader::kFirstLine) +
+                    RtpDumpFileHeader::kHeaderLength +
+                    RtpDumpPacket::kHeaderLength +
+                    RtpTestUtility::kTestRawRtpPackets[0].size());
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[0]);
+
+  // Exceed the limit size: the 2nd and 3rd packets are not written.
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[1]);
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[2]);
+
+  // Read the recorded file and verify that it contains only the first packet.
+  RtpDumpPacket packet;
+  EXPECT_EQ(talk_base::SR_SUCCESS, ReadPacket(&packet));
+  EXPECT_TRUE(RtpTestUtility::VerifyPacket(
+      &packet, &RtpTestUtility::kTestRawRtpPackets[0], false));
+  EXPECT_EQ(talk_base::SR_EOS, ReadPacket(&packet));
+}
+
+// Verifies set_packet_filter(): PF_RTPHEADER records headers only,
+// PF_RTPPACKET records whole packets, PF_NONE records nothing.
+TEST_F(RtpDumpSinkTest, TestRtpDumpSinkFilter) {
+  // The default filter is PF_NONE.
+  EXPECT_EQ(PF_NONE, sink_->packet_filter());
+
+  // Set to PF_RTPHEADER before enable.
+  sink_->set_packet_filter(PF_RTPHEADER);
+  EXPECT_EQ(PF_RTPHEADER, sink_->packet_filter());
+  EXPECT_TRUE(sink_->Enable(true));
+  // We dump only the header of the first packet.
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[0]);
+
+  // Set the filter to PF_RTPPACKET. We dump all the second packet.
+  sink_->set_packet_filter(PF_RTPPACKET);
+  EXPECT_EQ(PF_RTPPACKET, sink_->packet_filter());
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[1]);
+
+  // Set the filter to PF_NONE. We do not dump the third packet.
+  sink_->set_packet_filter(PF_NONE);
+  EXPECT_EQ(PF_NONE, sink_->packet_filter());
+  OnRtpPacket(RtpTestUtility::kTestRawRtpPackets[2]);
+
+  // Read the recorded file and verify the header of the first packet and
+  // the whole packet for the second packet.
+  RtpDumpPacket packet;
+  EXPECT_EQ(talk_base::SR_SUCCESS, ReadPacket(&packet));
+  EXPECT_TRUE(RtpTestUtility::VerifyPacket(
+      &packet, &RtpTestUtility::kTestRawRtpPackets[0], true));
+  EXPECT_EQ(talk_base::SR_SUCCESS, ReadPacket(&packet));
+  EXPECT_TRUE(RtpTestUtility::VerifyPacket(
+      &packet, &RtpTestUtility::kTestRawRtpPackets[1], false));
+  EXPECT_EQ(talk_base::SR_EOS, ReadPacket(&packet));
+}
+
+/////////////////////////////////////////////////////////////////////////
+// Test MediaRecorder
+/////////////////////////////////////////////////////////////////////////
+// Exercises MediaRecorder against |channel|: records send-only, recv-only,
+// then both directions, and finally removes the channel. When
+// |video_media_channel| is non-NULL, also checks the intra-frame side
+// effects of recording full RTP packets vs. headers only (per |filter|).
+void TestMediaRecorder(BaseChannel* channel,
+                       FakeVideoMediaChannel* video_media_channel,
+                       int filter) {
+  // Create media recorder.
+  talk_base::scoped_ptr<MediaRecorder> recorder(new MediaRecorder);
+  // Fail to EnableChannel before AddChannel.
+  EXPECT_FALSE(recorder->EnableChannel(channel, true, true, SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasSendSinks(SINK_POST_CRYPTO));
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_POST_CRYPTO));
+
+  // Add the channel to the recorder.
+  talk_base::Pathname path;
+  EXPECT_TRUE(talk_base::Filesystem::GetTemporaryFolder(path, true, NULL));
+  path.SetFilename("send.rtpdump");
+  std::string send_file = path.pathname();
+  path.SetFilename("recv.rtpdump");
+  std::string recv_file = path.pathname();
+  if (video_media_channel) {
+    EXPECT_TRUE(recorder->AddChannel(static_cast<VideoChannel*>(channel),
+                                     Open(send_file), Open(recv_file), filter));
+  } else {
+    EXPECT_TRUE(recorder->AddChannel(static_cast<VoiceChannel*>(channel),
+                                     Open(send_file), Open(recv_file), filter));
+  }
+
+  // Enable recording only the sent media.
+  EXPECT_TRUE(recorder->EnableChannel(channel, true, false, SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  // Fix: the original asserted HasRecvSinks(SINK_POST_CRYPTO) twice and
+  // never verified that the pre-crypto receive sink is absent.
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasSendSinks(SINK_POST_CRYPTO));
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_POST_CRYPTO));
+  if (video_media_channel) {
+    EXPECT_TRUE_WAIT(video_media_channel->sent_intra_frame(), 100);
+  }
+
+  // Enable recording only the received media.
+  EXPECT_TRUE(recorder->EnableChannel(channel, false, true, SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  if (video_media_channel) {
+    EXPECT_TRUE(video_media_channel->requested_intra_frame());
+  }
+
+  // Enable recording both the sent and the received video.
+  EXPECT_TRUE(recorder->EnableChannel(channel, true, true, SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+
+  // Re-enable recording; whether FIR is triggered depends on |filter|.
+  if (video_media_channel) {
+    video_media_channel->set_sent_intra_frame(false);
+    video_media_channel->set_requested_intra_frame(false);
+  }
+  EXPECT_TRUE(recorder->EnableChannel(channel, true, true, SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  if (video_media_channel) {
+    if ((filter & PF_RTPPACKET) == PF_RTPPACKET) {
+      // Recording the whole RTP packet triggers FIR.
+      EXPECT_TRUE(video_media_channel->requested_intra_frame());
+      EXPECT_TRUE(video_media_channel->sent_intra_frame());
+    } else {
+      // Recording only the RTP header does not trigger FIR.
+      EXPECT_FALSE(video_media_channel->requested_intra_frame());
+      EXPECT_FALSE(video_media_channel->sent_intra_frame());
+    }
+  }
+
+  // Remove the channel from the recorder.
+  recorder->RemoveChannel(channel, SINK_PRE_CRYPTO);
+  EXPECT_FALSE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+
+  // Delete all files.
+  recorder.reset();
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(send_file));
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(recv_file));
+}
+
+// First start recording headers and then start recording media. Verify that
+// different files are created for headers and media, and that header-only
+// recording does not trigger FIR while full-packet recording does.
+void TestRecordHeaderAndMedia(BaseChannel* channel,
+                              FakeVideoMediaChannel* video_media_channel) {
+  // Create RTP header recorder.
+  talk_base::scoped_ptr<MediaRecorder> header_recorder(new MediaRecorder);
+
+  talk_base::Pathname path;
+  EXPECT_TRUE(talk_base::Filesystem::GetTemporaryFolder(path, true, NULL));
+  path.SetFilename("send-header.rtpdump");
+  std::string send_header_file = path.pathname();
+  path.SetFilename("recv-header.rtpdump");
+  std::string recv_header_file = path.pathname();
+  if (video_media_channel) {
+    EXPECT_TRUE(header_recorder->AddChannel(
+        static_cast<VideoChannel*>(channel),
+        Open(send_header_file), Open(recv_header_file), PF_RTPHEADER));
+  } else {
+    EXPECT_TRUE(header_recorder->AddChannel(
+        static_cast<VoiceChannel*>(channel),
+        Open(send_header_file), Open(recv_header_file), PF_RTPHEADER));
+  }
+
+  // Enable recording both sent and received.
+  EXPECT_TRUE(
+      header_recorder->EnableChannel(channel, true, true, SINK_POST_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_POST_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_POST_CRYPTO));
+  EXPECT_FALSE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_FALSE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  if (video_media_channel) {
+    EXPECT_FALSE(video_media_channel->sent_intra_frame());
+    EXPECT_FALSE(video_media_channel->requested_intra_frame());
+  }
+
+  // Verify that header files are created.
+  EXPECT_TRUE(talk_base::Filesystem::IsFile(send_header_file));
+  EXPECT_TRUE(talk_base::Filesystem::IsFile(recv_header_file));
+
+  // Create the full RTP packet recorder.
+  talk_base::scoped_ptr<MediaRecorder> recorder(new MediaRecorder);
+  path.SetFilename("send.rtpdump");
+  std::string send_file = path.pathname();
+  path.SetFilename("recv.rtpdump");
+  std::string recv_file = path.pathname();
+  if (video_media_channel) {
+    EXPECT_TRUE(recorder->AddChannel(
+        static_cast<VideoChannel*>(channel),
+        Open(send_file), Open(recv_file), PF_RTPPACKET));
+  } else {
+    EXPECT_TRUE(recorder->AddChannel(
+        static_cast<VoiceChannel*>(channel),
+        Open(send_file), Open(recv_file), PF_RTPPACKET));
+  }
+
+  // Enable recording both sent and received.
+  EXPECT_TRUE(recorder->EnableChannel(channel, true, true, SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_POST_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_POST_CRYPTO));
+  EXPECT_TRUE(channel->HasSendSinks(SINK_PRE_CRYPTO));
+  EXPECT_TRUE(channel->HasRecvSinks(SINK_PRE_CRYPTO));
+  if (video_media_channel) {
+    EXPECT_TRUE_WAIT(video_media_channel->sent_intra_frame(), 100);
+    EXPECT_TRUE(video_media_channel->requested_intra_frame());
+  }
+
+  // Verify that media files are created.
+  EXPECT_TRUE(talk_base::Filesystem::IsFile(send_file));
+  EXPECT_TRUE(talk_base::Filesystem::IsFile(recv_file));
+
+  // Delete all files.
+  header_recorder.reset();
+  recorder.reset();
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(send_header_file));
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(recv_header_file));
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(send_file));
+  EXPECT_TRUE(talk_base::Filesystem::DeleteFile(recv_file));
+}
+
+// Runs the recorder scenarios against a voice channel (no video-specific
+// intra-frame checks).
+TEST(MediaRecorderTest, TestMediaRecorderVoiceChannel) {
+  // Create the voice channel.
+  FakeSession session(true);
+  FakeMediaEngine media_engine;
+  VoiceChannel channel(talk_base::Thread::Current(), &media_engine,
+                       new FakeVoiceMediaChannel(NULL), &session, "", false);
+  EXPECT_TRUE(channel.Init());
+  TestMediaRecorder(&channel, NULL, PF_RTPPACKET);
+  TestMediaRecorder(&channel, NULL, PF_RTPHEADER);
+  TestRecordHeaderAndMedia(&channel, NULL);
+}
+
+// Runs the recorder scenarios against a video channel, including the
+// FIR/intra-frame side-effect checks.
+TEST(MediaRecorderTest, TestMediaRecorderVideoChannel) {
+  // Create the video channel.
+  FakeSession session(true);
+  FakeMediaEngine media_engine;
+  FakeVideoMediaChannel* media_channel = new FakeVideoMediaChannel(NULL);
+  VideoChannel channel(talk_base::Thread::Current(), &media_engine,
+                       media_channel, &session, "", false, NULL);
+  EXPECT_TRUE(channel.Init());
+  TestMediaRecorder(&channel, media_channel, PF_RTPPACKET);
+  TestMediaRecorder(&channel, media_channel, PF_RTPHEADER);
+  TestRecordHeaderAndMedia(&channel, media_channel);
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasession.cc b/chromium/third_party/libjingle/source/talk/session/media/mediasession.cc
new file mode 100644
index 00000000000..1215008b05d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasession.cc
@@ -0,0 +1,1657 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/mediasession.h"
+
+#include <functional>
+#include <map>
+#include <set>
+#include <utility>
+
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/srtpfilter.h"
+#include "talk/xmpp/constants.h"
+
+namespace {
+const char kInline[] = "inline:";
+}
+
+namespace cricket {
+
+using talk_base::scoped_ptr;
+
+// RTP Profile names
+// http://www.iana.org/assignments/rtp-parameters/rtp-parameters.xml
+// RFC4585
+const char kMediaProtocolAvpf[] = "RTP/AVPF";
+// RFC5124
+const char kMediaProtocolSavpf[] = "RTP/SAVPF";
+
+const char kMediaProtocolRtpPrefix[] = "RTP/";
+
+const char kMediaProtocolSctp[] = "SCTP";
+const char kMediaProtocolDtlsSctp[] = "DTLS/SCTP";
+
+// Returns true if |content| is a media content whose description carries the
+// given |media_type|.
+static bool IsMediaContentOfType(const ContentInfo* content,
+                                 MediaType media_type) {
+  if (!IsMediaContent(content)) {
+    return false;
+  }
+  const MediaContentDescription* desc =
+      static_cast<const MediaContentDescription*>(content->description);
+  return desc != NULL && desc->type() == media_type;
+}
+
+// Fills |out| with a crypto description carrying |tag|, |cipher| and a
+// freshly generated random base64 master key ("inline:<key>" key params).
+// Returns false if random key generation fails.
+static bool CreateCryptoParams(int tag, const std::string& cipher,
+                               CryptoParams *out) {
+  std::string key;
+  key.reserve(SRTP_MASTER_KEY_BASE64_LEN);
+
+  if (!talk_base::CreateRandomString(SRTP_MASTER_KEY_BASE64_LEN, &key)) {
+    return false;
+  }
+  out->tag = tag;
+  out->cipher_suite = cipher;
+  out->key_params = kInline;
+  out->key_params += key;
+  return true;
+}
+
+#ifdef HAVE_SRTP
+// Appends one CryptoParams for |cipher_suite| to |out|, using the vector's
+// current size as the tag.
+static bool AddCryptoParams(const std::string& cipher_suite,
+                            CryptoParamsVec *out) {
+  int size = static_cast<int>(out->size());
+
+  out->resize(size + 1);
+  return CreateCryptoParams(size, cipher_suite, &out->at(size));
+}
+
+// Copies every crypto in |cryptos| into |media|.
+void AddMediaCryptos(const CryptoParamsVec& cryptos,
+                     MediaContentDescription* media) {
+  for (CryptoParamsVec::const_iterator crypto = cryptos.begin();
+       crypto != cryptos.end(); ++crypto) {
+    media->AddCrypto(*crypto);
+  }
+}
+
+// Creates a CryptoParams (with a fresh random key) for each suite in
+// |crypto_suites| and adds them all to |media|. Returns false if any key
+// generation fails.
+bool CreateMediaCryptos(const std::vector<std::string>& crypto_suites,
+                        MediaContentDescription* media) {
+  CryptoParamsVec cryptos;
+  for (std::vector<std::string>::const_iterator it = crypto_suites.begin();
+       it != crypto_suites.end(); ++it) {
+    if (!AddCryptoParams(*it, &cryptos)) {
+      return false;
+    }
+  }
+  AddMediaCryptos(cryptos, media);
+  return true;
+}
+#endif
+
+// Returns the crypto list of |media|, or NULL when |media| itself is NULL.
+const CryptoParamsVec* GetCryptos(const MediaContentDescription* media) {
+  return media ? &media->cryptos() : NULL;
+}
+
+// Scans |cryptos| for an entry matching |crypto| (per CryptoParams::Matches)
+// and copies it to |out|. Returns true if a match was found.
+bool FindMatchingCrypto(const CryptoParamsVec& cryptos,
+                        const CryptoParams& crypto,
+                        CryptoParams* out) {
+  CryptoParamsVec::const_iterator it = cryptos.begin();
+  for (; it != cryptos.end(); ++it) {
+    if (!crypto.Matches(*it)) {
+      continue;
+    }
+    *out = *it;
+    return true;
+  }
+  return false;
+}
+
+// For audio, HMAC 32 is preferred (listed first) because of its low overhead.
+void GetSupportedAudioCryptoSuites(
+    std::vector<std::string>* crypto_suites) {
+#ifdef HAVE_SRTP
+  crypto_suites->push_back(CS_AES_CM_128_HMAC_SHA1_32);
+  crypto_suites->push_back(CS_AES_CM_128_HMAC_SHA1_80);
+#endif
+}
+
+// Video uses the default suite list (80-bit HMAC only).
+void GetSupportedVideoCryptoSuites(
+    std::vector<std::string>* crypto_suites) {
+  GetSupportedDefaultCryptoSuites(crypto_suites);
+}
+
+// Data channels use the default suite list (80-bit HMAC only).
+void GetSupportedDataCryptoSuites(
+    std::vector<std::string>* crypto_suites) {
+  GetSupportedDefaultCryptoSuites(crypto_suites);
+}
+
+// Appends the default suites; empty when built without SRTP support.
+void GetSupportedDefaultCryptoSuites(
+    std::vector<std::string>* crypto_suites) {
+#ifdef HAVE_SRTP
+  crypto_suites->push_back(CS_AES_CM_128_HMAC_SHA1_80);
+#endif
+}
+
+// For video support only 80-bit SHA1 HMAC. For audio 32-bit HMAC is
+// tolerated unless bundle is enabled because it is low overhead. Picks the
+// first acceptable crypto in the offered list and writes a fresh answer
+// (new random key, same tag and suite) into |crypto|. Returns false if no
+// offered suite is acceptable.
+static bool SelectCrypto(const MediaContentDescription* offer,
+                         bool bundle,
+                         CryptoParams *crypto) {
+  bool audio = offer->type() == MEDIA_TYPE_AUDIO;
+  const CryptoParamsVec& cryptos = offer->cryptos();
+
+  for (CryptoParamsVec::const_iterator i = cryptos.begin();
+       i != cryptos.end(); ++i) {
+    if (CS_AES_CM_128_HMAC_SHA1_80 == i->cipher_suite ||
+        (CS_AES_CM_128_HMAC_SHA1_32 == i->cipher_suite && audio && !bundle)) {
+      return CreateCryptoParams(i->tag, i->cipher_suite, crypto);
+    }
+  }
+  return false;
+}
+
+// Returns a pointer to the first StreamParams in |params_vec| whose cname
+// equals |cname|, or NULL when none matches.
+static const StreamParams* FindFirstStreamParamsByCname(
+    const StreamParamsVec& params_vec,
+    const std::string& cname) {
+  StreamParamsVec::const_iterator it = params_vec.begin();
+  for (; it != params_vec.end(); ++it) {
+    if (it->cname == cname) {
+      return &(*it);
+    }
+  }
+  return NULL;
+}
+
+// Produces the CNAME for a stream: reuses the CNAME of an already-known
+// StreamParams whose stream shares |synch_label| (so synched streams share a
+// CNAME), otherwise generates a fresh random one that does not collide with
+// any CNAME in |params_vec|. Returns false on argument or RNG failure.
+static bool GenerateCname(const StreamParamsVec& params_vec,
+                          const MediaSessionOptions::Streams& streams,
+                          const std::string& synch_label,
+                          std::string* cname) {
+  ASSERT(cname != NULL);
+  if (!cname)
+    return false;
+
+  // Check if a CNAME exist for any of the other synched streams.
+  for (MediaSessionOptions::Streams::const_iterator stream_it = streams.begin();
+       stream_it != streams.end() ; ++stream_it) {
+    if (synch_label != stream_it->sync_label)
+      continue;
+
+    StreamParams param;
+    // groupid is empty for StreamParams generated using
+    // MediaSessionDescriptionFactory.
+    if (GetStreamByIds(params_vec, "", stream_it->id,
+                       &param)) {
+      *cname = param.cname;
+      return true;
+    }
+  }
+  // No other stream seems to exist that we should sync with.
+  // Generate a random string for the RTCP CNAME, as stated in RFC 6222.
+  // This string is only used for synchronization, and therefore is opaque.
+  do {
+    if (!talk_base::CreateRandomString(16, cname)) {
+      ASSERT(false);
+      return false;
+    }
+  } while (FindFirstStreamParamsByCname(params_vec, *cname));
+
+  return true;
+}
+
+// Generate random SSRC values that are not already present in |params_vec|.
+// Either 2 or 1 ssrcs will be generated based on |include_rtx_stream| being
+// true or false (the second SSRC is for the RTX retransmission stream). The
+// generated values are appended to |ssrcs|.
+static void GenerateSsrcs(const StreamParamsVec& params_vec,
+                          bool include_rtx_stream,
+                          std::vector<uint32>* ssrcs) {
+  unsigned int num_ssrcs = include_rtx_stream ? 2 : 1;
+  for (unsigned int i = 0; i < num_ssrcs; i++) {
+    uint32 candidate;
+    do {
+      // Re-draw until the candidate collides with neither an existing stream
+      // nor an SSRC generated earlier in this call.
+      candidate = talk_base::CreateRandomNonZeroId();
+    } while (GetStreamBySsrc(params_vec, candidate, NULL) ||
+             std::count(ssrcs->begin(), ssrcs->end(), candidate) > 0);
+    ssrcs->push_back(candidate);
+  }
+}
+
+// Picks a random unused SCTP stream id (SID). Returns false if we exhaust
+// the range of SIDs.
+static bool GenerateSctpSid(const StreamParamsVec& params_vec,
+                            uint32* sid) {
+  // Candidates below are drawn from [0, kMaxSctpSid), i.e. only kMaxSctpSid
+  // distinct values exist. The original guard used '>', which let the retry
+  // loop spin forever once exactly kMaxSctpSid streams were present; '>='
+  // guarantees termination.
+  if (params_vec.size() >= kMaxSctpSid) {
+    LOG(LS_WARNING) <<
+        "Could not generate an SCTP SID: too many SCTP streams.";
+    return false;
+  }
+  while (true) {
+    // NOTE(review): the modulo can map a non-zero random id to 0 -- confirm
+    // that SID 0 is acceptable to callers.
+    uint32 candidate = talk_base::CreateRandomNonZeroId() % kMaxSctpSid;
+    if (!GetStreamBySsrc(params_vec, candidate, NULL)) {
+      *sid = candidate;
+      return true;
+    }
+  }
+}
+
+// Generates one new SCTP SID (not colliding with |params_vec|) and appends
+// it to |sids|. Returns false when no SID could be generated.
+static bool GenerateSctpSids(const StreamParamsVec& params_vec,
+                             std::vector<uint32>* sids) {
+  uint32 sid;
+  if (!GenerateSctpSid(params_vec, &sid)) {
+    // Fixed grammar of the warning ("Could not generated").
+    LOG(LS_WARNING) << "Could not generate an SCTP SID.";
+    return false;
+  }
+  sids->push_back(sid);
+  return true;
+}
+
+// Collects the StreamParams of every media content in |sdesc| (all media
+// types) into |stream_params|. A NULL |sdesc| is a no-op.
+static void GetCurrentStreamParams(const SessionDescription* sdesc,
+                                   StreamParamsVec* stream_params) {
+  if (!sdesc) {
+    return;
+  }
+
+  const ContentInfos& contents = sdesc->contents();
+  for (ContentInfos::const_iterator content = contents.begin();
+       content != contents.end(); ++content) {
+    if (!IsMediaContent(&*content)) {
+      continue;
+    }
+    const MediaContentDescription* media =
+        static_cast<const MediaContentDescription*>(content->description);
+    const StreamParamsVec& streams = media->streams();
+    stream_params->insert(stream_params->end(), streams.begin(),
+                          streams.end());
+  }
+}
+
+// Bookkeeping helper that tracks which integer ids (e.g. RTP payload types
+// or header-extension ids) are in use within [min_allowed_id, max_allowed_id]
+// and reassigns duplicates to unused values.
+template <typename IdStruct>
+class UsedIds {
+ public:
+  UsedIds(int min_allowed_id, int max_allowed_id)
+      : min_allowed_id_(min_allowed_id),
+        max_allowed_id_(max_allowed_id),
+        next_id_(max_allowed_id) {
+  }
+
+  // Loops through every Id in |ids| and changes its id if it is
+  // already in use by another IdStruct. Call this method with all Ids
+  // in a session description to make sure no duplicate ids exist.
+  // Note that typename Id must be a type of IdStruct.
+  template <typename Id>
+  void FindAndSetIdUsed(std::vector<Id>* ids) {
+    for (typename std::vector<Id>::iterator it = ids->begin();
+         it != ids->end(); ++it) {
+      FindAndSetIdUsed(&*it);
+    }
+  }
+
+  // Finds and sets an unused id if the |idstruct| id is already in use.
+  void FindAndSetIdUsed(IdStruct* idstruct) {
+    const int original_id = idstruct->id;
+    int new_id = idstruct->id;
+
+    if (original_id > max_allowed_id_ || original_id < min_allowed_id_) {
+      // If the original id is not in range - this is an id that can't be
+      // dynamically changed.
+      return;
+    }
+
+    if (IsIdUsed(original_id)) {
+      new_id = FindUnusedId();
+      LOG(LS_WARNING) << "Duplicate id found. Reassigning from " << original_id
+                      << " to " << new_id;
+      idstruct->id = new_id;
+    }
+    SetIdUsed(new_id);
+  }
+
+ private:
+  // Returns the first unused id in reverse order.
+  // This hopefully reduces the risk of more collisions. We want to change
+  // the default ids as little as possible. Note: if the whole range is
+  // exhausted this only ASSERTs; next_id_ may then fall below the minimum.
+  int FindUnusedId() {
+    while (IsIdUsed(next_id_) && next_id_ >= min_allowed_id_) {
+      --next_id_;
+    }
+    ASSERT(next_id_ >= min_allowed_id_);
+    return next_id_;
+  }
+
+  bool IsIdUsed(int new_id) {
+    return id_set_.find(new_id) != id_set_.end();
+  }
+
+  void SetIdUsed(int new_id) {
+    id_set_.insert(new_id);
+  }
+
+  const int min_allowed_id_;
+  const int max_allowed_id_;
+  int next_id_;
+  std::set<int> id_set_;
+};
+
+// Helper class used for finding duplicate RTP payload types among audio,
+// video and data codecs. When bundle is used the payload types may not
+// collide.
+class UsedPayloadTypes : public UsedIds<Codec> {
+ public:
+  UsedPayloadTypes()
+      : UsedIds<Codec>(kDynamicPayloadTypeMin, kDynamicPayloadTypeMax) {
+  }
+
+
+ private:
+  // Dynamic payload type range per RFC 3551 section 3.
+  static const int kDynamicPayloadTypeMin = 96;
+  static const int kDynamicPayloadTypeMax = 127;
+};
+
+// Helper class used for finding duplicate RTP Header extension ids among
+// audio and video extensions.
+class UsedRtpHeaderExtensionIds : public UsedIds<RtpHeaderExtension> {
+ public:
+  UsedRtpHeaderExtensionIds()
+      : UsedIds<RtpHeaderExtension>(kLocalIdMin, kLocalIdMax) {
+  }
+
+ private:
+  // Min and Max local identifier as specified by RFC5285.
+  static const int kLocalIdMin = 1;
+  static const int kLocalIdMax = 255;
+};
+
+// Returns true if the description's protocol is plain or DTLS-wrapped SCTP.
+static bool IsSctp(const MediaContentDescription* desc) {
+  const std::string& protocol = desc->protocol();
+  return protocol == kMediaProtocolSctp || protocol == kMediaProtocolDtlsSctp;
+}
+
+// Adds a StreamParams for each Stream in |streams| with media type
+// |media_type| to |content_description|.
+// |current_streams| - All currently known StreamParams of any media type;
+// new StreamParams are appended to it so later calls (for other media types)
+// can reuse CNAMEs and avoid SSRC collisions.
+template <class C>
+static bool AddStreamParams(
+    MediaType media_type,
+    const MediaSessionOptions::Streams& streams,
+    StreamParamsVec* current_streams,
+    MediaContentDescriptionImpl<C>* content_description,
+    const bool add_legacy_stream) {
+  // RTX needs a second SSRC per stream (retransmission stream).
+  const bool include_rtx_stream =
+    ContainsRtxCodec(content_description->codecs());
+
+  if (streams.empty() && add_legacy_stream) {
+    // TODO(perkj): Remove this legacy stream when all apps use StreamParams.
+    std::vector<uint32> ssrcs;
+    if (IsSctp(content_description)) {
+      GenerateSctpSids(*current_streams, &ssrcs);
+    } else {
+      GenerateSsrcs(*current_streams, include_rtx_stream, &ssrcs);
+    }
+    if (include_rtx_stream) {
+      content_description->AddLegacyStream(ssrcs[0], ssrcs[1]);
+      content_description->set_multistream(true);
+    } else {
+      content_description->AddLegacyStream(ssrcs[0]);
+    }
+    return true;
+  }
+
+  MediaSessionOptions::Streams::const_iterator stream_it;
+  for (stream_it = streams.begin();
+       stream_it != streams.end(); ++stream_it) {
+    if (stream_it->type != media_type)
+      continue;  // Wrong media type.
+
+    StreamParams param;
+    // groupid is empty for StreamParams generated using
+    // MediaSessionDescriptionFactory.
+    if (!GetStreamByIds(*current_streams, "", stream_it->id,
+                        &param)) {
+      // This is a new stream.
+      // Get a CNAME. Either new or same as one of the other synched streams.
+      std::string cname;
+      if (!GenerateCname(*current_streams, streams, stream_it->sync_label,
+                         &cname)) {
+        return false;
+      }
+
+      std::vector<uint32> ssrcs;
+      if (IsSctp(content_description)) {
+        GenerateSctpSids(*current_streams, &ssrcs);
+      } else {
+        GenerateSsrcs(*current_streams, include_rtx_stream, &ssrcs);
+      }
+      StreamParams stream_param;
+      stream_param.id = stream_it->id;
+      stream_param.ssrcs.push_back(ssrcs[0]);
+      if (include_rtx_stream) {
+        stream_param.AddFidSsrc(ssrcs[0], ssrcs[1]);
+        content_description->set_multistream(true);
+      }
+      stream_param.cname = cname;
+      stream_param.sync_label = stream_it->sync_label;
+      content_description->AddStream(stream_param);
+
+      // Store the new StreamParams in current_streams.
+      // This is necessary so that we can use the CNAME for other media types.
+      current_streams->push_back(stream_param);
+    } else {
+      // Stream already exists (e.g. from a previous offer); reuse it as-is.
+      content_description->AddStream(param);
+    }
+  }
+  return true;
+}
+
+// Updates the transport infos of the |sdesc| according to the given
+// |bundle_group|. The transport infos of the content names within the
+// |bundle_group| should be updated to use the ufrag and pwd of the first
+// content within the |bundle_group|. Returns false for a NULL |sdesc|, an
+// empty bundle, or when the first content has no transport info.
+static bool UpdateTransportInfoForBundle(const ContentGroup& bundle_group,
+                                         SessionDescription* sdesc) {
+  // The bundle should not be empty.
+  if (!sdesc || !bundle_group.FirstContentName()) {
+    return false;
+  }
+
+  // We should definitely have a transport for the first content.
+  std::string selected_content_name = *bundle_group.FirstContentName();
+  const TransportInfo* selected_transport_info =
+      sdesc->GetTransportInfoByName(selected_content_name);
+  if (!selected_transport_info) {
+    return false;
+  }
+
+  // Set the other contents to use the same ICE credentials.
+  const std::string selected_ufrag =
+      selected_transport_info->description.ice_ufrag;
+  const std::string selected_pwd =
+      selected_transport_info->description.ice_pwd;
+  for (TransportInfos::iterator it =
+           sdesc->transport_infos().begin();
+       it != sdesc->transport_infos().end(); ++it) {
+    if (bundle_group.HasContentName(it->content_name) &&
+        it->content_name != selected_content_name) {
+      it->description.ice_ufrag = selected_ufrag;
+      it->description.ice_pwd = selected_pwd;
+    }
+  }
+  return true;
+}
+
+// Copies the CryptoParamsVec of the content named |content_name| in |sdesc|
+// into |cryptos|. Returns false on NULL arguments or when the content is
+// missing, not a media content, or has no description.
+static bool GetCryptosByName(const SessionDescription* sdesc,
+                             const std::string& content_name,
+                             CryptoParamsVec* cryptos) {
+  if (sdesc == NULL || cryptos == NULL) {
+    return false;
+  }
+  const ContentInfo* content = sdesc->GetContentByName(content_name);
+  if (!IsMediaContent(content) || content->description == NULL) {
+    return false;
+  }
+  *cryptos = static_cast<const MediaContentDescription*>(
+      content->description)->cryptos();
+  return true;
+}
+
+// Predicate for remove_if: true when |crypto|'s cipher_suite does not occur
+// in |filter| (a NULL filter matches nothing, so everything is "not found").
+// Note: |crypto| is passed by value because ptr_fun requires copyable args.
+static bool CryptoNotFound(const CryptoParams crypto,
+                           const CryptoParamsVec* filter) {
+  if (filter == NULL) {
+    return true;
+  }
+  CryptoParamsVec::const_iterator it = filter->begin();
+  while (it != filter->end()) {
+    if (it->cipher_suite == crypto.cipher_suite) {
+      return false;
+    }
+    ++it;
+  }
+  return true;
+}
+
+// Prunes the |target_cryptos| by removing the crypto params (cipher_suite)
+// which are not available in |filter|.
+static void PruneCryptos(const CryptoParamsVec& filter,
+ CryptoParamsVec* target_cryptos) {
+ if (!target_cryptos) {
+ return;
+ }
+ target_cryptos->erase(std::remove_if(target_cryptos->begin(),
+ target_cryptos->end(),
+ bind2nd(ptr_fun(CryptoNotFound),
+ &filter)),
+ target_cryptos->end());
+}
+
+static bool IsRtpContent(SessionDescription* sdesc,
+ const std::string& content_name) {
+ bool is_rtp = false;
+ ContentInfo* content = sdesc->GetContentByName(content_name);
+ if (IsMediaContent(content)) {
+ MediaContentDescription* media_desc =
+ static_cast<MediaContentDescription*>(content->description);
+ if (!media_desc) {
+ return false;
+ }
+ is_rtp = media_desc->protocol().empty() ||
+ talk_base::starts_with(media_desc->protocol().data(),
+ kMediaProtocolRtpPrefix);
+ }
+ return is_rtp;
+}
+
// Updates the crypto parameters of the |sdesc| according to the given
// |bundle_group|. The crypto parameters of all the RTP contents within
// the |bundle_group| are replaced by the common subset of the cryptos
// available across those contents. Returns false if |sdesc| is NULL, the
// group is empty, a bundled content has no cryptos in common with the
// others, or a content lookup fails.
static bool UpdateCryptoParamsForBundle(const ContentGroup& bundle_group,
                                        SessionDescription* sdesc) {
  // The bundle should not be empty.
  if (!sdesc || !bundle_group.FirstContentName()) {
    return false;
  }

  // Get the common cryptos by intersecting the crypto lists of every
  // RTP content in the group.
  const ContentNames& content_names = bundle_group.content_names();
  CryptoParamsVec common_cryptos;
  for (ContentNames::const_iterator it = content_names.begin();
       it != content_names.end(); ++it) {
    // Non-RTP contents (e.g. SCTP data) don't use SDES cryptos.
    if (!IsRtpContent(sdesc, *it)) {
      continue;
    }
    if (it == content_names.begin()) {
      // Initialize the common_cryptos with the first content in the
      // bundle group.
      if (!GetCryptosByName(sdesc, *it, &common_cryptos)) {
        return false;
      }
      if (common_cryptos.empty()) {
        // If there's no crypto params, we should just return. This is not
        // an error: the session simply isn't using SDES.
        return true;
      }
    } else {
      // Intersect: drop any common crypto not offered by this content.
      CryptoParamsVec cryptos;
      if (!GetCryptosByName(sdesc, *it, &cryptos)) {
        return false;
      }
      PruneCryptos(cryptos, &common_cryptos);
    }
  }

  // The first content had cryptos but the intersection is empty; the
  // bundle can't agree on a cipher suite.
  if (common_cryptos.empty()) {
    return false;
  }

  // Update every RTP content in the group to use the common cryptos.
  for (ContentNames::const_iterator it = content_names.begin();
       it != content_names.end(); ++it) {
    if (!IsRtpContent(sdesc, *it)) {
      continue;
    }
    ContentInfo* content = sdesc->GetContentByName(*it);
    if (IsMediaContent(content)) {
      MediaContentDescription* media_desc =
          static_cast<MediaContentDescription*>(content->description);
      if (!media_desc) {
        return false;
      }
      media_desc->set_cryptos(common_cryptos);
    }
  }
  return true;
}
+
// Returns true if any codec in |codecs| is an RTX codec.
template <class C>
static bool ContainsRtxCodec(const std::vector<C>& codecs) {
  for (typename std::vector<C>::const_iterator it = codecs.begin();
       it != codecs.end(); ++it) {
    if (IsRtxCodec(*it)) {
      return true;
    }
  }
  return false;
}
+
// Returns true if |codec| is the RTX (retransmission) codec, matched by
// name case-insensitively against kRtxCodecName.
template <class C>
static bool IsRtxCodec(const C& codec) {
  return stricmp(codec.name.c_str(), kRtxCodecName) == 0;
}
+
// Create a media content to be offered in a session-initiate,
// according to the given options.rtcp_mux, options.is_muc,
// options.streams, codecs, secure_transport, crypto, and streams. If we
// don't currently have crypto (in current_cryptos) and it is enabled (in
// secure_policy), crypto is created (according to crypto_suites). If
// add_legacy_stream is true, and current_streams is empty, a legacy
// stream is created. The created content is written into |offer|.
// Returns false if stream params can't be added, crypto creation fails,
// or crypto is required but none could be produced.
template <class C>
static bool CreateMediaContentOffer(
    const MediaSessionOptions& options,
    const std::vector<C>& codecs,
    const SecureMediaPolicy& secure_policy,
    const CryptoParamsVec* current_cryptos,
    const std::vector<std::string>& crypto_suites,
    const RtpHeaderExtensions& rtp_extensions,
    bool add_legacy_stream,
    StreamParamsVec* current_streams,
    MediaContentDescriptionImpl<C>* offer) {
  offer->AddCodecs(codecs);
  offer->SortCodecs();

  offer->set_crypto_required(secure_policy == SEC_REQUIRED);
  offer->set_rtcp_mux(options.rtcp_mux_enabled);
  offer->set_multistream(options.is_muc);
  offer->set_rtp_header_extensions(rtp_extensions);

  if (!AddStreamParams(
          offer->type(), options.streams, current_streams,
          offer, add_legacy_stream)) {
    return false;
  }

#ifdef HAVE_SRTP
  // Prefer reusing the session's existing cryptos; only generate fresh
  // ones from |crypto_suites| when none carry over.
  if (secure_policy != SEC_DISABLED) {
    if (current_cryptos) {
      AddMediaCryptos(*current_cryptos, offer);
    }
    if (offer->cryptos().empty()) {
      if (!CreateMediaCryptos(crypto_suites, offer)) {
        return false;
      }
    }
  }
#endif

  // With SRTP compiled out, this is the check that rejects SEC_REQUIRED.
  if (offer->crypto_required() && offer->cryptos().empty()) {
    return false;
  }
  return true;
}
+
// Intersects |local_codecs| with |offered_codecs|. For each local codec
// matching an offered one, the negotiated codec keeps our parameters but
// adopts the offerer's payload id and the intersection of the feedback
// params. RTX codecs are only negotiated when the offer carries
// kCodecParamAssociatedPayloadType. Results are appended to
// |negotiated_codecs| in local-preference order.
template <class C>
static void NegotiateCodecs(const std::vector<C>& local_codecs,
                            const std::vector<C>& offered_codecs,
                            std::vector<C>* negotiated_codecs) {
  typename std::vector<C>::const_iterator ours;
  for (ours = local_codecs.begin();
       ours != local_codecs.end(); ++ours) {
    typename std::vector<C>::const_iterator theirs;
    for (theirs = offered_codecs.begin();
         theirs != offered_codecs.end(); ++theirs) {
      if (ours->Matches(*theirs)) {
        // Start from our codec so local settings win, then merge in the
        // offerer's feedback params and payload id.
        C negotiated = *ours;
        negotiated.IntersectFeedbackParams(*theirs);
        if (IsRtxCodec(negotiated)) {
          // Only negotiate RTX if kCodecParamAssociatedPayloadType has been
          // set.
          std::string apt_value;
          if (!theirs->GetParam(kCodecParamAssociatedPayloadType, &apt_value)) {
            LOG(LS_WARNING) << "RTX missing associated payload type.";
            continue;
          }
          negotiated.SetParam(kCodecParamAssociatedPayloadType, apt_value);
        }
        negotiated.id = theirs->id;
        negotiated_codecs->push_back(negotiated);
      }
    }
  }
}
+
+template <class C>
+static bool FindMatchingCodec(const std::vector<C>& codecs,
+ const C& codec_to_match,
+ C* found_codec) {
+ for (typename std::vector<C>::const_iterator it = codecs.begin();
+ it != codecs.end(); ++it) {
+ if (it->Matches(codec_to_match)) {
+ if (found_codec != NULL) {
+ *found_codec= *it;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
// Adds all codecs from |reference_codecs| to |offered_codecs| that don't
// already exist in |offered_codecs| and ensure the payload types don't
// collide (via |used_pltypes|). Runs in three phases so that an RTX
// codec's associated-payload-type reference stays valid even when the
// codec it points at gets renumbered.
template <class C>
static void FindCodecsToOffer(
    const std::vector<C>& reference_codecs,
    std::vector<C>* offered_codecs,
    UsedPayloadTypes* used_pltypes) {

  // Maps a referenced (pre-renumbering) payload type to the new RTX codec
  // that references it.
  typedef std::map<int, C> RtxCodecReferences;
  RtxCodecReferences new_rtx_codecs;

  // Phase 1: find all new RTX codecs, keyed by the payload type they
  // reference.
  for (typename std::vector<C>::const_iterator it = reference_codecs.begin();
       it != reference_codecs.end(); ++it) {
    if (!FindMatchingCodec<C>(*offered_codecs, *it, NULL) && IsRtxCodec(*it)) {
      C rtx_codec = *it;
      int referenced_pl_type =
          talk_base::FromString<int>(0,
              rtx_codec.params[kCodecParamAssociatedPayloadType]);
      new_rtx_codecs.insert(std::pair<int, C>(referenced_pl_type,
                                              rtx_codec));
    }
  }

  // Phase 2: add all new codecs that are not RTX codecs, renumbering
  // payload types as needed.
  for (typename std::vector<C>::const_iterator it = reference_codecs.begin();
       it != reference_codecs.end(); ++it) {
    if (!FindMatchingCodec<C>(*offered_codecs, *it, NULL) && !IsRtxCodec(*it)) {
      C codec = *it;
      int original_payload_id = codec.id;
      used_pltypes->FindAndSetIdUsed(&codec);
      offered_codecs->push_back(codec);

      // If this codec is referenced by a new RTX codec, update the reference
      // in the RTX codec with the new payload type.
      typename RtxCodecReferences::iterator rtx_it =
          new_rtx_codecs.find(original_payload_id);
      if (rtx_it != new_rtx_codecs.end()) {
        C& rtx_codec = rtx_it->second;
        rtx_codec.params[kCodecParamAssociatedPayloadType] =
            talk_base::ToString(codec.id);
      }
    }
  }

  // Phase 3: add all new RTX codecs, now carrying up-to-date references.
  for (typename RtxCodecReferences::iterator it = new_rtx_codecs.begin();
       it != new_rtx_codecs.end(); ++it) {
    C& rtx_codec = it->second;
    used_pltypes->FindAndSetIdUsed(&rtx_codec);
    offered_codecs->push_back(rtx_codec);
  }
}
+
+
+static bool FindByUri(const RtpHeaderExtensions& extensions,
+ const RtpHeaderExtension& ext_to_match,
+ RtpHeaderExtension* found_extension) {
+ for (RtpHeaderExtensions::const_iterator it = extensions.begin();
+ it != extensions.end(); ++it) {
+ // We assume that all URIs are given in a canonical format.
+ if (it->uri == ext_to_match.uri) {
+ if (found_extension != NULL) {
+ *found_extension= *it;
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static void FindAndSetRtpHdrExtUsed(
+ const RtpHeaderExtensions& reference_extensions,
+ RtpHeaderExtensions* offered_extensions,
+ UsedRtpHeaderExtensionIds* used_extensions) {
+ for (RtpHeaderExtensions::const_iterator it = reference_extensions.begin();
+ it != reference_extensions.end(); ++it) {
+ if (!FindByUri(*offered_extensions, *it, NULL)) {
+ RtpHeaderExtension ext = *it;
+ used_extensions->FindAndSetIdUsed(&ext);
+ offered_extensions->push_back(ext);
+ }
+ }
+}
+
+static void NegotiateRtpHeaderExtensions(
+ const RtpHeaderExtensions& local_extensions,
+ const RtpHeaderExtensions& offered_extensions,
+ RtpHeaderExtensions* negotiated_extenstions) {
+ RtpHeaderExtensions::const_iterator ours;
+ for (ours = local_extensions.begin();
+ ours != local_extensions.end(); ++ours) {
+ RtpHeaderExtension theirs;
+ if (FindByUri(offered_extensions, *ours, &theirs)) {
+ // We respond with their RTP header extension id.
+ negotiated_extenstions->push_back(theirs);
+ }
+ }
+}
+
+static void StripCNCodecs(AudioCodecs* audio_codecs) {
+ AudioCodecs::iterator iter = audio_codecs->begin();
+ while (iter != audio_codecs->end()) {
+ if (stricmp(iter->name.c_str(), kComfortNoiseCodecName) == 0) {
+ iter = audio_codecs->erase(iter);
+ } else {
+ ++iter;
+ }
+ }
+}
+
// Create a media content to be answered in a session-accept,
// according to the given options.rtcp_mux, options.streams, codecs,
// crypto, and streams. If we don't currently have crypto (in
// current_cryptos) and it is enabled (in sdes_policy), crypto is
// selected from the offer. If add_legacy_stream is true, and
// current_streams is empty, a legacy stream is created.
// The codecs, rtcp_mux, and crypto are all negotiated with the offer
// from the incoming session-initiate. If the negotiation fails, this
// method returns false. The created content is written into |answer|.
template <class C>
static bool CreateMediaContentAnswer(
    const MediaContentDescriptionImpl<C>* offer,
    const MediaSessionOptions& options,
    const std::vector<C>& local_codecs,
    const SecureMediaPolicy& sdes_policy,
    const CryptoParamsVec* current_cryptos,
    const RtpHeaderExtensions& local_rtp_extenstions,
    StreamParamsVec* current_streams,
    bool add_legacy_stream,
    bool bundle_enabled,
    MediaContentDescriptionImpl<C>* answer) {
  std::vector<C> negotiated_codecs;
  NegotiateCodecs(local_codecs, offer->codecs(), &negotiated_codecs);
  answer->AddCodecs(negotiated_codecs);
  answer->SortCodecs();
  // Echo back the offered protocol (e.g. RTP/SAVPF).
  answer->set_protocol(offer->protocol());
  RtpHeaderExtensions negotiated_rtp_extensions;
  NegotiateRtpHeaderExtensions(local_rtp_extenstions,
                               offer->rtp_header_extensions(),
                               &negotiated_rtp_extensions);
  answer->set_rtp_header_extensions(negotiated_rtp_extensions);

  // RTCP mux only if both we and the offerer enable it.
  answer->set_rtcp_mux(options.rtcp_mux_enabled && offer->rtcp_mux());

  if (sdes_policy != SEC_DISABLED) {
    // Pick one crypto from the offer; prefer a matching one we already
    // use so re-negotiation keeps the current keys.
    CryptoParams crypto;
    if (SelectCrypto(offer, bundle_enabled, &crypto)) {
      if (current_cryptos) {
        FindMatchingCrypto(*current_cryptos, crypto, &crypto);
      }
      answer->AddCrypto(crypto);
    }
  }

  // Fail if crypto is mandatory (by the offer or by policy) but none was
  // negotiated.
  if (answer->cryptos().empty() &&
      (offer->crypto_required() || sdes_policy == SEC_REQUIRED)) {
    return false;
  }

  if (!AddStreamParams(
          answer->type(), options.streams, current_streams,
          answer, add_legacy_stream)) {
    return false;  // Something went seriously wrong.
  }

  // Make sure the answer media content direction is per default set as
  // described in RFC3264 section 6.1.
  switch (offer->direction()) {
    case MD_INACTIVE:
      answer->set_direction(MD_INACTIVE);
      break;
    case MD_SENDONLY:
      answer->set_direction(MD_RECVONLY);
      break;
    case MD_RECVONLY:
      answer->set_direction(MD_SENDONLY);
      break;
    case MD_SENDRECV:
      answer->set_direction(MD_SENDRECV);
      break;
    default:
      break;
  }

  return true;
}
+
+static bool IsMediaProtocolSupported(MediaType type,
+ const std::string& protocol) {
+ // Data channels can have a protocol of SCTP or SCTP/DTLS.
+ if (type == MEDIA_TYPE_DATA &&
+ (protocol == kMediaProtocolSctp ||
+ protocol == kMediaProtocolDtlsSctp)) {
+ return true;
+ }
+ // Since not all applications serialize and deserialize the media protocol,
+ // we will have to accept |protocol| to be empty.
+ return protocol == kMediaProtocolAvpf || protocol == kMediaProtocolSavpf ||
+ protocol.empty();
+}
+
+static void SetMediaProtocol(bool secure_transport,
+ MediaContentDescription* desc) {
+ if (!desc->cryptos().empty() || secure_transport)
+ desc->set_protocol(kMediaProtocolSavpf);
+ else
+ desc->set_protocol(kMediaProtocolAvpf);
+}
+
+void MediaSessionOptions::AddStream(MediaType type,
+ const std::string& id,
+ const std::string& sync_label) {
+ streams.push_back(Stream(type, id, sync_label));
+
+ if (type == MEDIA_TYPE_VIDEO)
+ has_video = true;
+ else if (type == MEDIA_TYPE_AUDIO)
+ has_audio = true;
+ // If we haven't already set the data_channel_type, and we add a
+ // stream, we assume it's an RTP data stream.
+ else if (type == MEDIA_TYPE_DATA && data_channel_type == DCT_NONE)
+ data_channel_type = DCT_RTP;
+}
+
+void MediaSessionOptions::RemoveStream(MediaType type,
+ const std::string& id) {
+ Streams::iterator stream_it = streams.begin();
+ for (; stream_it != streams.end(); ++stream_it) {
+ if (stream_it->type == type && stream_it->id == id) {
+ streams.erase(stream_it);
+ return;
+ }
+ }
+ ASSERT(false);
+}
+
// Constructs a factory with an empty set of codecs and RTP header
// extensions; SDES disabled and legacy streams enabled by default.
// |transport_desc_factory| is borrowed, not owned.
MediaSessionDescriptionFactory::MediaSessionDescriptionFactory(
    const TransportDescriptionFactory* transport_desc_factory)
    : secure_(SEC_DISABLED),
      add_legacy_(true),
      transport_desc_factory_(transport_desc_factory) {
}
+
// Constructs a factory whose supported codec and RTP header extension
// lists are seeded from |channel_manager|. |transport_desc_factory| is
// borrowed, not owned.
MediaSessionDescriptionFactory::MediaSessionDescriptionFactory(
    ChannelManager* channel_manager,
    const TransportDescriptionFactory* transport_desc_factory)
    : secure_(SEC_DISABLED),
      add_legacy_(true),
      transport_desc_factory_(transport_desc_factory) {
  channel_manager->GetSupportedAudioCodecs(&audio_codecs_);
  channel_manager->GetSupportedAudioRtpHeaderExtensions(&audio_rtp_extensions_);
  channel_manager->GetSupportedVideoCodecs(&video_codecs_);
  channel_manager->GetSupportedVideoRtpHeaderExtensions(&video_rtp_extensions_);
  channel_manager->GetSupportedDataCodecs(&data_codecs_);
}
+
// Creates a session-initiate offer containing the m=audio/m=video/m=data
// contents requested in |options|. Codecs, streams, RTP header extension
// ids and transports are carried over from |current_description| (when
// non-NULL) so that re-offers stay consistent with the running session.
// When options.bundle_enabled is set, all contents are grouped into a
// BUNDLE and share ICE credentials and crypto params.
// Returns a heap-allocated description owned by the caller, or NULL on
// failure.
SessionDescription* MediaSessionDescriptionFactory::CreateOffer(
    const MediaSessionOptions& options,
    const SessionDescription* current_description) const {
  bool secure_transport = (transport_desc_factory_->secure() != SEC_DISABLED);

  scoped_ptr<SessionDescription> offer(new SessionDescription());

  StreamParamsVec current_streams;
  GetCurrentStreamParams(current_description, &current_streams);

  AudioCodecs audio_codecs;
  VideoCodecs video_codecs;
  DataCodecs data_codecs;
  GetCodecsToOffer(current_description, &audio_codecs, &video_codecs,
                   &data_codecs);

  if (!options.vad_enabled) {
    // If application doesn't want CN codecs in offer.
    StripCNCodecs(&audio_codecs);
  }

  RtpHeaderExtensions audio_rtp_extensions;
  RtpHeaderExtensions video_rtp_extensions;
  GetRtpHdrExtsToOffer(current_description, &audio_rtp_extensions,
                       &video_rtp_extensions);

  // Handle m=audio.
  if (options.has_audio) {
    scoped_ptr<AudioContentDescription> audio(new AudioContentDescription());
    std::vector<std::string> crypto_suites;
    GetSupportedAudioCryptoSuites(&crypto_suites);
    if (!CreateMediaContentOffer(
            options,
            audio_codecs,
            secure(),
            GetCryptos(GetFirstAudioContentDescription(current_description)),
            crypto_suites,
            audio_rtp_extensions,
            add_legacy_,
            &current_streams,
            audio.get())) {
      return NULL;
    }

    audio->set_lang(lang_);
    SetMediaProtocol(secure_transport, audio.get());
    offer->AddContent(CN_AUDIO, NS_JINGLE_RTP, audio.release());
    if (!AddTransportOffer(CN_AUDIO, options.transport_options,
                           current_description, offer.get())) {
      return NULL;
    }
  }

  // Handle m=video.
  if (options.has_video) {
    scoped_ptr<VideoContentDescription> video(new VideoContentDescription());
    std::vector<std::string> crypto_suites;
    GetSupportedVideoCryptoSuites(&crypto_suites);
    if (!CreateMediaContentOffer(
            options,
            video_codecs,
            secure(),
            GetCryptos(GetFirstVideoContentDescription(current_description)),
            crypto_suites,
            video_rtp_extensions,
            add_legacy_,
            &current_streams,
            video.get())) {
      return NULL;
    }

    video->set_bandwidth(options.video_bandwidth);
    SetMediaProtocol(secure_transport, video.get());
    offer->AddContent(CN_VIDEO, NS_JINGLE_RTP, video.release());
    if (!AddTransportOffer(CN_VIDEO, options.transport_options,
                           current_description, offer.get())) {
      return NULL;
    }
  }

  // Handle m=data.
  if (options.has_data()) {
    scoped_ptr<DataContentDescription> data(new DataContentDescription());
    bool is_sctp = (options.data_channel_type == DCT_SCTP);

    std::vector<std::string> crypto_suites;
    cricket::SecurePolicy sdes_policy = secure();
    if (is_sctp) {
      // SDES doesn't make sense for SCTP, so we disable it, and we only
      // get SDES crypto suites for RTP-based data channels.
      sdes_policy = cricket::SEC_DISABLED;
      // Unlike SetMediaProtocol below, we need to set the protocol
      // before we call CreateMediaContentOffer. Otherwise,
      // CreateMediaContentOffer won't know this is SCTP and will
      // generate SSRCs rather than SIDs.
      data->set_protocol(
          secure_transport ? kMediaProtocolDtlsSctp : kMediaProtocolSctp);
    } else {
      GetSupportedDataCryptoSuites(&crypto_suites);
    }

    if (!CreateMediaContentOffer(
            options,
            data_codecs,
            sdes_policy,
            GetCryptos(GetFirstDataContentDescription(current_description)),
            crypto_suites,
            RtpHeaderExtensions(),
            add_legacy_,
            &current_streams,
            data.get())) {
      return NULL;
    }

    if (is_sctp) {
      offer->AddContent(CN_DATA, NS_JINGLE_DRAFT_SCTP, data.release());
    } else {
      data->set_bandwidth(options.data_bandwidth);
      SetMediaProtocol(secure_transport, data.get());
      offer->AddContent(CN_DATA, NS_JINGLE_RTP, data.release());
    }
    if (!AddTransportOffer(CN_DATA, options.transport_options,
                           current_description, offer.get())) {
      return NULL;
    }
  }

  // Bundle the contents together, if we've been asked to do so, and update any
  // parameters that need to be tweaked for BUNDLE.
  if (options.bundle_enabled) {
    ContentGroup offer_bundle(GROUP_TYPE_BUNDLE);
    for (ContentInfos::const_iterator content = offer->contents().begin();
         content != offer->contents().end(); ++content) {
      offer_bundle.AddContentName(content->name);
    }
    offer->AddGroup(offer_bundle);
    if (!UpdateTransportInfoForBundle(offer_bundle, offer.get())) {
      LOG(LS_ERROR) << "CreateOffer failed to UpdateTransportInfoForBundle.";
      return NULL;
    }
    if (!UpdateCryptoParamsForBundle(offer_bundle, offer.get())) {
      LOG(LS_ERROR) << "CreateOffer failed to UpdateCryptoParamsForBundle.";
      return NULL;
    }
  }

  return offer.release();
}
+
// Creates a session-accept answer to |offer|. Per RFC 3264 the answer
// keeps one m-line per offered m-line (unsupported ones are marked
// rejected rather than dropped) and, per XEP-0167, reuses the offer's
// payload ids. When both sides enable BUNDLE, the accepted contents are
// grouped and share ICE credentials and crypto params.
// Returns a heap-allocated description owned by the caller, or NULL on
// failure.
SessionDescription* MediaSessionDescriptionFactory::CreateAnswer(
    const SessionDescription* offer, const MediaSessionOptions& options,
    const SessionDescription* current_description) const {
  // The answer contains the intersection of the codecs in the offer with the
  // codecs we support, ordered by our local preference. As indicated by
  // XEP-0167, we retain the same payload ids from the offer in the answer.
  scoped_ptr<SessionDescription> answer(new SessionDescription());

  StreamParamsVec current_streams;
  GetCurrentStreamParams(current_description, &current_streams);

  bool bundle_enabled =
      offer->HasGroup(GROUP_TYPE_BUNDLE) && options.bundle_enabled;

  // Handle m=audio.
  const ContentInfo* audio_content = GetFirstAudioContent(offer);
  if (audio_content) {
    scoped_ptr<TransportDescription> audio_transport(
        CreateTransportAnswer(audio_content->name, offer,
                              options.transport_options,
                              current_description));
    if (!audio_transport) {
      return NULL;
    }

    AudioCodecs audio_codecs = audio_codecs_;
    if (!options.vad_enabled) {
      StripCNCodecs(&audio_codecs);
    }

    scoped_ptr<AudioContentDescription> audio_answer(
        new AudioContentDescription());
    // Do not require or create SDES cryptos if DTLS is used.
    cricket::SecurePolicy sdes_policy =
        audio_transport->secure() ? cricket::SEC_DISABLED : secure();
    if (!CreateMediaContentAnswer(
            static_cast<const AudioContentDescription*>(
                audio_content->description),
            options,
            audio_codecs,
            sdes_policy,
            GetCryptos(GetFirstAudioContentDescription(current_description)),
            audio_rtp_extensions_,
            &current_streams,
            add_legacy_,
            bundle_enabled,
            audio_answer.get())) {
      return NULL;  // Fails the session setup.
    }

    bool rejected = !options.has_audio || audio_content->rejected ||
        !IsMediaProtocolSupported(MEDIA_TYPE_AUDIO,
                                  audio_answer->protocol());
    if (!rejected) {
      AddTransportAnswer(audio_content->name, *(audio_transport.get()),
                         answer.get());
    } else {
      // RFC 3264
      // The answer MUST contain the same number of m-lines as the offer.
      LOG(LS_INFO) << "Audio is not supported in the answer.";
    }

    answer->AddContent(audio_content->name, audio_content->type, rejected,
                       audio_answer.release());
  } else {
    LOG(LS_INFO) << "Audio is not available in the offer.";
  }

  // Handle m=video.
  const ContentInfo* video_content = GetFirstVideoContent(offer);
  if (video_content) {
    scoped_ptr<TransportDescription> video_transport(
        CreateTransportAnswer(video_content->name, offer,
                              options.transport_options,
                              current_description));
    if (!video_transport) {
      return NULL;
    }

    scoped_ptr<VideoContentDescription> video_answer(
        new VideoContentDescription());
    // Do not require or create SDES cryptos if DTLS is used.
    cricket::SecurePolicy sdes_policy =
        video_transport->secure() ? cricket::SEC_DISABLED : secure();
    if (!CreateMediaContentAnswer(
            static_cast<const VideoContentDescription*>(
                video_content->description),
            options,
            video_codecs_,
            sdes_policy,
            GetCryptos(GetFirstVideoContentDescription(current_description)),
            video_rtp_extensions_,
            &current_streams,
            add_legacy_,
            bundle_enabled,
            video_answer.get())) {
      return NULL;
    }
    bool rejected = !options.has_video || video_content->rejected ||
        !IsMediaProtocolSupported(MEDIA_TYPE_VIDEO, video_answer->protocol());
    if (!rejected) {
      if (!AddTransportAnswer(video_content->name, *(video_transport.get()),
                              answer.get())) {
        return NULL;
      }
      video_answer->set_bandwidth(options.video_bandwidth);
    } else {
      // RFC 3264
      // The answer MUST contain the same number of m-lines as the offer.
      LOG(LS_INFO) << "Video is not supported in the answer.";
    }
    answer->AddContent(video_content->name, video_content->type, rejected,
                       video_answer.release());
  } else {
    LOG(LS_INFO) << "Video is not available in the offer.";
  }

  // Handle m=data.
  const ContentInfo* data_content = GetFirstDataContent(offer);
  if (data_content) {
    scoped_ptr<TransportDescription> data_transport(
        CreateTransportAnswer(data_content->name, offer,
                              options.transport_options,
                              current_description));
    if (!data_transport) {
      return NULL;
    }
    scoped_ptr<DataContentDescription> data_answer(
        new DataContentDescription());
    // Do not require or create SDES cryptos if DTLS is used.
    cricket::SecurePolicy sdes_policy =
        data_transport->secure() ? cricket::SEC_DISABLED : secure();
    if (!CreateMediaContentAnswer(
            static_cast<const DataContentDescription*>(
                data_content->description),
            options,
            data_codecs_,
            sdes_policy,
            GetCryptos(GetFirstDataContentDescription(current_description)),
            RtpHeaderExtensions(),
            &current_streams,
            add_legacy_,
            bundle_enabled,
            data_answer.get())) {
      return NULL;  // Fails the session setup.
    }

    bool rejected = !options.has_data() || data_content->rejected ||
        !IsMediaProtocolSupported(MEDIA_TYPE_DATA, data_answer->protocol());
    if (!rejected) {
      data_answer->set_bandwidth(options.data_bandwidth);
      if (!AddTransportAnswer(data_content->name, *(data_transport.get()),
                              answer.get())) {
        return NULL;
      }
    } else {
      // RFC 3264
      // The answer MUST contain the same number of m-lines as the offer.
      LOG(LS_INFO) << "Data is not supported in the answer.";
    }
    answer->AddContent(data_content->name, data_content->type, rejected,
                       data_answer.release());
  } else {
    LOG(LS_INFO) << "Data is not available in the offer.";
  }

  // If the offer supports BUNDLE, and we want to use it too, create a BUNDLE
  // group in the answer with the appropriate content names.
  if (offer->HasGroup(GROUP_TYPE_BUNDLE) && options.bundle_enabled) {
    const ContentGroup* offer_bundle = offer->GetGroupByName(GROUP_TYPE_BUNDLE);
    ContentGroup answer_bundle(GROUP_TYPE_BUNDLE);
    for (ContentInfos::const_iterator content = answer->contents().begin();
         content != answer->contents().end(); ++content) {
      if (!content->rejected && offer_bundle->HasContentName(content->name)) {
        answer_bundle.AddContentName(content->name);
      }
    }
    if (answer_bundle.FirstContentName()) {
      answer->AddGroup(answer_bundle);

      // Share the same ICE credentials and crypto params across all contents,
      // as BUNDLE requires.
      if (!UpdateTransportInfoForBundle(answer_bundle, answer.get())) {
        LOG(LS_ERROR) << "CreateAnswer failed to UpdateTransportInfoForBundle.";
        return NULL;
      }

      if (!UpdateCryptoParamsForBundle(answer_bundle, answer.get())) {
        LOG(LS_ERROR) << "CreateAnswer failed to UpdateCryptoParamsForBundle.";
        return NULL;
      }
    }
  }

  return answer.release();
}
+
// Returns the TransportDescription for |content_name| in
// |current_description|, or NULL if |current_description| is NULL or has
// no transport info for that content. (The previous comment incorrectly
// said a new one was returned.) The pointer aliases into
// |current_description| and must not outlive it.
static const TransportDescription* GetTransportDescription(
    const std::string& content_name,
    const SessionDescription* current_description) {
  const TransportDescription* desc = NULL;
  if (current_description) {
    const TransportInfo* info =
        current_description->GetTransportInfoByName(content_name);
    if (info) {
      desc = &info->description;
    }
  }
  return desc;
}
+
// Computes the audio/video/data codec lists to place in an offer:
// codecs already negotiated in |current_description| keep their payload
// types, then the factory's supported codecs are appended with
// non-colliding payload types. The output vectors are cleared first.
void MediaSessionDescriptionFactory::GetCodecsToOffer(
    const SessionDescription* current_description,
    AudioCodecs* audio_codecs,
    VideoCodecs* video_codecs,
    DataCodecs* data_codecs) const {
  UsedPayloadTypes used_pltypes;
  audio_codecs->clear();
  video_codecs->clear();
  data_codecs->clear();


  // First - get all codecs from the current description if the media type
  // is used.
  // Add them to |used_pltypes| so the payloadtype is not reused if a new media
  // type is added.
  if (current_description) {
    const AudioContentDescription* audio =
        GetFirstAudioContentDescription(current_description);
    if (audio) {
      *audio_codecs = audio->codecs();
      used_pltypes.FindAndSetIdUsed<AudioCodec>(audio_codecs);
    }
    const VideoContentDescription* video =
        GetFirstVideoContentDescription(current_description);
    if (video) {
      *video_codecs = video->codecs();
      used_pltypes.FindAndSetIdUsed<VideoCodec>(video_codecs);
    }
    const DataContentDescription* data =
        GetFirstDataContentDescription(current_description);
    if (data) {
      *data_codecs = data->codecs();
      used_pltypes.FindAndSetIdUsed<DataCodec>(data_codecs);
    }
  }

  // Add our codecs that are not in |current_description|.
  FindCodecsToOffer<AudioCodec>(audio_codecs_, audio_codecs, &used_pltypes);
  FindCodecsToOffer<VideoCodec>(video_codecs_, video_codecs, &used_pltypes);
  FindCodecsToOffer<DataCodec>(data_codecs_, data_codecs, &used_pltypes);
}
+
// Computes the audio/video RTP header extension lists to place in an
// offer: extensions already negotiated in |current_description| keep
// their ids, then the factory's defaults are appended with non-colliding
// ids. The output vectors are cleared first. Note that audio and video
// share one id space (|used_ids|).
void MediaSessionDescriptionFactory::GetRtpHdrExtsToOffer(
    const SessionDescription* current_description,
    RtpHeaderExtensions* audio_extensions,
    RtpHeaderExtensions* video_extensions) const {
  UsedRtpHeaderExtensionIds used_ids;
  audio_extensions->clear();
  video_extensions->clear();

  // First - get all extensions from the current description if the media type
  // is used.
  // Add them to |used_ids| so the local ids are not reused if a new media
  // type is added.
  if (current_description) {
    const AudioContentDescription* audio =
        GetFirstAudioContentDescription(current_description);
    if (audio) {
      *audio_extensions = audio->rtp_header_extensions();
      used_ids.FindAndSetIdUsed(audio_extensions);
    }
    const VideoContentDescription* video =
        GetFirstVideoContentDescription(current_description);
    if (video) {
      *video_extensions = video->rtp_header_extensions();
      used_ids.FindAndSetIdUsed(video_extensions);
    }
  }

  // Add our default RTP header extensions that are not in
  // |current_description|.
  FindAndSetRtpHdrExtUsed(audio_rtp_header_extensions(), audio_extensions,
                          &used_ids);
  FindAndSetRtpHdrExtUsed(video_rtp_header_extensions(), video_extensions,
                          &used_ids);
}
+
// Creates a transport offer for |content_name| (reusing the transport
// from |current_desc| when re-offering) and adds it to |offer_desc|.
// Returns false if no transport factory is set, or the offer can't be
// created or added.
bool MediaSessionDescriptionFactory::AddTransportOffer(
    const std::string& content_name,
    const TransportOptions& transport_options,
    const SessionDescription* current_desc,
    SessionDescription* offer_desc) const {
  if (!transport_desc_factory_)
    return false;
  // May be NULL for a brand-new content; the factory then starts fresh.
  const TransportDescription* current_tdesc =
      GetTransportDescription(content_name, current_desc);
  talk_base::scoped_ptr<TransportDescription> new_tdesc(
      transport_desc_factory_->CreateOffer(transport_options, current_tdesc));
  bool ret = (new_tdesc.get() != NULL &&
      offer_desc->AddTransportInfo(TransportInfo(content_name, *new_tdesc)));
  if (!ret) {
    LOG(LS_ERROR)
        << "Failed to AddTransportOffer, content name=" << content_name;
  }
  return ret;
}
+
+TransportDescription* MediaSessionDescriptionFactory::CreateTransportAnswer(
+ const std::string& content_name,
+ const SessionDescription* offer_desc,
+ const TransportOptions& transport_options,
+ const SessionDescription* current_desc) const {
+ if (!transport_desc_factory_)
+ return NULL;
+ const TransportDescription* offer_tdesc =
+ GetTransportDescription(content_name, offer_desc);
+ const TransportDescription* current_tdesc =
+ GetTransportDescription(content_name, current_desc);
+ return
+ transport_desc_factory_->CreateAnswer(offer_tdesc, transport_options,
+ current_tdesc);
+}
+
+bool MediaSessionDescriptionFactory::AddTransportAnswer(
+ const std::string& content_name,
+ const TransportDescription& transport_desc,
+ SessionDescription* answer_desc) const {
+ if (!answer_desc->AddTransportInfo(TransportInfo(content_name,
+ transport_desc))) {
+ LOG(LS_ERROR)
+ << "Failed to AddTransportAnswer, content name=" << content_name;
+ return false;
+ }
+ return true;
+}
+
+bool IsMediaContent(const ContentInfo* content) {
+ return (content &&
+ (content->type == NS_JINGLE_RTP ||
+ content->type == NS_JINGLE_DRAFT_SCTP));
+}
+
// Returns true if |content| is an audio media content.
bool IsAudioContent(const ContentInfo* content) {
  return IsMediaContentOfType(content, MEDIA_TYPE_AUDIO);
}
+
// Returns true if |content| is a video media content.
bool IsVideoContent(const ContentInfo* content) {
  return IsMediaContentOfType(content, MEDIA_TYPE_VIDEO);
}
+
// Returns true if |content| is a data media content.
bool IsDataContent(const ContentInfo* content) {
  return IsMediaContentOfType(content, MEDIA_TYPE_DATA);
}
+
+static const ContentInfo* GetFirstMediaContent(const ContentInfos& contents,
+ MediaType media_type) {
+ for (ContentInfos::const_iterator content = contents.begin();
+ content != contents.end(); content++) {
+ if (IsMediaContentOfType(&*content, media_type)) {
+ return &*content;
+ }
+ }
+ return NULL;
+}
+
// Returns the first audio content in |contents|, or NULL if none.
const ContentInfo* GetFirstAudioContent(const ContentInfos& contents) {
  return GetFirstMediaContent(contents, MEDIA_TYPE_AUDIO);
}
+
// Returns the first video content in |contents|, or NULL if none.
const ContentInfo* GetFirstVideoContent(const ContentInfos& contents) {
  return GetFirstMediaContent(contents, MEDIA_TYPE_VIDEO);
}
+
// Returns the first data content in |contents|, or NULL if none.
const ContentInfo* GetFirstDataContent(const ContentInfos& contents) {
  return GetFirstMediaContent(contents, MEDIA_TYPE_DATA);
}
+
+static const ContentInfo* GetFirstMediaContent(const SessionDescription* sdesc,
+ MediaType media_type) {
+ if (sdesc == NULL)
+ return NULL;
+
+ return GetFirstMediaContent(sdesc->contents(), media_type);
+}
+
+const ContentInfo* GetFirstAudioContent(const SessionDescription* sdesc) {
+ return GetFirstMediaContent(sdesc, MEDIA_TYPE_AUDIO);
+}
+
+const ContentInfo* GetFirstVideoContent(const SessionDescription* sdesc) {
+ return GetFirstMediaContent(sdesc, MEDIA_TYPE_VIDEO);
+}
+
+const ContentInfo* GetFirstDataContent(const SessionDescription* sdesc) {
+ return GetFirstMediaContent(sdesc, MEDIA_TYPE_DATA);
+}
+
+const MediaContentDescription* GetFirstMediaContentDescription(
+ const SessionDescription* sdesc, MediaType media_type) {
+ const ContentInfo* content = GetFirstMediaContent(sdesc, media_type);
+ const ContentDescription* description = content ? content->description : NULL;
+ return static_cast<const MediaContentDescription*>(description);
+}
+
+const AudioContentDescription* GetFirstAudioContentDescription(
+ const SessionDescription* sdesc) {
+ return static_cast<const AudioContentDescription*>(
+ GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_AUDIO));
+}
+
+const VideoContentDescription* GetFirstVideoContentDescription(
+ const SessionDescription* sdesc) {
+ return static_cast<const VideoContentDescription*>(
+ GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_VIDEO));
+}
+
+const DataContentDescription* GetFirstDataContentDescription(
+ const SessionDescription* sdesc) {
+ return static_cast<const DataContentDescription*>(
+ GetFirstMediaContentDescription(sdesc, MEDIA_TYPE_DATA));
+}
+
+bool GetMediaChannelNameFromComponent(
+ int component, MediaType media_type, std::string* channel_name) {
+ if (media_type == MEDIA_TYPE_AUDIO) {
+ if (component == ICE_CANDIDATE_COMPONENT_RTP) {
+ *channel_name = GICE_CHANNEL_NAME_RTP;
+ return true;
+ } else if (component == ICE_CANDIDATE_COMPONENT_RTCP) {
+ *channel_name = GICE_CHANNEL_NAME_RTCP;
+ return true;
+ }
+ } else if (media_type == MEDIA_TYPE_VIDEO) {
+ if (component == ICE_CANDIDATE_COMPONENT_RTP) {
+ *channel_name = GICE_CHANNEL_NAME_VIDEO_RTP;
+ return true;
+ } else if (component == ICE_CANDIDATE_COMPONENT_RTCP) {
+ *channel_name = GICE_CHANNEL_NAME_VIDEO_RTCP;
+ return true;
+ }
+ } else if (media_type == MEDIA_TYPE_DATA) {
+ if (component == ICE_CANDIDATE_COMPONENT_RTP) {
+ *channel_name = GICE_CHANNEL_NAME_DATA_RTP;
+ return true;
+ } else if (component == ICE_CANDIDATE_COMPONENT_RTCP) {
+ *channel_name = GICE_CHANNEL_NAME_DATA_RTCP;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool GetMediaComponentFromChannelName(
+ const std::string& channel_name, int* component) {
+ if (channel_name == GICE_CHANNEL_NAME_RTP ||
+ channel_name == GICE_CHANNEL_NAME_VIDEO_RTP ||
+ channel_name == GICE_CHANNEL_NAME_DATA_RTP) {
+ *component = ICE_CANDIDATE_COMPONENT_RTP;
+ return true;
+ } else if (channel_name == GICE_CHANNEL_NAME_RTCP ||
+ channel_name == GICE_CHANNEL_NAME_VIDEO_RTCP ||
+             channel_name == GICE_CHANNEL_NAME_DATA_RTCP) {
+ *component = ICE_CANDIDATE_COMPONENT_RTCP;
+ return true;
+ }
+
+ return false;
+}
+
+bool GetMediaTypeFromChannelName(
+ const std::string& channel_name, MediaType* media_type) {
+ if (channel_name == GICE_CHANNEL_NAME_RTP ||
+ channel_name == GICE_CHANNEL_NAME_RTCP) {
+ *media_type = MEDIA_TYPE_AUDIO;
+ return true;
+ } else if (channel_name == GICE_CHANNEL_NAME_VIDEO_RTP ||
+ channel_name == GICE_CHANNEL_NAME_VIDEO_RTCP) {
+ *media_type = MEDIA_TYPE_VIDEO;
+ return true;
+ } else if (channel_name == GICE_CHANNEL_NAME_DATA_RTP ||
+ channel_name == GICE_CHANNEL_NAME_DATA_RTCP) {
+ *media_type = MEDIA_TYPE_DATA;
+ return true;
+ }
+
+ return false;
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasession.h b/chromium/third_party/libjingle/source/talk/session/media/mediasession.h
new file mode 100644
index 00000000000..327480466a7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasession.h
@@ -0,0 +1,497 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Types and classes used in media session descriptions.
+
+#ifndef TALK_SESSION_MEDIA_MEDIASESSION_H_
+#define TALK_SESSION_MEDIA_MEDIASESSION_H_
+
+#include <string>
+#include <vector>
+#include <algorithm>
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/base/mediaengine.h" // For DataChannelType
+#include "talk/media/base/streamparams.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/p2p/base/transport.h"
+#include "talk/p2p/base/transportdescriptionfactory.h"
+
+namespace cricket {
+
+class ChannelManager;
+typedef std::vector<AudioCodec> AudioCodecs;
+typedef std::vector<VideoCodec> VideoCodecs;
+typedef std::vector<DataCodec> DataCodecs;
+typedef std::vector<CryptoParams> CryptoParamsVec;
+typedef std::vector<RtpHeaderExtension> RtpHeaderExtensions;
+
+// TODO(juberti): Replace SecureMediaPolicy with SecurePolicy everywhere.
+typedef SecurePolicy SecureMediaPolicy;
+
+enum MediaType {
+ MEDIA_TYPE_AUDIO,
+ MEDIA_TYPE_VIDEO,
+ MEDIA_TYPE_DATA
+};
+
+enum MediaContentDirection {
+ MD_INACTIVE,
+ MD_SENDONLY,
+ MD_RECVONLY,
+ MD_SENDRECV
+};
+
+// RFC4585 RTP/AVPF
+extern const char kMediaProtocolAvpf[];
+// RFC5124 RTP/SAVPF
+extern const char kMediaProtocolSavpf[];
+
+extern const char kMediaProtocolRtpPrefix[];
+
+extern const char kMediaProtocolSctp[];
+extern const char kMediaProtocolDtlsSctp[];
+
+// Options to control how session descriptions are generated.
+const int kAutoBandwidth = -1;
+const int kBufferedModeDisabled = 0;
+// TODO(pthatcher): This is imposed by usrsctp lib. I have no idea
+// why it is 9. Figure out why, and make it bigger, hopefully up to
+// 2^16-1.
+const uint32 kMaxSctpSid = 9;
+
+struct MediaSessionOptions {
+ MediaSessionOptions() :
+ has_audio(true), // Audio enabled by default.
+ has_video(false),
+ data_channel_type(DCT_NONE),
+ is_muc(false),
+ vad_enabled(true), // When disabled, removes all CN codecs from SDP.
+ rtcp_mux_enabled(true),
+ bundle_enabled(false),
+ video_bandwidth(kAutoBandwidth),
+ data_bandwidth(kDataMaxBandwidth) {
+ }
+
+ bool has_data() const { return data_channel_type != DCT_NONE; }
+
+ // Add a stream with MediaType type and id.
+ // All streams with the same sync_label will get the same CNAME.
+ // All ids must be unique.
+ void AddStream(MediaType type,
+ const std::string& id,
+ const std::string& sync_label);
+ void RemoveStream(MediaType type, const std::string& id);
+
+ bool has_audio;
+ bool has_video;
+ DataChannelType data_channel_type;
+ bool is_muc;
+ bool vad_enabled;
+ bool rtcp_mux_enabled;
+ bool bundle_enabled;
+ // bps. -1 == auto.
+ int video_bandwidth;
+ int data_bandwidth;
+ TransportOptions transport_options;
+
+ struct Stream {
+ Stream(MediaType type,
+ const std::string& id,
+ const std::string& sync_label)
+ : type(type), id(id), sync_label(sync_label) {
+ }
+ MediaType type;
+ std::string id;
+ std::string sync_label;
+ };
+
+ typedef std::vector<Stream> Streams;
+ Streams streams;
+};
+
+// "content" (as used in XEP-0166) descriptions for voice and video.
+class MediaContentDescription : public ContentDescription {
+ public:
+ MediaContentDescription()
+ : rtcp_mux_(false),
+ bandwidth_(kAutoBandwidth),
+ crypto_required_(false),
+ rtp_header_extensions_set_(false),
+ multistream_(false),
+ conference_mode_(false),
+ partial_(false),
+ buffered_mode_latency_(kBufferedModeDisabled),
+ direction_(MD_SENDRECV) {
+ }
+
+ virtual MediaType type() const = 0;
+ virtual bool has_codecs() const = 0;
+
+ // |protocol| is the expected media transport protocol, such as RTP/AVPF,
+ // RTP/SAVPF or SCTP/DTLS.
+ std::string protocol() const { return protocol_; }
+ void set_protocol(const std::string& protocol) { protocol_ = protocol; }
+
+ MediaContentDirection direction() const { return direction_; }
+ void set_direction(MediaContentDirection direction) {
+ direction_ = direction;
+ }
+
+ bool rtcp_mux() const { return rtcp_mux_; }
+ void set_rtcp_mux(bool mux) { rtcp_mux_ = mux; }
+
+ int bandwidth() const { return bandwidth_; }
+ void set_bandwidth(int bandwidth) { bandwidth_ = bandwidth; }
+
+ const std::vector<CryptoParams>& cryptos() const { return cryptos_; }
+ void AddCrypto(const CryptoParams& params) {
+ cryptos_.push_back(params);
+ }
+ void set_cryptos(const std::vector<CryptoParams>& cryptos) {
+ cryptos_ = cryptos;
+ }
+ bool crypto_required() const { return crypto_required_; }
+ void set_crypto_required(bool crypto) {
+ crypto_required_ = crypto;
+ }
+
+ const RtpHeaderExtensions& rtp_header_extensions() const {
+ return rtp_header_extensions_;
+ }
+ void set_rtp_header_extensions(const RtpHeaderExtensions& extensions) {
+ rtp_header_extensions_ = extensions;
+ rtp_header_extensions_set_ = true;
+ }
+ void AddRtpHeaderExtension(const RtpHeaderExtension& ext) {
+ rtp_header_extensions_.push_back(ext);
+ rtp_header_extensions_set_ = true;
+ }
+ void ClearRtpHeaderExtensions() {
+ rtp_header_extensions_.clear();
+ rtp_header_extensions_set_ = true;
+ }
+ // We can't always tell if an empty list of header extensions is
+ // because the other side doesn't support them, or just isn't hooked up to
+ // signal them. For now we assume an empty list means no signaling, but
+ // provide the ClearRtpHeaderExtensions method to allow "no support" to be
+ // clearly indicated (i.e. when derived from other information).
+ bool rtp_header_extensions_set() const {
+ return rtp_header_extensions_set_;
+ }
+ // True iff the client supports multiple streams.
+ void set_multistream(bool multistream) { multistream_ = multistream; }
+ bool multistream() const { return multistream_; }
+ const StreamParamsVec& streams() const {
+ return streams_;
+ }
+ // TODO(pthatcher): Remove this by giving mediamessage.cc access
+ // to MediaContentDescription
+ StreamParamsVec& mutable_streams() {
+ return streams_;
+ }
+ void AddStream(const StreamParams& stream) {
+ streams_.push_back(stream);
+ }
+ // Legacy streams have an ssrc, but nothing else.
+ void AddLegacyStream(uint32 ssrc) {
+ streams_.push_back(StreamParams::CreateLegacy(ssrc));
+ }
+ void AddLegacyStream(uint32 ssrc, uint32 fid_ssrc) {
+ StreamParams sp = StreamParams::CreateLegacy(ssrc);
+ sp.AddFidSsrc(ssrc, fid_ssrc);
+ streams_.push_back(sp);
+ }
+  // Sets the CNAME of each StreamParams if it has not been set.
+ // This can be used to set the CNAME of legacy streams.
+ void SetCnameIfEmpty(const std::string& cname) {
+ for (cricket::StreamParamsVec::iterator it = streams_.begin();
+ it != streams_.end(); ++it) {
+ if (it->cname.empty())
+ it->cname = cname;
+ }
+ }
+ uint32 first_ssrc() const {
+ if (streams_.empty()) {
+ return 0;
+ }
+ return streams_[0].first_ssrc();
+ }
+ bool has_ssrcs() const {
+ if (streams_.empty()) {
+ return false;
+ }
+ return streams_[0].has_ssrcs();
+ }
+
+ void set_conference_mode(bool enable) { conference_mode_ = enable; }
+ bool conference_mode() const { return conference_mode_; }
+
+ void set_partial(bool partial) { partial_ = partial; }
+ bool partial() const { return partial_; }
+
+ void set_buffered_mode_latency(int latency) {
+ buffered_mode_latency_ = latency;
+ }
+ int buffered_mode_latency() const { return buffered_mode_latency_; }
+
+ protected:
+ bool rtcp_mux_;
+ int bandwidth_;
+ std::string protocol_;
+ std::vector<CryptoParams> cryptos_;
+ bool crypto_required_;
+ std::vector<RtpHeaderExtension> rtp_header_extensions_;
+ bool rtp_header_extensions_set_;
+ bool multistream_;
+ StreamParamsVec streams_;
+ bool conference_mode_;
+ bool partial_;
+ int buffered_mode_latency_;
+ MediaContentDirection direction_;
+};
+
+template <class C>
+class MediaContentDescriptionImpl : public MediaContentDescription {
+ public:
+ struct PreferenceSort {
+ bool operator()(C a, C b) { return a.preference > b.preference; }
+ };
+
+ const std::vector<C>& codecs() const { return codecs_; }
+ void set_codecs(const std::vector<C>& codecs) { codecs_ = codecs; }
+ virtual bool has_codecs() const { return !codecs_.empty(); }
+ bool HasCodec(int id) {
+ bool found = false;
+ for (typename std::vector<C>::iterator iter = codecs_.begin();
+ iter != codecs_.end(); ++iter) {
+ if (iter->id == id) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+ }
+ void AddCodec(const C& codec) {
+ codecs_.push_back(codec);
+ }
+ void AddCodecs(const std::vector<C>& codecs) {
+ typename std::vector<C>::const_iterator codec;
+ for (codec = codecs.begin(); codec != codecs.end(); ++codec) {
+ AddCodec(*codec);
+ }
+ }
+ void SortCodecs() {
+ std::sort(codecs_.begin(), codecs_.end(), PreferenceSort());
+ }
+
+ private:
+ std::vector<C> codecs_;
+};
+
+class AudioContentDescription : public MediaContentDescriptionImpl<AudioCodec> {
+ public:
+ AudioContentDescription() :
+ agc_minus_10db_(false) {}
+
+ virtual ContentDescription* Copy() const {
+ return new AudioContentDescription(*this);
+ }
+ virtual MediaType type() const { return MEDIA_TYPE_AUDIO; }
+
+ const std::string &lang() const { return lang_; }
+ void set_lang(const std::string &lang) { lang_ = lang; }
+
+ bool agc_minus_10db() const { return agc_minus_10db_; }
+ void set_agc_minus_10db(bool enable) {
+ agc_minus_10db_ = enable;
+ }
+
+ private:
+ bool agc_minus_10db_;
+
+ private:
+ std::string lang_;
+};
+
+class VideoContentDescription : public MediaContentDescriptionImpl<VideoCodec> {
+ public:
+ virtual ContentDescription* Copy() const {
+ return new VideoContentDescription(*this);
+ }
+ virtual MediaType type() const { return MEDIA_TYPE_VIDEO; }
+};
+
+class DataContentDescription : public MediaContentDescriptionImpl<DataCodec> {
+ public:
+ virtual ContentDescription* Copy() const {
+ return new DataContentDescription(*this);
+ }
+ virtual MediaType type() const { return MEDIA_TYPE_DATA; }
+};
+
+// Creates media session descriptions according to the supplied codecs and
+// other fields, as well as the supplied per-call options.
+// When creating answers, performs the appropriate negotiation
+// of the various fields to determine the proper result.
+class MediaSessionDescriptionFactory {
+ public:
+ // Default ctor; use methods below to set configuration.
+ // The TransportDescriptionFactory is not owned by MediaSessionDescFactory,
+ // so it must be kept alive by the user of this class.
+ explicit MediaSessionDescriptionFactory(
+ const TransportDescriptionFactory* factory);
+ // This helper automatically sets up the factory to get its configuration
+ // from the specified ChannelManager.
+ MediaSessionDescriptionFactory(ChannelManager* cmanager,
+ const TransportDescriptionFactory* factory);
+
+ const AudioCodecs& audio_codecs() const { return audio_codecs_; }
+ void set_audio_codecs(const AudioCodecs& codecs) { audio_codecs_ = codecs; }
+ void set_audio_rtp_header_extensions(const RtpHeaderExtensions& extensions) {
+ audio_rtp_extensions_ = extensions;
+ }
+ const RtpHeaderExtensions& audio_rtp_header_extensions() const {
+ return audio_rtp_extensions_;
+ }
+ const VideoCodecs& video_codecs() const { return video_codecs_; }
+ void set_video_codecs(const VideoCodecs& codecs) { video_codecs_ = codecs; }
+ void set_video_rtp_header_extensions(const RtpHeaderExtensions& extensions) {
+ video_rtp_extensions_ = extensions;
+ }
+ const RtpHeaderExtensions& video_rtp_header_extensions() const {
+ return video_rtp_extensions_;
+ }
+ const DataCodecs& data_codecs() const { return data_codecs_; }
+ void set_data_codecs(const DataCodecs& codecs) { data_codecs_ = codecs; }
+ SecurePolicy secure() const { return secure_; }
+ void set_secure(SecurePolicy s) { secure_ = s; }
+ // Decides if a StreamParams shall be added to the audio and video media
+ // content in SessionDescription when CreateOffer and CreateAnswer is called
+  // even if |options| doesn't include a Stream. This is needed to support legacy
+  // applications. |add_legacy_| is true by default.
+ void set_add_legacy_streams(bool add_legacy) { add_legacy_ = add_legacy; }
+
+ SessionDescription* CreateOffer(
+ const MediaSessionOptions& options,
+ const SessionDescription* current_description) const;
+ SessionDescription* CreateAnswer(
+ const SessionDescription* offer,
+ const MediaSessionOptions& options,
+ const SessionDescription* current_description) const;
+
+ private:
+ void GetCodecsToOffer(const SessionDescription* current_description,
+ AudioCodecs* audio_codecs,
+ VideoCodecs* video_codecs,
+ DataCodecs* data_codecs) const;
+ void GetRtpHdrExtsToOffer(const SessionDescription* current_description,
+ RtpHeaderExtensions* audio_extensions,
+ RtpHeaderExtensions* video_extensions) const;
+ bool AddTransportOffer(
+ const std::string& content_name,
+ const TransportOptions& transport_options,
+ const SessionDescription* current_desc,
+ SessionDescription* offer) const;
+
+ TransportDescription* CreateTransportAnswer(
+ const std::string& content_name,
+ const SessionDescription* offer_desc,
+ const TransportOptions& transport_options,
+ const SessionDescription* current_desc) const;
+
+ bool AddTransportAnswer(
+ const std::string& content_name,
+ const TransportDescription& transport_desc,
+ SessionDescription* answer_desc) const;
+
+ AudioCodecs audio_codecs_;
+ RtpHeaderExtensions audio_rtp_extensions_;
+ VideoCodecs video_codecs_;
+ RtpHeaderExtensions video_rtp_extensions_;
+ DataCodecs data_codecs_;
+ SecurePolicy secure_;
+ bool add_legacy_;
+ std::string lang_;
+ const TransportDescriptionFactory* transport_desc_factory_;
+};
+
+// Convenience functions.
+bool IsMediaContent(const ContentInfo* content);
+bool IsAudioContent(const ContentInfo* content);
+bool IsVideoContent(const ContentInfo* content);
+bool IsDataContent(const ContentInfo* content);
+const ContentInfo* GetFirstAudioContent(const ContentInfos& contents);
+const ContentInfo* GetFirstVideoContent(const ContentInfos& contents);
+const ContentInfo* GetFirstDataContent(const ContentInfos& contents);
+const ContentInfo* GetFirstAudioContent(const SessionDescription* sdesc);
+const ContentInfo* GetFirstVideoContent(const SessionDescription* sdesc);
+const ContentInfo* GetFirstDataContent(const SessionDescription* sdesc);
+const AudioContentDescription* GetFirstAudioContentDescription(
+ const SessionDescription* sdesc);
+const VideoContentDescription* GetFirstVideoContentDescription(
+ const SessionDescription* sdesc);
+const DataContentDescription* GetFirstDataContentDescription(
+ const SessionDescription* sdesc);
+bool GetStreamBySsrc(
+ const SessionDescription* sdesc, MediaType media_type,
+ uint32 ssrc, StreamParams* stream_out);
+bool GetStreamByIds(
+ const SessionDescription* sdesc, MediaType media_type,
+ const std::string& groupid, const std::string& id,
+ StreamParams* stream_out);
+
+// Functions for translating media candidate names.
+
+// For converting between media ICE component and G-ICE channel
+// names. For example:
+// "rtp" <=> 1
+// "rtcp" <=> 2
+// "video_rtp" <=> 1
+// "video_rtcp" <=> 2
+// Will not convert in the general case of arbitrary channel names,
+// but is useful for cases where we have candidates for media
+// channels.
+// returns false if there is no mapping.
+bool GetMediaChannelNameFromComponent(
+ int component, cricket::MediaType media_type, std::string* channel_name);
+bool GetMediaComponentFromChannelName(
+ const std::string& channel_name, int* component);
+bool GetMediaTypeFromChannelName(
+ const std::string& channel_name, cricket::MediaType* media_type);
+
+void GetSupportedAudioCryptoSuites(std::vector<std::string>* crypto_suites);
+void GetSupportedVideoCryptoSuites(std::vector<std::string>* crypto_suites);
+void GetSupportedDataCryptoSuites(std::vector<std::string>* crypto_suites);
+void GetSupportedDefaultCryptoSuites(std::vector<std::string>* crypto_suites);
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIASESSION_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasession_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/mediasession_unittest.cc
new file mode 100644
index 00000000000..6e04915704a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasession_unittest.cc
@@ -0,0 +1,1905 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <vector>
+
+#include "talk/base/gunit.h"
+#include "talk/base/fakesslidentity.h"
+#include "talk/base/messagedigest.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/base/testutils.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/transportdescription.h"
+#include "talk/p2p/base/transportinfo.h"
+#include "talk/session/media/mediasession.h"
+#include "talk/session/media/srtpfilter.h"
+
+#ifdef HAVE_SRTP
+#define ASSERT_CRYPTO(cd, s, cs) \
+ ASSERT_FALSE(cd->crypto_required()); \
+ ASSERT_EQ(s, cd->cryptos().size()); \
+ ASSERT_EQ(std::string(cs), cd->cryptos()[0].cipher_suite)
+#else
+#define ASSERT_CRYPTO(cd, s, cs) \
+ ASSERT_FALSE(cd->crypto_required()); \
+ ASSERT_EQ(0U, cd->cryptos().size());
+#endif
+
+typedef std::vector<cricket::Candidate> Candidates;
+
+using cricket::MediaContentDescription;
+using cricket::MediaSessionDescriptionFactory;
+using cricket::MediaSessionOptions;
+using cricket::MediaType;
+using cricket::SessionDescription;
+using cricket::SsrcGroup;
+using cricket::StreamParams;
+using cricket::StreamParamsVec;
+using cricket::TransportDescription;
+using cricket::TransportDescriptionFactory;
+using cricket::TransportInfo;
+using cricket::ContentInfo;
+using cricket::CryptoParamsVec;
+using cricket::AudioContentDescription;
+using cricket::VideoContentDescription;
+using cricket::DataContentDescription;
+using cricket::GetFirstAudioContentDescription;
+using cricket::GetFirstVideoContentDescription;
+using cricket::GetFirstDataContentDescription;
+using cricket::kAutoBandwidth;
+using cricket::AudioCodec;
+using cricket::VideoCodec;
+using cricket::DataCodec;
+using cricket::NS_JINGLE_RTP;
+using cricket::MEDIA_TYPE_AUDIO;
+using cricket::MEDIA_TYPE_VIDEO;
+using cricket::MEDIA_TYPE_DATA;
+using cricket::RtpHeaderExtension;
+using cricket::SEC_DISABLED;
+using cricket::SEC_ENABLED;
+using cricket::SEC_REQUIRED;
+using cricket::CS_AES_CM_128_HMAC_SHA1_32;
+using cricket::CS_AES_CM_128_HMAC_SHA1_80;
+
+static const AudioCodec kAudioCodecs1[] = {
+ AudioCodec(103, "ISAC", 16000, -1, 1, 6),
+ AudioCodec(102, "iLBC", 8000, 13300, 1, 5),
+ AudioCodec(0, "PCMU", 8000, 64000, 1, 4),
+ AudioCodec(8, "PCMA", 8000, 64000, 1, 3),
+ AudioCodec(117, "red", 8000, 0, 1, 2),
+ AudioCodec(107, "CN", 48000, 0, 1, 1)
+};
+
+static const AudioCodec kAudioCodecs2[] = {
+ AudioCodec(126, "speex", 16000, 22000, 1, 3),
+ AudioCodec(127, "iLBC", 8000, 13300, 1, 2),
+ AudioCodec(0, "PCMU", 8000, 64000, 1, 1),
+};
+
+static const AudioCodec kAudioCodecsAnswer[] = {
+ AudioCodec(102, "iLBC", 8000, 13300, 1, 2),
+ AudioCodec(0, "PCMU", 8000, 64000, 1, 1),
+};
+
+static const VideoCodec kVideoCodecs1[] = {
+ VideoCodec(96, "H264-SVC", 320, 200, 30, 2),
+ VideoCodec(97, "H264", 320, 200, 30, 1)
+};
+
+static const VideoCodec kVideoCodecs2[] = {
+ VideoCodec(126, "H264", 320, 200, 30, 2),
+ VideoCodec(127, "H263", 320, 200, 30, 1)
+};
+
+static const VideoCodec kVideoCodecsAnswer[] = {
+ VideoCodec(97, "H264", 320, 200, 30, 2)
+};
+
+static const DataCodec kDataCodecs1[] = {
+ DataCodec(98, "binary-data", 2),
+ DataCodec(99, "utf8-text", 1)
+};
+
+static const DataCodec kDataCodecs2[] = {
+ DataCodec(126, "binary-data", 2),
+ DataCodec(127, "utf8-text", 1)
+};
+
+static const DataCodec kDataCodecsAnswer[] = {
+ DataCodec(98, "binary-data", 2),
+ DataCodec(99, "utf8-text", 1)
+};
+
+static const RtpHeaderExtension kAudioRtpExtension1[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8),
+ RtpHeaderExtension("http://google.com/testing/audio_something", 10),
+};
+
+static const RtpHeaderExtension kAudioRtpExtension2[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 2),
+ RtpHeaderExtension("http://google.com/testing/audio_something_else", 8),
+};
+
+static const RtpHeaderExtension kAudioRtpExtensionAnswer[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8),
+};
+
+static const RtpHeaderExtension kVideoRtpExtension1[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
+ RtpHeaderExtension("http://google.com/testing/video_something", 15),
+};
+
+static const RtpHeaderExtension kVideoRtpExtension2[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:toffset", 2),
+ RtpHeaderExtension("http://google.com/testing/video_something_else", 14),
+};
+
+static const RtpHeaderExtension kVideoRtpExtensionAnswer[] = {
+ RtpHeaderExtension("urn:ietf:params:rtp-hdrext:toffset", 14),
+};
+
+static const uint32 kFec1Ssrc[] = {10, 11};
+static const uint32 kFec2Ssrc[] = {20, 21};
+static const uint32 kFec3Ssrc[] = {30, 31};
+
+static const char kMediaStream1[] = "stream_1";
+static const char kMediaStream2[] = "stream_2";
+static const char kVideoTrack1[] = "video_1";
+static const char kVideoTrack2[] = "video_2";
+static const char kAudioTrack1[] = "audio_1";
+static const char kAudioTrack2[] = "audio_2";
+static const char kAudioTrack3[] = "audio_3";
+static const char kDataTrack1[] = "data_1";
+static const char kDataTrack2[] = "data_2";
+static const char kDataTrack3[] = "data_3";
+
+class MediaSessionDescriptionFactoryTest : public testing::Test {
+ public:
+ MediaSessionDescriptionFactoryTest()
+ : f1_(&tdf1_), f2_(&tdf2_), id1_("id1"), id2_("id2") {
+ f1_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs1));
+ f1_.set_video_codecs(MAKE_VECTOR(kVideoCodecs1));
+ f1_.set_data_codecs(MAKE_VECTOR(kDataCodecs1));
+ f2_.set_audio_codecs(MAKE_VECTOR(kAudioCodecs2));
+ f2_.set_video_codecs(MAKE_VECTOR(kVideoCodecs2));
+ f2_.set_data_codecs(MAKE_VECTOR(kDataCodecs2));
+ tdf1_.set_identity(&id1_);
+ tdf2_.set_identity(&id2_);
+ }
+
+
+ bool CompareCryptoParams(const CryptoParamsVec& c1,
+ const CryptoParamsVec& c2) {
+ if (c1.size() != c2.size())
+ return false;
+ for (size_t i = 0; i < c1.size(); ++i)
+ if (c1[i].tag != c2[i].tag || c1[i].cipher_suite != c2[i].cipher_suite ||
+ c1[i].key_params != c2[i].key_params ||
+ c1[i].session_params != c2[i].session_params)
+ return false;
+ return true;
+ }
+
+ void TestTransportInfo(bool offer, const MediaSessionOptions& options,
+ bool has_current_desc) {
+ const std::string current_audio_ufrag = "current_audio_ufrag";
+ const std::string current_audio_pwd = "current_audio_pwd";
+ const std::string current_video_ufrag = "current_video_ufrag";
+ const std::string current_video_pwd = "current_video_pwd";
+ const std::string current_data_ufrag = "current_data_ufrag";
+ const std::string current_data_pwd = "current_data_pwd";
+ talk_base::scoped_ptr<SessionDescription> current_desc;
+ talk_base::scoped_ptr<SessionDescription> desc;
+ if (has_current_desc) {
+ current_desc.reset(new SessionDescription());
+ EXPECT_TRUE(current_desc->AddTransportInfo(
+ TransportInfo("audio",
+ TransportDescription("", std::vector<std::string>(),
+ current_audio_ufrag,
+ current_audio_pwd,
+ cricket::ICEMODE_FULL,
+ NULL, Candidates()))));
+ EXPECT_TRUE(current_desc->AddTransportInfo(
+ TransportInfo("video",
+ TransportDescription("", std::vector<std::string>(),
+ current_video_ufrag,
+ current_video_pwd,
+ cricket::ICEMODE_FULL,
+ NULL, Candidates()))));
+ EXPECT_TRUE(current_desc->AddTransportInfo(
+ TransportInfo("data",
+ TransportDescription("", std::vector<std::string>(),
+ current_data_ufrag,
+ current_data_pwd,
+ cricket::ICEMODE_FULL,
+ NULL, Candidates()))));
+ }
+ if (offer) {
+ desc.reset(f1_.CreateOffer(options, current_desc.get()));
+ } else {
+ talk_base::scoped_ptr<SessionDescription> offer;
+ offer.reset(f1_.CreateOffer(options, NULL));
+ desc.reset(f1_.CreateAnswer(offer.get(), options, current_desc.get()));
+ }
+ ASSERT_TRUE(desc.get() != NULL);
+ const TransportInfo* ti_audio = desc->GetTransportInfoByName("audio");
+ if (options.has_audio) {
+ EXPECT_TRUE(ti_audio != NULL);
+ if (has_current_desc) {
+ EXPECT_EQ(current_audio_ufrag, ti_audio->description.ice_ufrag);
+ EXPECT_EQ(current_audio_pwd, ti_audio->description.ice_pwd);
+ } else {
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
+ ti_audio->description.ice_ufrag.size());
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
+ ti_audio->description.ice_pwd.size());
+ }
+
+ } else {
+ EXPECT_TRUE(ti_audio == NULL);
+ }
+ const TransportInfo* ti_video = desc->GetTransportInfoByName("video");
+ if (options.has_video) {
+ EXPECT_TRUE(ti_video != NULL);
+ if (options.bundle_enabled) {
+ EXPECT_EQ(ti_audio->description.ice_ufrag,
+ ti_video->description.ice_ufrag);
+ EXPECT_EQ(ti_audio->description.ice_pwd,
+ ti_video->description.ice_pwd);
+ } else {
+ if (has_current_desc) {
+ EXPECT_EQ(current_video_ufrag, ti_video->description.ice_ufrag);
+ EXPECT_EQ(current_video_pwd, ti_video->description.ice_pwd);
+ } else {
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
+ ti_video->description.ice_ufrag.size());
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
+ ti_video->description.ice_pwd.size());
+ }
+ }
+ } else {
+ EXPECT_TRUE(ti_video == NULL);
+ }
+ const TransportInfo* ti_data = desc->GetTransportInfoByName("data");
+ if (options.has_data()) {
+ EXPECT_TRUE(ti_data != NULL);
+ if (options.bundle_enabled) {
+ EXPECT_EQ(ti_audio->description.ice_ufrag,
+ ti_data->description.ice_ufrag);
+ EXPECT_EQ(ti_audio->description.ice_pwd,
+ ti_data->description.ice_pwd);
+ } else {
+ if (has_current_desc) {
+ EXPECT_EQ(current_data_ufrag, ti_data->description.ice_ufrag);
+ EXPECT_EQ(current_data_pwd, ti_data->description.ice_pwd);
+ } else {
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_UFRAG_LENGTH),
+ ti_data->description.ice_ufrag.size());
+ EXPECT_EQ(static_cast<size_t>(cricket::ICE_PWD_LENGTH),
+ ti_data->description.ice_pwd.size());
+ }
+ }
+ } else {
+ EXPECT_TRUE(ti_video == NULL);
+ }
+ }
+
+ // Verifies SDES crypto negotiation under BUNDLE for an audio+video+RTP-data
+ // session. When |offer| is true, checks a bundled re-offer built on top of a
+ // non-bundled reference offer; otherwise checks an answer to a bundled
+ // offer. In both cases all bundled contents must share one crypto, taken
+ // from the reference audio content.
+ void TestCryptoWithBundle(bool offer) {
+ f1_.set_secure(SEC_ENABLED);
+ MediaSessionOptions options;
+ options.has_audio = true;
+ options.has_video = true;
+ options.data_channel_type = cricket::DCT_RTP;
+ talk_base::scoped_ptr<SessionDescription> ref_desc;
+ talk_base::scoped_ptr<SessionDescription> desc;
+ if (offer) {
+ // First create a non-bundled offer, then re-offer with BUNDLE enabled so
+ // the factory must reconcile the per-content cryptos into one.
+ options.bundle_enabled = false;
+ ref_desc.reset(f1_.CreateOffer(options, NULL));
+ options.bundle_enabled = true;
+ desc.reset(f1_.CreateOffer(options, ref_desc.get()));
+ } else {
+ // Answer a bundled offer directly.
+ options.bundle_enabled = true;
+ ref_desc.reset(f1_.CreateOffer(options, NULL));
+ desc.reset(f1_.CreateAnswer(ref_desc.get(), options, NULL));
+ }
+ ASSERT_TRUE(desc.get() != NULL);
+ const cricket::MediaContentDescription* audio_media_desc =
+ static_cast<const cricket::MediaContentDescription*>(
+ desc.get()->GetContentDescriptionByName("audio"));
+ ASSERT_TRUE(audio_media_desc != NULL);
+ const cricket::MediaContentDescription* video_media_desc =
+ static_cast<const cricket::MediaContentDescription*>(
+ desc.get()->GetContentDescriptionByName("video"));
+ ASSERT_TRUE(video_media_desc != NULL);
+ // Audio and video must carry identical crypto params: exactly one suite,
+ // and it must be AES_CM_128_HMAC_SHA1_80.
+ EXPECT_TRUE(CompareCryptoParams(audio_media_desc->cryptos(),
+ video_media_desc->cryptos()));
+ EXPECT_EQ(1u, audio_media_desc->cryptos().size());
+ EXPECT_EQ(std::string(CS_AES_CM_128_HMAC_SHA1_80),
+ audio_media_desc->cryptos()[0].cipher_suite);
+
+ // Verify the selected crypto is one from the reference audio
+ // media content.
+ const cricket::MediaContentDescription* ref_audio_media_desc =
+ static_cast<const cricket::MediaContentDescription*>(
+ ref_desc.get()->GetContentDescriptionByName("audio"));
+ bool found = false;
+ for (size_t i = 0; i < ref_audio_media_desc->cryptos().size(); ++i) {
+ if (ref_audio_media_desc->cryptos()[i].Matches(
+ audio_media_desc->cryptos()[0])) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_TRUE(found);
+ }
+
+ // Tests that the audio and video media direction is set to
+ // |expected_direction_in_answer| in an answer if the offer direction is set
+ // to |direction_in_offer|.
+ void TestMediaDirectionInAnswer(
+ cricket::MediaContentDirection direction_in_offer,
+ cricket::MediaContentDirection expected_direction_in_answer) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // Patch the requested direction into both contents of the offer before
+ // handing it to the answering factory.
+ ContentInfo* ac_offer= offer->GetContentByName("audio");
+ ASSERT_TRUE(ac_offer != NULL);
+ AudioContentDescription* acd_offer =
+ static_cast<AudioContentDescription*>(ac_offer->description);
+ acd_offer->set_direction(direction_in_offer);
+ ContentInfo* vc_offer= offer->GetContentByName("video");
+ ASSERT_TRUE(vc_offer != NULL);
+ VideoContentDescription* vcd_offer =
+ static_cast<VideoContentDescription*>(vc_offer->description);
+ vcd_offer->set_direction(direction_in_offer);
+
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+ const AudioContentDescription* acd_answer =
+ GetFirstAudioContentDescription(answer.get());
+ EXPECT_EQ(expected_direction_in_answer, acd_answer->direction());
+ const VideoContentDescription* vcd_answer =
+ GetFirstVideoContentDescription(answer.get());
+ EXPECT_EQ(expected_direction_in_answer, vcd_answer->direction());
+ }
+
+ // Returns true when the audio content in |content| offers no "CN"
+ // (comfort noise) codec, false as soon as one is found.
+ bool VerifyNoCNCodecs(const cricket::ContentInfo* content) {
+ const cricket::ContentDescription* description = content->description;
+ ASSERT(description != NULL);
+ const cricket::AudioContentDescription* audio_desc =
+ static_cast<const cricket::AudioContentDescription*>(description);
+ ASSERT(audio_desc != NULL);
+ const size_t codec_count = audio_desc->codecs().size();
+ for (size_t index = 0; index != codec_count; ++index) {
+ if (audio_desc->codecs()[index].name == "CN") {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ protected:
+ MediaSessionDescriptionFactory f1_;
+ MediaSessionDescriptionFactory f2_;
+ TransportDescriptionFactory tdf1_;
+ TransportDescriptionFactory tdf2_;
+ talk_base::FakeSSLIdentity id1_;
+ talk_base::FakeSSLIdentity id2_;
+};
+
+// Create a typical audio offer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioOffer) {
+ f1_.set_secure(SEC_ENABLED);
+ // Default MediaSessionOptions: audio only, no video content at all.
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(MediaSessionOptions(), NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ const ContentInfo* ac = offer->GetContentByName("audio");
+ const ContentInfo* vc = offer->GetContentByName("video");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc == NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ EXPECT_EQ(f1_.audio_codecs(), acd->codecs());
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
+ EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
+ // With SEC_ENABLED the audio offer carries two crypto suites.
+ ASSERT_CRYPTO(acd, 2U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), acd->protocol());
+}
+
+// Create a typical video offer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoOffer) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ f1_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription>
+ offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // A video offer still contains an audio content alongside the video one.
+ const ContentInfo* ac = offer->GetContentByName("audio");
+ const ContentInfo* vc = offer->GetContentByName("video");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc != NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), vc->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ const VideoContentDescription* vcd =
+ static_cast<const VideoContentDescription*>(vc->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ EXPECT_EQ(f1_.audio_codecs(), acd->codecs());
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
+ EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
+ ASSERT_CRYPTO(acd, 2U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), acd->protocol());
+ EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
+ EXPECT_EQ(f1_.video_codecs(), vcd->codecs());
+ EXPECT_NE(0U, vcd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(kAutoBandwidth, vcd->bandwidth()); // default bandwidth (auto)
+ EXPECT_TRUE(vcd->rtcp_mux()); // rtcp-mux defaults on
+ // Video only offers one crypto suite, unlike audio which offers two.
+ ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), vcd->protocol());
+}
+
+// Test creating an offer with bundle where the codecs have the same dynamic
+// RTP payload type. The test verifies that the offer doesn't contain
+// duplicate RTP payload types.
+TEST_F(MediaSessionDescriptionFactoryTest, TestBundleOfferWithSameCodecPlType) {
+ // f2_'s first audio, video and data codec deliberately share one id;
+ // confirm that precondition before exercising the factory.
+ const VideoCodec& offered_video_codec = f2_.video_codecs()[0];
+ const AudioCodec& offered_audio_codec = f2_.audio_codecs()[0];
+ const DataCodec& offered_data_codec = f2_.data_codecs()[0];
+ ASSERT_EQ(offered_video_codec.id, offered_audio_codec.id);
+ ASSERT_EQ(offered_video_codec.id, offered_data_codec.id);
+
+ MediaSessionOptions opts;
+ opts.has_audio = true;
+ opts.has_video = true;
+ opts.data_channel_type = cricket::DCT_RTP;
+ opts.bundle_enabled = true;
+ talk_base::scoped_ptr<SessionDescription>
+ offer(f2_.CreateOffer(opts, NULL));
+ const VideoContentDescription* vcd =
+ GetFirstVideoContentDescription(offer.get());
+ const AudioContentDescription* acd =
+ GetFirstAudioContentDescription(offer.get());
+ const DataContentDescription* dcd =
+ GetFirstDataContentDescription(offer.get());
+ ASSERT_TRUE(NULL != vcd);
+ ASSERT_TRUE(NULL != acd);
+ ASSERT_TRUE(NULL != dcd);
+ // Payload ids must have been reassigned to be pairwise distinct, while the
+ // codec names themselves are preserved.
+ EXPECT_NE(vcd->codecs()[0].id, acd->codecs()[0].id);
+ EXPECT_NE(vcd->codecs()[0].id, dcd->codecs()[0].id);
+ EXPECT_NE(acd->codecs()[0].id, dcd->codecs()[0].id);
+ EXPECT_EQ(vcd->codecs()[0].name, offered_video_codec.name);
+ EXPECT_EQ(acd->codecs()[0].name, offered_audio_codec.name);
+ EXPECT_EQ(dcd->codecs()[0].name, offered_data_codec.name);
+}
+
+// Test creating an updated offer with bundle, audio, video and data
+// after an audio only session has been negotiated.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ TestCreateUpdatedVideoOfferWithBundle) {
+ f1_.set_secure(SEC_ENABLED);
+ f2_.set_secure(SEC_ENABLED);
+ // First negotiate an audio-only bundled session.
+ MediaSessionOptions opts;
+ opts.has_audio = true;
+ opts.has_video = false;
+ opts.data_channel_type = cricket::DCT_NONE;
+ opts.bundle_enabled = true;
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+
+ // Then build an updated offer (based on the negotiated answer) that adds
+ // video and RTP data to the bundle.
+ MediaSessionOptions updated_opts;
+ updated_opts.has_audio = true;
+ updated_opts.has_video = true;
+ updated_opts.data_channel_type = cricket::DCT_RTP;
+ updated_opts.bundle_enabled = true;
+ talk_base::scoped_ptr<SessionDescription> updated_offer(f1_.CreateOffer(
+ updated_opts, answer.get()));
+
+ const AudioContentDescription* acd =
+ GetFirstAudioContentDescription(updated_offer.get());
+ const VideoContentDescription* vcd =
+ GetFirstVideoContentDescription(updated_offer.get());
+ const DataContentDescription* dcd =
+ GetFirstDataContentDescription(updated_offer.get());
+ EXPECT_TRUE(NULL != vcd);
+ EXPECT_TRUE(NULL != acd);
+ EXPECT_TRUE(NULL != dcd);
+
+ // All bundled contents should share the single negotiated crypto suite.
+ ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), acd->protocol());
+ ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), vcd->protocol());
+ ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), dcd->protocol());
+}
+// Create a typical data offer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataOffer) {
+ MediaSessionOptions opts;
+ opts.data_channel_type = cricket::DCT_RTP;
+ f1_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription>
+ offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // A data offer still contains an audio content alongside the data one.
+ const ContentInfo* ac = offer->GetContentByName("audio");
+ const ContentInfo* dc = offer->GetContentByName("data");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(dc != NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), dc->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ const DataContentDescription* dcd =
+ static_cast<const DataContentDescription*>(dc->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ EXPECT_EQ(f1_.audio_codecs(), acd->codecs());
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // default bandwidth (auto)
+ EXPECT_TRUE(acd->rtcp_mux()); // rtcp-mux defaults on
+ ASSERT_CRYPTO(acd, 2U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), acd->protocol());
+ EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type());
+ EXPECT_EQ(f1_.data_codecs(), dcd->codecs());
+ EXPECT_NE(0U, dcd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(cricket::kDataMaxBandwidth,
+ dcd->bandwidth()); // default data bandwidth cap
+ EXPECT_TRUE(dcd->rtcp_mux()); // rtcp-mux defaults on
+ ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), dcd->protocol());
+}
+
+// Create an audio, video offer without legacy StreamParams and verify that
+// neither content description carries any ssrcs.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ TestCreateOfferWithoutLegacyStreams) {
+ MediaSessionOptions options;
+ options.has_video = true;
+ f1_.set_add_legacy_streams(false);
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(options, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ const ContentInfo* audio_content = offer->GetContentByName("audio");
+ ASSERT_TRUE(audio_content != NULL);
+ const ContentInfo* video_content = offer->GetContentByName("video");
+ ASSERT_TRUE(video_content != NULL);
+ const AudioContentDescription* audio_desc =
+ static_cast<const AudioContentDescription*>(audio_content->description);
+ const VideoContentDescription* video_desc =
+ static_cast<const VideoContentDescription*>(video_content->description);
+
+ // No legacy streams requested, so there must be no StreamParams.
+ EXPECT_FALSE(video_desc->has_ssrcs());
+ EXPECT_FALSE(audio_desc->has_ssrcs());
+}
+
+// Create a typical audio answer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswer) {
+ f1_.set_secure(SEC_ENABLED);
+ f2_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(MediaSessionOptions(), NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), MediaSessionOptions(), NULL));
+ // Audio-only offer => audio-only answer, no video content at all.
+ const ContentInfo* ac = answer->GetContentByName("audio");
+ const ContentInfo* vc = answer->GetContentByName("video");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc == NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
+ EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
+ // The answer narrows the two offered suites down to one.
+ ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), acd->protocol());
+}
+
+// Create a typical video answer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswer) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ f1_.set_secure(SEC_ENABLED);
+ f2_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+ const ContentInfo* ac = answer->GetContentByName("audio");
+ const ContentInfo* vc = answer->GetContentByName("video");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc != NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), vc->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ const VideoContentDescription* vcd =
+ static_cast<const VideoContentDescription*>(vc->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ // The answer contains the codec intersection, not f2_'s full list.
+ EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
+ ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
+ EXPECT_EQ(MAKE_VECTOR(kVideoCodecsAnswer), vcd->codecs());
+ EXPECT_NE(0U, vcd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_TRUE(vcd->rtcp_mux()); // negotiated rtcp-mux
+ ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), vcd->protocol());
+}
+
+// Create a typical RTP data answer, and ensure it matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateDataAnswer) {
+ MediaSessionOptions opts;
+ opts.data_channel_type = cricket::DCT_RTP;
+ f1_.set_secure(SEC_ENABLED);
+ f2_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+ const ContentInfo* ac = answer->GetContentByName("audio");
+ const ContentInfo* dc = answer->GetContentByName("data");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(dc != NULL);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+ EXPECT_EQ(std::string(NS_JINGLE_RTP), dc->type);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ const DataContentDescription* dcd =
+ static_cast<const DataContentDescription*>(dc->description);
+ EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+ EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
+ EXPECT_EQ(kAutoBandwidth, acd->bandwidth()); // negotiated auto bw
+ EXPECT_NE(0U, acd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_TRUE(acd->rtcp_mux()); // negotiated rtcp-mux
+ ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
+ EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type());
+ EXPECT_EQ(MAKE_VECTOR(kDataCodecsAnswer), dcd->codecs());
+ EXPECT_NE(0U, dcd->first_ssrc()); // a random nonzero ssrc
+ EXPECT_TRUE(dcd->rtcp_mux()); // negotiated rtcp-mux
+ ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf), dcd->protocol());
+}
+
+// Tests that the media direction is set to send/receive in an answer if
+// the offer is send/receive.
+TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToSendReceiveOffer) {
+ TestMediaDirectionInAnswer(cricket::MD_SENDRECV, cricket::MD_SENDRECV);
+}
+
+// Tests that the media direction is set to receive only in an answer if
+// the offer is send only (the answerer's direction mirrors the offerer's).
+TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToSendOnlyOffer) {
+ TestMediaDirectionInAnswer(cricket::MD_SENDONLY, cricket::MD_RECVONLY);
+}
+
+// Tests that the media direction is set to send only in an answer if
+// the offer is recv only (the answerer's direction mirrors the offerer's).
+TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToRecvOnlyOffer) {
+ TestMediaDirectionInAnswer(cricket::MD_RECVONLY, cricket::MD_SENDONLY);
+}
+
+// Tests that the media direction is set to inactive in an answer if
+// the offer is inactive.
+TEST_F(MediaSessionDescriptionFactoryTest, CreateAnswerToInactiveOffer) {
+ TestMediaDirectionInAnswer(cricket::MD_INACTIVE, cricket::MD_INACTIVE);
+}
+
+// Test that a data content with an unknown protocol is rejected in an answer.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ CreateDataAnswerToOfferWithUnknownProtocol) {
+ MediaSessionOptions opts;
+ opts.data_channel_type = cricket::DCT_RTP;
+ opts.has_audio = false;
+ f1_.set_secure(SEC_ENABLED);
+ f2_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ // Tamper with the offered data protocol so the answerer cannot accept it.
+ ContentInfo* dc_offer= offer->GetContentByName("data");
+ ASSERT_TRUE(dc_offer != NULL);
+ DataContentDescription* dcd_offer =
+ static_cast<DataContentDescription*>(dc_offer->description);
+ ASSERT_TRUE(dcd_offer != NULL);
+ std::string protocol = "a weird unknown protocol";
+ dcd_offer->set_protocol(protocol);
+
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+
+ // The data content must be present but rejected, echoing back the
+ // unrecognized protocol string.
+ const ContentInfo* dc_answer = answer->GetContentByName("data");
+ ASSERT_TRUE(dc_answer != NULL);
+ EXPECT_TRUE(dc_answer->rejected);
+ const DataContentDescription* dcd_answer =
+ static_cast<const DataContentDescription*>(dc_answer->description);
+ ASSERT_TRUE(dcd_answer != NULL);
+ EXPECT_EQ(protocol, dcd_answer->protocol());
+}
+
+// Test that the media protocol is RTP/AVPF if DTLS and SDES are disabled.
+TEST_F(MediaSessionDescriptionFactoryTest, AudioOfferAnswerWithCryptoDisabled) {
+ MediaSessionOptions opts;
+ // Disable SDES on both media factories and DTLS on both transport
+ // factories, so no secure profile can be negotiated.
+ f1_.set_secure(SEC_DISABLED);
+ f2_.set_secure(SEC_DISABLED);
+ tdf1_.set_secure(SEC_DISABLED);
+ tdf2_.set_secure(SEC_DISABLED);
+
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ const AudioContentDescription* offer_acd =
+ GetFirstAudioContentDescription(offer.get());
+ ASSERT_TRUE(offer_acd != NULL);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolAvpf), offer_acd->protocol());
+
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+
+ const ContentInfo* ac_answer = answer->GetContentByName("audio");
+ ASSERT_TRUE(ac_answer != NULL);
+ EXPECT_FALSE(ac_answer->rejected);
+
+ const AudioContentDescription* answer_acd =
+ GetFirstAudioContentDescription(answer.get());
+ ASSERT_TRUE(answer_acd != NULL);
+ EXPECT_EQ(std::string(cricket::kMediaProtocolAvpf), answer_acd->protocol());
+}
+
+// Create a video offer and answer and ensure the RTP header extensions
+// matches what we expect.
+TEST_F(MediaSessionDescriptionFactoryTest, TestOfferAnswerWithRtpExtensions) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+
+ // Each side advertises its own extension set; the answer must contain the
+ // negotiated (intersected) set.
+ f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1));
+ f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1));
+ f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2));
+ f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2));
+
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+
+ // The offer carries f1_'s extensions verbatim ...
+ EXPECT_EQ(MAKE_VECTOR(kAudioRtpExtension1),
+ GetFirstAudioContentDescription(
+ offer.get())->rtp_header_extensions());
+ EXPECT_EQ(MAKE_VECTOR(kVideoRtpExtension1),
+ GetFirstVideoContentDescription(
+ offer.get())->rtp_header_extensions());
+ // ... while the answer carries only the mutually supported ones.
+ EXPECT_EQ(MAKE_VECTOR(kAudioRtpExtensionAnswer),
+ GetFirstAudioContentDescription(
+ answer.get())->rtp_header_extensions());
+ EXPECT_EQ(MAKE_VECTOR(kVideoRtpExtensionAnswer),
+ GetFirstVideoContentDescription(
+ answer.get())->rtp_header_extensions());
+}
+
+// Create an audio, video, data answer without legacy StreamParams, and verify
+// that none of the answered content descriptions carry any ssrcs.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ TestCreateAnswerWithoutLegacyStreams) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ opts.data_channel_type = cricket::DCT_RTP;
+ f1_.set_add_legacy_streams(false);
+ f2_.set_add_legacy_streams(false);
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+ const ContentInfo* ac = answer->GetContentByName("audio");
+ const ContentInfo* vc = answer->GetContentByName("video");
+ const ContentInfo* dc = answer->GetContentByName("data");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc != NULL);
+ // Fix: |dc| was dereferenced below without a null check, unlike |ac| and
+ // |vc|; a missing data content would have crashed the test.
+ ASSERT_TRUE(dc != NULL);
+ const AudioContentDescription* acd =
+ static_cast<const AudioContentDescription*>(ac->description);
+ const VideoContentDescription* vcd =
+ static_cast<const VideoContentDescription*>(vc->description);
+ const DataContentDescription* dcd =
+ static_cast<const DataContentDescription*>(dc->description);
+
+ EXPECT_FALSE(acd->has_ssrcs()); // No StreamParams.
+ EXPECT_FALSE(vcd->has_ssrcs()); // No StreamParams.
+ EXPECT_FALSE(dcd->has_ssrcs()); // No StreamParams.
+}
+
+// Exercise the partial() getter/setter on audio, video and data content
+// descriptions: the flag defaults to false and round-trips through
+// set_partial().
+TEST_F(MediaSessionDescriptionFactoryTest, TestPartial) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ opts.data_channel_type = cricket::DCT_RTP;
+ f1_.set_secure(SEC_ENABLED);
+ talk_base::scoped_ptr<SessionDescription>
+ offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ const ContentInfo* ac = offer->GetContentByName("audio");
+ const ContentInfo* vc = offer->GetContentByName("video");
+ const ContentInfo* dc = offer->GetContentByName("data");
+ // const_cast so set_partial() can be called on the owned descriptions.
+ AudioContentDescription* acd = const_cast<AudioContentDescription*>(
+ static_cast<const AudioContentDescription*>(ac->description));
+ VideoContentDescription* vcd = const_cast<VideoContentDescription*>(
+ static_cast<const VideoContentDescription*>(vc->description));
+ DataContentDescription* dcd = const_cast<DataContentDescription*>(
+ static_cast<const DataContentDescription*>(dc->description));
+
+ EXPECT_FALSE(acd->partial()); // default is false.
+ acd->set_partial(true);
+ EXPECT_TRUE(acd->partial());
+ acd->set_partial(false);
+ EXPECT_FALSE(acd->partial());
+
+ EXPECT_FALSE(vcd->partial()); // default is false.
+ vcd->set_partial(true);
+ EXPECT_TRUE(vcd->partial());
+ vcd->set_partial(false);
+ EXPECT_FALSE(vcd->partial());
+
+ EXPECT_FALSE(dcd->partial()); // default is false.
+ dcd->set_partial(true);
+ EXPECT_TRUE(dcd->partial());
+ dcd->set_partial(false);
+ EXPECT_FALSE(dcd->partial());
+}
+
+// Verify rtcp-mux negotiation for all four combinations of the offerer's and
+// answerer's rtcp_mux_enabled flags: the answer enables rtcp-mux only when
+// both sides enable it, and the offer reflects only the offerer's flag.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateVideoAnswerRtcpMux) {
+ MediaSessionOptions offer_opts;
+ MediaSessionOptions answer_opts;
+ answer_opts.has_video = true;
+ offer_opts.has_video = true;
+ answer_opts.data_channel_type = cricket::DCT_RTP;
+ offer_opts.data_channel_type = cricket::DCT_RTP;
+
+ talk_base::scoped_ptr<SessionDescription> offer(NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(NULL);
+
+ // Case 1: both sides enable rtcp-mux -> offered and negotiated on.
+ offer_opts.rtcp_mux_enabled = true;
+ answer_opts.rtcp_mux_enabled = true;
+
+ offer.reset(f1_.CreateOffer(offer_opts, NULL));
+ answer.reset(f2_.CreateAnswer(offer.get(), answer_opts, NULL));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(answer.get()));
+ EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstDataContentDescription(offer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstDataContentDescription(answer.get())->rtcp_mux());
+
+ // Case 2: only the offerer enables rtcp-mux -> offered, but answered off.
+ offer_opts.rtcp_mux_enabled = true;
+ answer_opts.rtcp_mux_enabled = false;
+
+ offer.reset(f1_.CreateOffer(offer_opts, NULL));
+ answer.reset(f2_.CreateAnswer(offer.get(), answer_opts, NULL));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(answer.get()));
+ EXPECT_TRUE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
+ EXPECT_TRUE(GetFirstDataContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstDataContentDescription(answer.get())->rtcp_mux());
+
+ // Case 3: only the answerer enables rtcp-mux -> not offered, answered off.
+ offer_opts.rtcp_mux_enabled = false;
+ answer_opts.rtcp_mux_enabled = true;
+
+ offer.reset(f1_.CreateOffer(offer_opts, NULL));
+ answer.reset(f2_.CreateAnswer(offer.get(), answer_opts, NULL));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(answer.get()));
+ EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstDataContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstDataContentDescription(answer.get())->rtcp_mux());
+
+ // Case 4: neither side enables rtcp-mux -> off everywhere.
+ offer_opts.rtcp_mux_enabled = false;
+ answer_opts.rtcp_mux_enabled = false;
+
+ offer.reset(f1_.CreateOffer(offer_opts, NULL));
+ answer.reset(f2_.CreateAnswer(offer.get(), answer_opts, NULL));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(offer.get()));
+ ASSERT_TRUE(NULL != GetFirstAudioContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstVideoContentDescription(answer.get()));
+ ASSERT_TRUE(NULL != GetFirstDataContentDescription(answer.get()));
+ EXPECT_FALSE(GetFirstAudioContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstVideoContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstDataContentDescription(offer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstAudioContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstVideoContentDescription(answer.get())->rtcp_mux());
+ EXPECT_FALSE(GetFirstDataContentDescription(answer.get())->rtcp_mux());
+}
+
+// Create an audio-only answer to a video offer: the video content must still
+// appear in the answer, but marked rejected.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateAudioAnswerToVideo) {
+ MediaSessionOptions offer_opts;
+ offer_opts.has_video = true;
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(offer_opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // Answer with default options, i.e. audio only.
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), MediaSessionOptions(), NULL));
+ const ContentInfo* audio_content = answer->GetContentByName("audio");
+ ASSERT_TRUE(audio_content != NULL);
+ const ContentInfo* video_content = answer->GetContentByName("video");
+ ASSERT_TRUE(video_content != NULL);
+ ASSERT_TRUE(video_content->description != NULL);
+ EXPECT_TRUE(video_content->rejected);
+}
+
+// Create an audio-only answer to an offer with data: the data content must
+// still appear in the answer, but marked rejected.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateNoDataAnswerToDataOffer) {
+ MediaSessionOptions offer_opts;
+ offer_opts.data_channel_type = cricket::DCT_RTP;
+ talk_base::scoped_ptr<SessionDescription> offer(
+ f1_.CreateOffer(offer_opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // Answer with default options, i.e. audio only.
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), MediaSessionOptions(), NULL));
+ const ContentInfo* audio_content = answer->GetContentByName("audio");
+ ASSERT_TRUE(audio_content != NULL);
+ const ContentInfo* data_content = answer->GetContentByName("data");
+ ASSERT_TRUE(data_content != NULL);
+ ASSERT_TRUE(data_content->description != NULL);
+ EXPECT_TRUE(data_content->rejected);
+}
+
+// Create an answer that rejects the contents which are rejected in the offer.
+TEST_F(MediaSessionDescriptionFactoryTest,
+ CreateAnswerToOfferWithRejectedMedia) {
+ MediaSessionOptions opts;
+ opts.has_video = true;
+ opts.data_channel_type = cricket::DCT_RTP;
+ talk_base::scoped_ptr<SessionDescription>
+ offer(f1_.CreateOffer(opts, NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ // Mark all three offered contents as rejected before answering.
+ ContentInfo* ac = offer->GetContentByName("audio");
+ ContentInfo* vc = offer->GetContentByName("video");
+ ContentInfo* dc = offer->GetContentByName("data");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc != NULL);
+ ASSERT_TRUE(dc != NULL);
+ ac->rejected = true;
+ vc->rejected = true;
+ dc->rejected = true;
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), opts, NULL));
+ // The answer must echo every content back as rejected.
+ ac = answer->GetContentByName("audio");
+ vc = answer->GetContentByName("video");
+ dc = answer->GetContentByName("data");
+ ASSERT_TRUE(ac != NULL);
+ ASSERT_TRUE(vc != NULL);
+ ASSERT_TRUE(dc != NULL);
+ EXPECT_TRUE(ac->rejected);
+ EXPECT_TRUE(vc->rejected);
+ EXPECT_TRUE(dc->rejected);
+}
+
+// Create an audio and video offer with:
+// - one video track
+// - two audio tracks
+// - two data tracks
+// and ensure it matches what we expect. Also updates the initial offer by
+// adding a new video track and replaces one of the audio tracks.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoOffer) {
+  MediaSessionOptions opts;
+  opts.AddStream(MEDIA_TYPE_VIDEO, kVideoTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_AUDIO, kAudioTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_AUDIO, kAudioTrack2, kMediaStream1);
+  opts.data_channel_type = cricket::DCT_RTP;
+  opts.AddStream(MEDIA_TYPE_DATA, kDataTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_DATA, kDataTrack2, kMediaStream1);
+
+  // SDES enabled so every content in the offer carries crypto params.
+  f1_.set_secure(SEC_ENABLED);
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+
+  ASSERT_TRUE(offer.get() != NULL);
+  const ContentInfo* ac = offer->GetContentByName("audio");
+  const ContentInfo* vc = offer->GetContentByName("video");
+  const ContentInfo* dc = offer->GetContentByName("data");
+  ASSERT_TRUE(ac != NULL);
+  ASSERT_TRUE(vc != NULL);
+  ASSERT_TRUE(dc != NULL);
+  const AudioContentDescription* acd =
+      static_cast<const AudioContentDescription*>(ac->description);
+  const VideoContentDescription* vcd =
+      static_cast<const VideoContentDescription*>(vc->description);
+  const DataContentDescription* dcd =
+      static_cast<const DataContentDescription*>(dc->description);
+  EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+  EXPECT_EQ(f1_.audio_codecs(), acd->codecs());
+
+  // Both audio tracks live in kMediaStream1, so they must share a cname but
+  // carry distinct, non-zero ssrcs.
+  const StreamParamsVec& audio_streams = acd->streams();
+  ASSERT_EQ(2U, audio_streams.size());
+  EXPECT_EQ(audio_streams[0].cname , audio_streams[1].cname);
+  EXPECT_EQ(kAudioTrack1, audio_streams[0].id);
+  ASSERT_EQ(1U, audio_streams[0].ssrcs.size());
+  EXPECT_NE(0U, audio_streams[0].ssrcs[0]);
+  EXPECT_EQ(kAudioTrack2, audio_streams[1].id);
+  ASSERT_EQ(1U, audio_streams[1].ssrcs.size());
+  EXPECT_NE(0U, audio_streams[1].ssrcs[0]);
+
+  EXPECT_EQ(kAutoBandwidth, acd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(acd->rtcp_mux());                 // rtcp-mux defaults on
+  ASSERT_CRYPTO(acd, 2U, CS_AES_CM_128_HMAC_SHA1_32);
+
+  EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
+  EXPECT_EQ(f1_.video_codecs(), vcd->codecs());
+  ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+
+  // Video rides in the same MediaStream, so it shares the audio cname.
+  const StreamParamsVec& video_streams = vcd->streams();
+  ASSERT_EQ(1U, video_streams.size());
+  EXPECT_EQ(video_streams[0].cname, audio_streams[0].cname);
+  EXPECT_EQ(kVideoTrack1, video_streams[0].id);
+  EXPECT_EQ(kAutoBandwidth, vcd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(vcd->rtcp_mux());                 // rtcp-mux defaults on
+
+  EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type());
+  EXPECT_EQ(f1_.data_codecs(), dcd->codecs());
+  ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+
+  const StreamParamsVec& data_streams = dcd->streams();
+  ASSERT_EQ(2U, data_streams.size());
+  EXPECT_EQ(data_streams[0].cname , data_streams[1].cname);
+  EXPECT_EQ(kDataTrack1, data_streams[0].id);
+  ASSERT_EQ(1U, data_streams[0].ssrcs.size());
+  EXPECT_NE(0U, data_streams[0].ssrcs[0]);
+  EXPECT_EQ(kDataTrack2, data_streams[1].id);
+  ASSERT_EQ(1U, data_streams[1].ssrcs.size());
+  EXPECT_NE(0U, data_streams[1].ssrcs[0]);
+
+  // Unlike audio/video, data bandwidth defaults to kDataMaxBandwidth.
+  EXPECT_EQ(cricket::kDataMaxBandwidth,
+            dcd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(dcd->rtcp_mux());  // rtcp-mux defaults on
+  // NOTE(review): duplicate of the ASSERT_CRYPTO(dcd, ...) check above;
+  // harmless but redundant.
+  ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+
+
+  // Update the offer. Add a new video track that is not synched to the
+  // other tracks and replace audio track 2 with audio track 3.
+  opts.AddStream(MEDIA_TYPE_VIDEO, kVideoTrack2, kMediaStream2);
+  opts.RemoveStream(MEDIA_TYPE_AUDIO, kAudioTrack2);
+  opts.AddStream(MEDIA_TYPE_AUDIO, kAudioTrack3, kMediaStream1);
+  opts.RemoveStream(MEDIA_TYPE_DATA, kDataTrack2);
+  opts.AddStream(MEDIA_TYPE_DATA, kDataTrack3, kMediaStream1);
+  talk_base::scoped_ptr<SessionDescription>
+      updated_offer(f1_.CreateOffer(opts, offer.get()));
+
+  ASSERT_TRUE(updated_offer.get() != NULL);
+  ac = updated_offer->GetContentByName("audio");
+  vc = updated_offer->GetContentByName("video");
+  dc = updated_offer->GetContentByName("data");
+  ASSERT_TRUE(ac != NULL);
+  ASSERT_TRUE(vc != NULL);
+  ASSERT_TRUE(dc != NULL);
+  const AudioContentDescription* updated_acd =
+      static_cast<const AudioContentDescription*>(ac->description);
+  const VideoContentDescription* updated_vcd =
+      static_cast<const VideoContentDescription*>(vc->description);
+  const DataContentDescription* updated_dcd =
+      static_cast<const DataContentDescription*>(dc->description);
+
+  // Codecs and crypto params must be carried over unchanged from the
+  // original offer.
+  EXPECT_EQ(acd->type(), updated_acd->type());
+  EXPECT_EQ(acd->codecs(), updated_acd->codecs());
+  EXPECT_EQ(vcd->type(), updated_vcd->type());
+  EXPECT_EQ(vcd->codecs(), updated_vcd->codecs());
+  EXPECT_EQ(dcd->type(), updated_dcd->type());
+  EXPECT_EQ(dcd->codecs(), updated_dcd->codecs());
+  ASSERT_CRYPTO(updated_acd, 2U, CS_AES_CM_128_HMAC_SHA1_32);
+  EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos()));
+  ASSERT_CRYPTO(updated_vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+  EXPECT_TRUE(CompareCryptoParams(vcd->cryptos(), updated_vcd->cryptos()));
+  ASSERT_CRYPTO(updated_dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+  EXPECT_TRUE(CompareCryptoParams(dcd->cryptos(), updated_dcd->cryptos()));
+
+  // The surviving audio stream is untouched; the replacement track gets a
+  // fresh ssrc but shares the existing cname (same MediaStream).
+  const StreamParamsVec& updated_audio_streams = updated_acd->streams();
+  ASSERT_EQ(2U, updated_audio_streams.size());
+  EXPECT_EQ(audio_streams[0], updated_audio_streams[0]);
+  EXPECT_EQ(kAudioTrack3, updated_audio_streams[1].id);  // New audio track.
+  ASSERT_EQ(1U, updated_audio_streams[1].ssrcs.size());
+  EXPECT_NE(0U, updated_audio_streams[1].ssrcs[0]);
+  EXPECT_EQ(updated_audio_streams[0].cname, updated_audio_streams[1].cname);
+
+  // The new video track is in kMediaStream2, so its cname must differ.
+  const StreamParamsVec& updated_video_streams = updated_vcd->streams();
+  ASSERT_EQ(2U, updated_video_streams.size());
+  EXPECT_EQ(video_streams[0], updated_video_streams[0]);
+  EXPECT_EQ(kVideoTrack2, updated_video_streams[1].id);
+  EXPECT_NE(updated_video_streams[1].cname, updated_video_streams[0].cname);
+
+  const StreamParamsVec& updated_data_streams = updated_dcd->streams();
+  ASSERT_EQ(2U, updated_data_streams.size());
+  EXPECT_EQ(data_streams[0], updated_data_streams[0]);
+  EXPECT_EQ(kDataTrack3, updated_data_streams[1].id);  // New data track.
+  ASSERT_EQ(1U, updated_data_streams[1].ssrcs.size());
+  EXPECT_NE(0U, updated_data_streams[1].ssrcs[0]);
+  EXPECT_EQ(updated_data_streams[0].cname, updated_data_streams[1].cname);
+}
+
+// Create an audio and video answer to a standard video offer with:
+// - one video track
+// - two audio tracks
+// - two data tracks
+// and ensure it matches what we expect. Also updates the initial answer by
+// adding a new video track and removes one of the audio tracks.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCreateMultiStreamVideoAnswer) {
+  MediaSessionOptions offer_opts;
+  offer_opts.has_video = true;
+  offer_opts.data_channel_type = cricket::DCT_RTP;
+  f1_.set_secure(SEC_ENABLED);
+  f2_.set_secure(SEC_ENABLED);
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(offer_opts,
+                                                                  NULL));
+
+  // The answerer attaches its own tracks: one video, two audio and two data
+  // tracks, all belonging to kMediaStream1.
+  MediaSessionOptions opts;
+  opts.AddStream(MEDIA_TYPE_VIDEO, kVideoTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_AUDIO, kAudioTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_AUDIO, kAudioTrack2, kMediaStream1);
+  opts.data_channel_type = cricket::DCT_RTP;
+  opts.AddStream(MEDIA_TYPE_DATA, kDataTrack1, kMediaStream1);
+  opts.AddStream(MEDIA_TYPE_DATA, kDataTrack2, kMediaStream1);
+
+  talk_base::scoped_ptr<SessionDescription>
+      answer(f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  ASSERT_TRUE(answer.get() != NULL);
+  const ContentInfo* ac = answer->GetContentByName("audio");
+  const ContentInfo* vc = answer->GetContentByName("video");
+  const ContentInfo* dc = answer->GetContentByName("data");
+  ASSERT_TRUE(ac != NULL);
+  ASSERT_TRUE(vc != NULL);
+  ASSERT_TRUE(dc != NULL);
+  const AudioContentDescription* acd =
+      static_cast<const AudioContentDescription*>(ac->description);
+  const VideoContentDescription* vcd =
+      static_cast<const VideoContentDescription*>(vc->description);
+  const DataContentDescription* dcd =
+      static_cast<const DataContentDescription*>(dc->description);
+  ASSERT_CRYPTO(acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
+  ASSERT_CRYPTO(vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+  ASSERT_CRYPTO(dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+
+  EXPECT_EQ(MEDIA_TYPE_AUDIO, acd->type());
+  EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
+
+  // Both audio tracks share kMediaStream1, so they share a cname but have
+  // distinct, non-zero ssrcs.
+  const StreamParamsVec& audio_streams = acd->streams();
+  ASSERT_EQ(2U, audio_streams.size());
+  EXPECT_TRUE(audio_streams[0].cname == audio_streams[1].cname);
+  EXPECT_EQ(kAudioTrack1, audio_streams[0].id);
+  ASSERT_EQ(1U, audio_streams[0].ssrcs.size());
+  EXPECT_NE(0U, audio_streams[0].ssrcs[0]);
+  EXPECT_EQ(kAudioTrack2, audio_streams[1].id);
+  ASSERT_EQ(1U, audio_streams[1].ssrcs.size());
+  EXPECT_NE(0U, audio_streams[1].ssrcs[0]);
+
+  EXPECT_EQ(kAutoBandwidth, acd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(acd->rtcp_mux());                 // rtcp-mux defaults on
+
+  EXPECT_EQ(MEDIA_TYPE_VIDEO, vcd->type());
+  EXPECT_EQ(MAKE_VECTOR(kVideoCodecsAnswer), vcd->codecs());
+
+  // Video shares the same MediaStream, hence the same cname as audio.
+  const StreamParamsVec& video_streams = vcd->streams();
+  ASSERT_EQ(1U, video_streams.size());
+  EXPECT_EQ(video_streams[0].cname, audio_streams[0].cname);
+  EXPECT_EQ(kVideoTrack1, video_streams[0].id);
+  EXPECT_EQ(kAutoBandwidth, vcd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(vcd->rtcp_mux());                 // rtcp-mux defaults on
+
+  EXPECT_EQ(MEDIA_TYPE_DATA, dcd->type());
+  EXPECT_EQ(MAKE_VECTOR(kDataCodecsAnswer), dcd->codecs());
+
+  const StreamParamsVec& data_streams = dcd->streams();
+  ASSERT_EQ(2U, data_streams.size());
+  EXPECT_TRUE(data_streams[0].cname == data_streams[1].cname);
+  EXPECT_EQ(kDataTrack1, data_streams[0].id);
+  ASSERT_EQ(1U, data_streams[0].ssrcs.size());
+  EXPECT_NE(0U, data_streams[0].ssrcs[0]);
+  EXPECT_EQ(kDataTrack2, data_streams[1].id);
+  ASSERT_EQ(1U, data_streams[1].ssrcs.size());
+  EXPECT_NE(0U, data_streams[1].ssrcs[0]);
+
+  // Unlike audio/video, data bandwidth defaults to kDataMaxBandwidth.
+  EXPECT_EQ(cricket::kDataMaxBandwidth,
+            dcd->bandwidth());  // default bandwidth (auto)
+  EXPECT_TRUE(dcd->rtcp_mux());  // rtcp-mux defaults on
+
+  // Update the answer. Add a new video track that is not synched to the
+  // other tracks and remove 1 audio track.
+  opts.AddStream(MEDIA_TYPE_VIDEO, kVideoTrack2, kMediaStream2);
+  opts.RemoveStream(MEDIA_TYPE_AUDIO, kAudioTrack2);
+  opts.RemoveStream(MEDIA_TYPE_DATA, kDataTrack2);
+  talk_base::scoped_ptr<SessionDescription>
+      updated_answer(f2_.CreateAnswer(offer.get(), opts, answer.get()));
+
+  ASSERT_TRUE(updated_answer.get() != NULL);
+  ac = updated_answer->GetContentByName("audio");
+  vc = updated_answer->GetContentByName("video");
+  dc = updated_answer->GetContentByName("data");
+  ASSERT_TRUE(ac != NULL);
+  ASSERT_TRUE(vc != NULL);
+  ASSERT_TRUE(dc != NULL);
+  const AudioContentDescription* updated_acd =
+      static_cast<const AudioContentDescription*>(ac->description);
+  const VideoContentDescription* updated_vcd =
+      static_cast<const VideoContentDescription*>(vc->description);
+  const DataContentDescription* updated_dcd =
+      static_cast<const DataContentDescription*>(dc->description);
+
+  // Crypto params and codecs must be unchanged relative to the first answer.
+  ASSERT_CRYPTO(updated_acd, 1U, CS_AES_CM_128_HMAC_SHA1_32);
+  EXPECT_TRUE(CompareCryptoParams(acd->cryptos(), updated_acd->cryptos()));
+  ASSERT_CRYPTO(updated_vcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+  EXPECT_TRUE(CompareCryptoParams(vcd->cryptos(), updated_vcd->cryptos()));
+  ASSERT_CRYPTO(updated_dcd, 1U, CS_AES_CM_128_HMAC_SHA1_80);
+  EXPECT_TRUE(CompareCryptoParams(dcd->cryptos(), updated_dcd->cryptos()));
+
+  EXPECT_EQ(acd->type(), updated_acd->type());
+  EXPECT_EQ(acd->codecs(), updated_acd->codecs());
+  EXPECT_EQ(vcd->type(), updated_vcd->type());
+  EXPECT_EQ(vcd->codecs(), updated_vcd->codecs());
+  EXPECT_EQ(dcd->type(), updated_dcd->type());
+  EXPECT_EQ(dcd->codecs(), updated_dcd->codecs());
+
+  // One audio track was removed; the remaining one is unchanged.
+  const StreamParamsVec& updated_audio_streams = updated_acd->streams();
+  ASSERT_EQ(1U, updated_audio_streams.size());
+  EXPECT_TRUE(audio_streams[0] == updated_audio_streams[0]);
+
+  // The added video track is in kMediaStream2, so it gets a different cname.
+  const StreamParamsVec& updated_video_streams = updated_vcd->streams();
+  ASSERT_EQ(2U, updated_video_streams.size());
+  EXPECT_EQ(video_streams[0], updated_video_streams[0]);
+  EXPECT_EQ(kVideoTrack2, updated_video_streams[1].id);
+  EXPECT_NE(updated_video_streams[1].cname, updated_video_streams[0].cname);
+
+  const StreamParamsVec& updated_data_streams = updated_dcd->streams();
+  ASSERT_EQ(1U, updated_data_streams.size());
+  EXPECT_TRUE(data_streams[0] == updated_data_streams[0]);
+}
+
+
+// After answering an offer, a subsequent offer from the answerer must keep
+// the codecs negotiated in the first exchange, appending only the codecs
+// that |f2_| alone supports, in preference order.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       RespondentCreatesOfferAfterCreatingAnswer) {
+  MediaSessionOptions opts;
+  opts.has_audio = true;
+  opts.has_video = true;
+
+  // First exchange: |f1_| offers, |f2_| answers.
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+  talk_base::scoped_ptr<SessionDescription> answer(
+      f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  const AudioContentDescription* answer_acd =
+      GetFirstAudioContentDescription(answer.get());
+  EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), answer_acd->codecs());
+  const VideoContentDescription* answer_vcd =
+      GetFirstVideoContentDescription(answer.get());
+  EXPECT_EQ(MAKE_VECTOR(kVideoCodecsAnswer), answer_vcd->codecs());
+
+  // Second exchange: |f2_| re-offers, seeded with its own previous answer.
+  talk_base::scoped_ptr<SessionDescription> updated_offer(
+      f2_.CreateOffer(opts, answer.get()));
+
+  // The expected audio codecs are the common audio codecs from the first
+  // offer/answer exchange plus the audio codecs only |f2_| offer, sorted in
+  // preference order.
+  const AudioCodec kUpdatedAudioCodecOffer[] = {
+    kAudioCodecs2[0],
+    kAudioCodecsAnswer[0],
+    kAudioCodecsAnswer[1],
+  };
+
+  // The expected video codecs are the common video codecs from the first
+  // offer/answer exchange plus the video codecs only |f2_| offer, sorted in
+  // preference order.
+  const VideoCodec kUpdatedVideoCodecOffer[] = {
+    kVideoCodecsAnswer[0],
+    kVideoCodecs2[1],
+  };
+
+  EXPECT_EQ(MAKE_VECTOR(kUpdatedAudioCodecOffer),
+            GetFirstAudioContentDescription(updated_offer.get())->codecs());
+  EXPECT_EQ(MAKE_VECTOR(kUpdatedVideoCodecOffer),
+            GetFirstVideoContentDescription(updated_offer.get())->codecs());
+}
+
+// Create an updated offer after creating an answer to the original offer and
+// verify that the codecs that were part of the original answer are not changed
+// in the updated offer. In this test Rtx is enabled.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       RespondentCreatesOfferAfterCreatingAnswerWithRtx) {
+  MediaSessionOptions opts;
+  opts.has_video = true;
+  opts.has_audio = false;
+  std::vector<VideoCodec> f1_codecs = MAKE_VECTOR(kVideoCodecs1);
+  VideoCodec rtx_f1;
+  rtx_f1.id = 126;
+  rtx_f1.name = cricket::kRtxCodecName;
+
+  // This creates rtx for H264 with the payload type |f1_| uses.
+  rtx_f1.params[cricket::kCodecParamAssociatedPayloadType] =
+      talk_base::ToString<int>(kVideoCodecs1[1].id);
+  f1_codecs.push_back(rtx_f1);
+  f1_.set_video_codecs(f1_codecs);
+
+  std::vector<VideoCodec> f2_codecs = MAKE_VECTOR(kVideoCodecs2);
+  VideoCodec rtx_f2;
+  rtx_f2.id = 127;
+  rtx_f2.name = cricket::kRtxCodecName;
+
+  // This creates rtx for H264 with the payload type |f2_| uses.
+  rtx_f2.params[cricket::kCodecParamAssociatedPayloadType] =
+      talk_base::ToString<int>(kVideoCodecs2[0].id);
+  f2_codecs.push_back(rtx_f2);
+  f2_.set_video_codecs(f2_codecs);
+
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+  ASSERT_TRUE(offer.get() != NULL);
+  talk_base::scoped_ptr<SessionDescription> answer(
+      f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  const VideoContentDescription* vcd =
+      GetFirstVideoContentDescription(answer.get());
+
+  // The answer must contain the negotiated codecs plus |f1_|'s RTX codec.
+  std::vector<VideoCodec> expected_codecs = MAKE_VECTOR(kVideoCodecsAnswer);
+  expected_codecs.push_back(rtx_f1);
+
+  EXPECT_EQ(expected_codecs, vcd->codecs());
+
+  // Now, make sure we get same result, except for the preference order,
+  // if |f2_| creates an updated offer even though the default payload types
+  // are different from |f1_|.
+  expected_codecs[0].preference = f1_codecs[1].preference;
+
+  talk_base::scoped_ptr<SessionDescription> updated_offer(
+      f2_.CreateOffer(opts, answer.get()));
+  ASSERT_TRUE(updated_offer);
+  talk_base::scoped_ptr<SessionDescription> updated_answer(
+      f1_.CreateAnswer(updated_offer.get(), opts, answer.get()));
+
+  const VideoContentDescription* updated_vcd =
+      GetFirstVideoContentDescription(updated_answer.get());
+
+  EXPECT_EQ(expected_codecs, updated_vcd->codecs());
+}
+
+// Create an updated offer that adds video after creating an audio only answer
+// to the original offer. This test verifies that if a video codec and the RTX
+// codec have the same default payload type as an audio codec that is already
+// in use, the added codecs payload types are changed.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       RespondentCreatesOfferWithVideoAndRtxAfterCreatingAudioAnswer) {
+  std::vector<VideoCodec> f1_codecs = MAKE_VECTOR(kVideoCodecs1);
+  VideoCodec rtx_f1;
+  rtx_f1.id = 126;
+  rtx_f1.name = cricket::kRtxCodecName;
+
+  // This creates rtx for H264 with the payload type |f1_| uses.
+  rtx_f1.params[cricket::kCodecParamAssociatedPayloadType] =
+      talk_base::ToString<int>(kVideoCodecs1[1].id);
+  f1_codecs.push_back(rtx_f1);
+  f1_.set_video_codecs(f1_codecs);
+
+  MediaSessionOptions opts;
+  opts.has_audio = true;
+  opts.has_video = false;
+
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+  talk_base::scoped_ptr<SessionDescription> answer(
+      f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  const AudioContentDescription* acd =
+      GetFirstAudioContentDescription(answer.get());
+  EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), acd->codecs());
+
+  // Now - let |f2_| add video with RTX and let the payload type the RTX codec
+  // reference be the same as an audio codec that was negotiated in the
+  // first offer/answer exchange.
+  opts.has_audio = true;
+  opts.has_video = true;
+
+  std::vector<VideoCodec> f2_codecs = MAKE_VECTOR(kVideoCodecs2);
+  int used_pl_type = acd->codecs()[0].id;
+  f2_codecs[0].id = used_pl_type;  // Set the payload type for H264.
+  VideoCodec rtx_f2;
+  rtx_f2.id = 127;
+  rtx_f2.name = cricket::kRtxCodecName;
+  rtx_f2.params[cricket::kCodecParamAssociatedPayloadType] =
+      talk_base::ToString<int>(used_pl_type);
+  f2_codecs.push_back(rtx_f2);
+  f2_.set_video_codecs(f2_codecs);
+
+  talk_base::scoped_ptr<SessionDescription> updated_offer(
+      f2_.CreateOffer(opts, answer.get()));
+  ASSERT_TRUE(updated_offer);
+  talk_base::scoped_ptr<SessionDescription> updated_answer(
+      f1_.CreateAnswer(updated_offer.get(), opts, answer.get()));
+
+  // BUG FIX: inspect the updated answer, not the original |answer| that was
+  // already verified above — checking |answer| again made this assertion a
+  // no-op and left the second exchange's audio codecs untested.
+  const AudioContentDescription* updated_acd =
+      GetFirstAudioContentDescription(updated_answer.get());
+  EXPECT_EQ(MAKE_VECTOR(kAudioCodecsAnswer), updated_acd->codecs());
+
+  const VideoContentDescription* updated_vcd =
+      GetFirstVideoContentDescription(updated_answer.get());
+
+  // H264 must have been moved off the payload type the audio codec already
+  // occupies, and the RTX apt parameter must track the new payload type.
+  ASSERT_EQ("H264", updated_vcd->codecs()[0].name);
+  ASSERT_EQ(cricket::kRtxCodecName, updated_vcd->codecs()[1].name);
+  int new_h264_pl_type = updated_vcd->codecs()[0].id;
+  EXPECT_NE(used_pl_type, new_h264_pl_type);
+  VideoCodec rtx = updated_vcd->codecs()[1];
+  int pt_referenced_by_rtx = talk_base::FromString<int>(
+      rtx.params[cricket::kCodecParamAssociatedPayloadType]);
+  EXPECT_EQ(new_h264_pl_type, pt_referenced_by_rtx);
+}
+
+// Test that an RTX codec is dropped from the answer when the offered RTX
+// entry carries no associated payload type (apt) parameter.
+TEST_F(MediaSessionDescriptionFactoryTest, RtxWithoutApt) {
+  MediaSessionOptions opts;
+  opts.has_video = true;
+  opts.has_audio = false;
+  // Give |f1_| an RTX codec with no parameters at all.
+  std::vector<VideoCodec> f1_codecs = MAKE_VECTOR(kVideoCodecs1);
+  VideoCodec rtx_f1;
+  rtx_f1.id = 126;
+  rtx_f1.name = cricket::kRtxCodecName;
+  f1_codecs.push_back(rtx_f1);
+  f1_.set_video_codecs(f1_codecs);
+
+  // Give |f2_| an RTX codec referencing its own H264 payload type.
+  std::vector<VideoCodec> f2_codecs = MAKE_VECTOR(kVideoCodecs2);
+  VideoCodec rtx_f2;
+  rtx_f2.id = 127;
+  rtx_f2.name = cricket::kRtxCodecName;
+  rtx_f2.SetParam(cricket::kCodecParamAssociatedPayloadType,
+                  talk_base::ToString<int>(kVideoCodecs2[0].id));
+  f2_codecs.push_back(rtx_f2);
+  f2_.set_video_codecs(f2_codecs);
+
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+  ASSERT_TRUE(offer.get() != NULL);
+  // CreateOffer always fills in kCodecParamAssociatedPayloadType when RTX is
+  // selected, so strip the parameters from the offered RTX codec by hand to
+  // simulate an offer whose RTX has no apt value.
+  VideoContentDescription* video_desc =
+      static_cast<cricket::VideoContentDescription*>(
+          offer->GetContentDescriptionByName(cricket::CN_VIDEO));
+  ASSERT_TRUE(video_desc != NULL);
+  std::vector<VideoCodec> offered_codecs = video_desc->codecs();
+  for (std::vector<VideoCodec>::iterator it = offered_codecs.begin();
+       it != offered_codecs.end(); ++it) {
+    if (it->name.find(cricket::kRtxCodecName) == 0) {
+      it->params.clear();
+    }
+  }
+  video_desc->set_codecs(offered_codecs);
+
+  talk_base::scoped_ptr<SessionDescription> answer(
+      f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  // No codec named RTX may survive into the answer.
+  const VideoContentDescription* vcd =
+      GetFirstVideoContentDescription(answer.get());
+  for (std::vector<VideoCodec>::const_iterator it = vcd->codecs().begin();
+       it != vcd->codecs().end(); ++it) {
+    ASSERT_STRNE(it->name.c_str(), cricket::kRtxCodecName);
+  }
+}
+
+// Create an updated offer after creating an answer to the original offer and
+// verify that the RTP header extensions that were part of the original answer
+// are not changed in the updated offer.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       RespondentCreatesOfferAfterCreatingAnswerWithRtpExtensions) {
+  MediaSessionOptions opts;
+  opts.has_audio = true;
+  opts.has_video = true;
+
+  f1_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension1));
+  f1_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension1));
+  f2_.set_audio_rtp_header_extensions(MAKE_VECTOR(kAudioRtpExtension2));
+  f2_.set_video_rtp_header_extensions(MAKE_VECTOR(kVideoRtpExtension2));
+
+  talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(opts, NULL));
+  talk_base::scoped_ptr<SessionDescription> answer(
+      f2_.CreateAnswer(offer.get(), opts, NULL));
+
+  // The first exchange negotiates the common extensions.
+  EXPECT_EQ(MAKE_VECTOR(kAudioRtpExtensionAnswer),
+            GetFirstAudioContentDescription(
+                answer.get())->rtp_header_extensions());
+  EXPECT_EQ(MAKE_VECTOR(kVideoRtpExtensionAnswer),
+            GetFirstVideoContentDescription(
+                answer.get())->rtp_header_extensions());
+
+  talk_base::scoped_ptr<SessionDescription> updated_offer(
+      f2_.CreateOffer(opts, answer.get()));
+
+  // The expected RTP header extensions in the new offer are the resulting
+  // extensions from the first offer/answer exchange plus the extensions only
+  // |f2_| offer.
+  // Since the default local extension id |f2_| uses has already been used by
+  // |f1_| for another extensions, it is changed to 255.
+  const RtpHeaderExtension kUpdatedAudioRtpExtensions[] = {
+    kAudioRtpExtensionAnswer[0],
+    RtpHeaderExtension(kAudioRtpExtension2[1].uri, 255),
+  };
+
+  // Since the default local extension id |f2_| uses has already been used by
+  // |f1_| for another extensions, it is changed to 254.
+  const RtpHeaderExtension kUpdatedVideoRtpExtensions[] = {
+    kVideoRtpExtensionAnswer[0],
+    RtpHeaderExtension(kVideoRtpExtension2[1].uri, 254),
+  };
+
+  const AudioContentDescription* updated_acd =
+      GetFirstAudioContentDescription(updated_offer.get());
+  EXPECT_EQ(MAKE_VECTOR(kUpdatedAudioRtpExtensions),
+            updated_acd->rtp_header_extensions());
+
+  const VideoContentDescription* updated_vcd =
+      GetFirstVideoContentDescription(updated_offer.get());
+  EXPECT_EQ(MAKE_VECTOR(kUpdatedVideoRtpExtensions),
+            updated_vcd->rtp_header_extensions());
+}
+
+// Verifies SessionDescription::Copy produces a faithful copy: content groups
+// and each content description (codecs and legacy streams) must match the
+// source.
+TEST(MediaSessionDescription, CopySessionDescription) {
+  SessionDescription source;
+  cricket::ContentGroup group(cricket::CN_AUDIO);
+  source.AddGroup(group);
+  AudioContentDescription* acd(new AudioContentDescription());
+  acd->set_codecs(MAKE_VECTOR(kAudioCodecs1));
+  acd->AddLegacyStream(1);
+  source.AddContent(cricket::CN_AUDIO, cricket::NS_JINGLE_RTP, acd);
+  VideoContentDescription* vcd(new VideoContentDescription());
+  vcd->set_codecs(MAKE_VECTOR(kVideoCodecs1));
+  vcd->AddLegacyStream(2);
+  source.AddContent(cricket::CN_VIDEO, cricket::NS_JINGLE_RTP, vcd);
+
+  talk_base::scoped_ptr<SessionDescription> copy(source.Copy());
+  ASSERT_TRUE(copy.get() != NULL);
+  EXPECT_TRUE(copy->HasGroup(cricket::CN_AUDIO));
+  const ContentInfo* ac = copy->GetContentByName("audio");
+  const ContentInfo* vc = copy->GetContentByName("video");
+  ASSERT_TRUE(ac != NULL);
+  ASSERT_TRUE(vc != NULL);
+  EXPECT_EQ(std::string(NS_JINGLE_RTP), ac->type);
+  const AudioContentDescription* acd_copy =
+      static_cast<const AudioContentDescription*>(ac->description);
+  EXPECT_EQ(acd->codecs(), acd_copy->codecs());
+  // BUG FIX: assert against the copy. The original checked
+  // acd->first_ssrc() — the source object — which is trivially true and
+  // leaves the copied stream unverified.
+  EXPECT_EQ(1u, acd_copy->first_ssrc());
+
+  EXPECT_EQ(std::string(NS_JINGLE_RTP), vc->type);
+  const VideoContentDescription* vcd_copy =
+      static_cast<const VideoContentDescription*>(vc->description);
+  EXPECT_EQ(vcd->codecs(), vcd_copy->codecs());
+  // BUG FIX: same as above — check the copy, not the source.
+  EXPECT_EQ(2u, vcd_copy->first_ssrc());
+}
+
+// The below TestTransportInfoXXX tests create different offers/answers, and
+// ensure the TransportInfo in the SessionDescription matches what we expect.
+//
+// Offer, audio only, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferAudio) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  TestTransportInfo(true, transport_opts, false);
+}
+
+// Offer, audio only, with a current description present (third arg true).
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferAudioCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  TestTransportInfo(true, transport_opts, true);
+}
+
+// Offer with audio, video and RTP data, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferMultimedia) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  TestTransportInfo(true, transport_opts, false);
+}
+
+// Offer with audio, video and RTP data, with a current description present.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       TestTransportInfoOfferMultimediaCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  TestTransportInfo(true, transport_opts, true);
+}
+
+// Bundled multimedia offer, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoOfferBundle) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  transport_opts.bundle_enabled = true;
+  TestTransportInfo(true, transport_opts, false);
+}
+
+// Bundled multimedia offer, with a current description present.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       TestTransportInfoOfferBundleCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  transport_opts.bundle_enabled = true;
+  TestTransportInfo(true, transport_opts, true);
+}
+
+// Answer, audio only, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerAudio) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  TestTransportInfo(false, transport_opts, false);
+}
+
+// Answer, audio only, with a current description present.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       TestTransportInfoAnswerAudioCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  TestTransportInfo(false, transport_opts, true);
+}
+
+// Answer with audio, video and RTP data, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerMultimedia) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  TestTransportInfo(false, transport_opts, false);
+}
+
+// Answer with audio, video and RTP data, with a current description present.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       TestTransportInfoAnswerMultimediaCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  TestTransportInfo(false, transport_opts, true);
+}
+
+// Bundled multimedia answer, without a current description.
+TEST_F(MediaSessionDescriptionFactoryTest, TestTransportInfoAnswerBundle) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  transport_opts.bundle_enabled = true;
+  TestTransportInfo(false, transport_opts, false);
+}
+
+// Bundled multimedia answer, with a current description present.
+TEST_F(MediaSessionDescriptionFactoryTest,
+       TestTransportInfoAnswerBundleCurrent) {
+  MediaSessionOptions transport_opts;
+  transport_opts.has_audio = true;
+  transport_opts.has_video = true;
+  transport_opts.data_channel_type = cricket::DCT_RTP;
+  transport_opts.bundle_enabled = true;
+  TestTransportInfo(false, transport_opts, true);
+}
+
+// Create an offer with bundle enabled and verify the crypto parameters are
+// the common set of the available cryptos.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoWithOfferBundle) {
+  TestCryptoWithBundle(true);  // true => exercise the offer path.
+}
+
+// Create an answer with bundle enabled and verify the crypto parameters are
+// the common set of the available cryptos.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoWithAnswerBundle) {
+  TestCryptoWithBundle(false);  // false => exercise the answer path.
+}
+
+// Test that we include both SDES and DTLS in the offer, but only include SDES
+// in the answer if DTLS isn't negotiated.
+TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoDtls) {
+  // Media-level crypto (SDES) on for both sides; transport-level DTLS on for
+  // the offerer only, initially.
+  f1_.set_secure(SEC_ENABLED);
+  f2_.set_secure(SEC_ENABLED);
+  tdf1_.set_secure(SEC_ENABLED);
+  tdf2_.set_secure(SEC_DISABLED);
+  MediaSessionOptions options;
+  options.has_audio = true;
+  options.has_video = true;
+  talk_base::scoped_ptr<SessionDescription> offer, answer;
+  const cricket::MediaContentDescription* audio_media_desc;
+  const cricket::MediaContentDescription* video_media_desc;
+  const cricket::TransportDescription* audio_trans_desc;
+  const cricket::TransportDescription* video_trans_desc;
+
+  // Generate an offer with SDES and DTLS support.
+  offer.reset(f1_.CreateOffer(options, NULL));
+  ASSERT_TRUE(offer.get() != NULL);
+
+  audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      offer->GetContentDescriptionByName("audio"));
+  ASSERT_TRUE(audio_media_desc != NULL);
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      offer->GetContentDescriptionByName("video"));
+  ASSERT_TRUE(video_media_desc != NULL);
+  // SDES cryptos present on both contents...
+  EXPECT_EQ(2u, audio_media_desc->cryptos().size());
+  EXPECT_EQ(1u, video_media_desc->cryptos().size());
+
+  // ...and a DTLS fingerprint present on both transports.
+  audio_trans_desc = offer->GetTransportDescriptionByName("audio");
+  ASSERT_TRUE(audio_trans_desc != NULL);
+  video_trans_desc = offer->GetTransportDescriptionByName("video");
+  ASSERT_TRUE(video_trans_desc != NULL);
+  ASSERT_TRUE(audio_trans_desc->identity_fingerprint.get() != NULL);
+  ASSERT_TRUE(video_trans_desc->identity_fingerprint.get() != NULL);
+
+  // Generate an answer with only SDES support, since tdf2 has crypto disabled.
+  answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
+  ASSERT_TRUE(answer.get() != NULL);
+
+  audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      answer->GetContentDescriptionByName("audio"));
+  ASSERT_TRUE(audio_media_desc != NULL);
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      answer->GetContentDescriptionByName("video"));
+  ASSERT_TRUE(video_media_desc != NULL);
+  EXPECT_EQ(1u, audio_media_desc->cryptos().size());
+  EXPECT_EQ(1u, video_media_desc->cryptos().size());
+
+  // No DTLS fingerprint in the answer when DTLS was not negotiated.
+  audio_trans_desc = answer->GetTransportDescriptionByName("audio");
+  ASSERT_TRUE(audio_trans_desc != NULL);
+  video_trans_desc = answer->GetTransportDescriptionByName("video");
+  ASSERT_TRUE(video_trans_desc != NULL);
+  ASSERT_TRUE(audio_trans_desc->identity_fingerprint.get() == NULL);
+  ASSERT_TRUE(video_trans_desc->identity_fingerprint.get() == NULL);
+
+  // Enable DTLS; the answer should now only have DTLS support.
+  tdf2_.set_secure(SEC_ENABLED);
+  answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
+  ASSERT_TRUE(answer.get() != NULL);
+
+  audio_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      answer->GetContentDescriptionByName("audio"));
+  ASSERT_TRUE(audio_media_desc != NULL);
+  video_media_desc = static_cast<const cricket::MediaContentDescription*>(
+      answer->GetContentDescriptionByName("video"));
+  ASSERT_TRUE(video_media_desc != NULL);
+  // With DTLS negotiated, SDES cryptos are dropped and the protocol is SAVPF.
+  EXPECT_TRUE(audio_media_desc->cryptos().empty());
+  EXPECT_TRUE(video_media_desc->cryptos().empty());
+  EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+            audio_media_desc->protocol());
+  EXPECT_EQ(std::string(cricket::kMediaProtocolSavpf),
+            video_media_desc->protocol());
+
+  audio_trans_desc = answer->GetTransportDescriptionByName("audio");
+  ASSERT_TRUE(audio_trans_desc != NULL);
+  video_trans_desc = answer->GetTransportDescriptionByName("video");
+  ASSERT_TRUE(video_trans_desc != NULL);
+  ASSERT_TRUE(audio_trans_desc->identity_fingerprint.get() != NULL);
+  ASSERT_TRUE(video_trans_desc->identity_fingerprint.get() != NULL);
+}
+
+// Test that an answer can't be created if cryptos are required but the offer is
+// unsecure.
+TEST_F(MediaSessionDescriptionFactoryTest, TestSecureAnswerToUnsecureOffer) {
+ MediaSessionOptions options;
+ f1_.set_secure(SEC_DISABLED);
+ tdf1_.set_secure(SEC_DISABLED);
+ f2_.set_secure(SEC_REQUIRED);
+ tdf1_.set_secure(SEC_ENABLED);
+
+ talk_base::scoped_ptr<SessionDescription> offer(f1_.CreateOffer(options,
+ NULL));
+ ASSERT_TRUE(offer.get() != NULL);
+ talk_base::scoped_ptr<SessionDescription> answer(
+ f2_.CreateAnswer(offer.get(), options, NULL));
+ EXPECT_TRUE(answer.get() == NULL);
+}
+
// Test that we accept a DTLS offer without SDES and create an appropriate
// answer.
TEST_F(MediaSessionDescriptionFactoryTest, TestCryptoOfferDtlsButNotSdes) {
  // SDES is disabled on the offerer but enabled on the answerer; DTLS is
  // enabled on both sides.
  f1_.set_secure(SEC_DISABLED);
  f2_.set_secure(SEC_ENABLED);
  tdf1_.set_secure(SEC_ENABLED);
  tdf2_.set_secure(SEC_ENABLED);
  MediaSessionOptions options;
  options.has_audio = true;
  options.has_video = true;
  options.data_channel_type = cricket::DCT_RTP;

  talk_base::scoped_ptr<SessionDescription> offer, answer;

  // Generate an offer with DTLS but without SDES.
  offer.reset(f1_.CreateOffer(options, NULL));
  ASSERT_TRUE(offer.get() != NULL);

  // No media section should carry SDES crypto parameters...
  const AudioContentDescription* audio_offer =
      GetFirstAudioContentDescription(offer.get());
  ASSERT_TRUE(audio_offer->cryptos().empty());
  const VideoContentDescription* video_offer =
      GetFirstVideoContentDescription(offer.get());
  ASSERT_TRUE(video_offer->cryptos().empty());
  const DataContentDescription* data_offer =
      GetFirstDataContentDescription(offer.get());
  ASSERT_TRUE(data_offer->cryptos().empty());

  // ...but every transport should advertise a DTLS fingerprint.
  const cricket::TransportDescription* audio_offer_trans_desc =
      offer->GetTransportDescriptionByName("audio");
  ASSERT_TRUE(audio_offer_trans_desc->identity_fingerprint.get() != NULL);
  const cricket::TransportDescription* video_offer_trans_desc =
      offer->GetTransportDescriptionByName("video");
  ASSERT_TRUE(video_offer_trans_desc->identity_fingerprint.get() != NULL);
  const cricket::TransportDescription* data_offer_trans_desc =
      offer->GetTransportDescriptionByName("data");
  ASSERT_TRUE(data_offer_trans_desc->identity_fingerprint.get() != NULL);

  // Generate an answer with DTLS.
  answer.reset(f2_.CreateAnswer(offer.get(), options, NULL));
  ASSERT_TRUE(answer.get() != NULL);

  // The answer's transports should likewise all carry DTLS fingerprints.
  const cricket::TransportDescription* audio_answer_trans_desc =
      answer->GetTransportDescriptionByName("audio");
  EXPECT_TRUE(audio_answer_trans_desc->identity_fingerprint.get() != NULL);
  const cricket::TransportDescription* video_answer_trans_desc =
      answer->GetTransportDescriptionByName("video");
  EXPECT_TRUE(video_answer_trans_desc->identity_fingerprint.get() != NULL);
  const cricket::TransportDescription* data_answer_trans_desc =
      answer->GetTransportDescriptionByName("data");
  EXPECT_TRUE(data_answer_trans_desc->identity_fingerprint.get() != NULL);
}
+
// Verifies if vad_enabled option is set to false, CN codecs are not present in
// offer or answer.
TEST_F(MediaSessionDescriptionFactoryTest, TestVADEnableOption) {
  MediaSessionOptions options;
  options.has_audio = true;
  options.has_video = true;
  talk_base::scoped_ptr<SessionDescription> offer(
      f1_.CreateOffer(options, NULL));
  ASSERT_TRUE(offer.get() != NULL);
  const ContentInfo* audio_content = offer->GetContentByName("audio");
  // With VAD enabled (the default), the offer is expected to contain CN
  // codecs, so the "no CN codecs" verification must fail here.
  EXPECT_FALSE(VerifyNoCNCodecs(audio_content));

  // Disabling VAD must strip the CN codecs from both offer and answer.
  options.vad_enabled = false;
  offer.reset(f1_.CreateOffer(options, NULL));
  ASSERT_TRUE(offer.get() != NULL);
  audio_content = offer->GetContentByName("audio");
  EXPECT_TRUE(VerifyNoCNCodecs(audio_content));
  talk_base::scoped_ptr<SessionDescription> answer(
      f1_.CreateAnswer(offer.get(), options, NULL));
  ASSERT_TRUE(answer.get() != NULL);
  audio_content = answer->GetContentByName("audio");
  EXPECT_TRUE(VerifyNoCNCodecs(audio_content));
}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.cc b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.cc
new file mode 100644
index 00000000000..b54891e8ccb
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.cc
@@ -0,0 +1,1148 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "talk/session/media/mediasessionclient.h"
+
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/media/base/capturemanager.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/parsing.h"
+#include "talk/session/media/mediamessages.h"
+#include "talk/session/media/srtpfilter.h"
+#include "talk/xmllite/qname.h"
+#include "talk/xmllite/xmlconstants.h"
+#include "talk/xmpp/constants.h"
+
+namespace cricket {
+
#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
// Convenience constructor: builds a ChannelManager that uses the default
// media engine, running on the session manager's worker thread.
MediaSessionClient::MediaSessionClient(
    const buzz::Jid& jid, SessionManager *manager)
    : jid_(jid),
      session_manager_(manager),
      focus_call_(NULL),
      channel_manager_(new ChannelManager(session_manager_->worker_thread())),
      desc_factory_(channel_manager_,
                    session_manager_->transport_desc_factory()),
      multisession_enabled_(false) {
  Construct();
}
#endif
+
// Dependency-injecting constructor: the caller supplies the media engine,
// data engine and device manager used to build the ChannelManager.
MediaSessionClient::MediaSessionClient(
    const buzz::Jid& jid, SessionManager *manager,
    MediaEngineInterface* media_engine,
    DataEngineInterface* data_media_engine,
    DeviceManagerInterface* device_manager)
    : jid_(jid),
      session_manager_(manager),
      focus_call_(NULL),
      channel_manager_(new ChannelManager(
          media_engine, data_media_engine,
          device_manager, new CaptureManager(),
          session_manager_->worker_thread())),
      desc_factory_(channel_manager_,
                    session_manager_->transport_desc_factory()),
      multisession_enabled_(false) {
  Construct();
}
+
// Shared initialization for both constructors.
void MediaSessionClient::Construct() {
  // Register ourselves as the handler of audio and video sessions.
  session_manager_->AddClient(NS_JINGLE_RTP, this);
  // Forward device notifications.
  SignalDevicesChange.repeat(channel_manager_->SignalDevicesChange);
  // Bring up the channel manager.
  // In previous versions of ChannelManager, this was done automatically
  // in the constructor.
  channel_manager_->Init();
}
+
+MediaSessionClient::~MediaSessionClient() {
+ // Destroy all calls
+ std::map<uint32, Call *>::iterator it;
+ while (calls_.begin() != calls_.end()) {
+ std::map<uint32, Call *>::iterator it = calls_.begin();
+ DestroyCall((*it).second);
+ }
+
+ // Delete channel manager. This will wait for the channels to exit
+ delete channel_manager_;
+
+ // Remove ourselves from the client map.
+ session_manager_->RemoveClient(NS_JINGLE_RTP);
+}
+
+Call *MediaSessionClient::CreateCall() {
+ Call *call = new Call(this);
+ calls_[call->id()] = call;
+ SignalCallCreate(call);
+ return call;
+}
+
+void MediaSessionClient::OnSessionCreate(Session *session,
+ bool received_initiate) {
+ if (received_initiate) {
+ session->SignalState.connect(this, &MediaSessionClient::OnSessionState);
+ }
+}
+
// Handles session state changes for incoming sessions; on receipt of the
// initiate, finds or creates the Call for the session and rejects offers
// whose accept would have no usable audio codecs.
void MediaSessionClient::OnSessionState(BaseSession* base_session,
                                        BaseSession::State state) {
  // MediaSessionClient can only be used with a Session*, so it's
  // safe to cast here.
  Session* session = static_cast<Session*>(base_session);

  if (state == Session::STATE_RECEIVEDINITIATE) {
    // The creation of the call must happen after the session has
    // processed the initiate message because we need the
    // remote_description to know what content names to use in the
    // call.

    // If our accept would have no codecs, then we must reject this call.
    // NOTE(review): |accept| is owned here and released by the delete at the
    // bottom of this branch; there are no early returns in between.
    const SessionDescription* offer = session->remote_description();
    const SessionDescription* accept = CreateAnswer(offer, CallOptions());
    const ContentInfo* audio_content = GetFirstAudioContent(accept);
    bool audio_rejected = (!audio_content) ? true : audio_content->rejected;
    const AudioContentDescription* audio_desc = (!audio_content) ? NULL :
        static_cast<const AudioContentDescription*>(audio_content->description);

    // For some reason, we need a call even if we reject. So, either find a
    // matching call or create a new one.
    // The matching of existing calls is used to support the multi-session mode
    // required for p2p handoffs: ie. once a MUC call is established, a new
    // session may be established for the same call but is direct between the
    // clients. To indicate that this is the case, the initiator of the incoming
    // session is set to be the same as the remote name of the MUC for the
    // existing session, thus the client can know that this is a new session for
    // the existing call, rather than a whole new call.
    Call* call = NULL;
    if (multisession_enabled_) {
      call = FindCallByRemoteName(session->initiator_name());
    }

    if (call == NULL) {
      // Could not find a matching call, so create a new one.
      call = CreateCall();
    }

    session_map_[session->id()] = call;
    call->IncomingSession(session, offer);

    // Reject sessions that would end up with no audio codecs at all.
    if (audio_rejected || !audio_desc || audio_desc->codecs().size() == 0) {
      session->Reject(STR_TERMINATE_INCOMPATIBLE_PARAMETERS);
    }
    delete accept;
  }
}
+
+void MediaSessionClient::DestroyCall(Call *call) {
+ // Change focus away, signal destruction
+
+ if (call == focus_call_)
+ SetFocus(NULL);
+ SignalCallDestroy(call);
+
+ // Remove it from calls_ map and delete
+
+ std::map<uint32, Call *>::iterator it = calls_.find(call->id());
+ if (it != calls_.end())
+ calls_.erase(it);
+
+ delete call;
+}
+
// Called by the session manager when a session dies; detaches the session
// from the call it belongs to.
void MediaSessionClient::OnSessionDestroy(Session *session) {
  // Find the call this session is in, remove it
  SessionMap::iterator it = session_map_.find(session->id());
  // Every live session is expected to have been registered in session_map_.
  ASSERT(it != session_map_.end());
  if (it != session_map_.end()) {
    Call *call = (*it).second;
    session_map_.erase(it);
    call->RemoveSession(session);
  }
}
+
// Returns the call that currently holds channel focus, or NULL.
Call *MediaSessionClient::GetFocus() {
  return focus_call_;
}
+
// Transfers channel focus to |call| (NULL clears focus): disables the old
// focused call's channels, enables the new one's, and signals the change.
// A no-op when |call| already has focus.
void MediaSessionClient::SetFocus(Call *call) {
  Call *old_focus_call = focus_call_;
  if (focus_call_ != call) {
    // Disable the previously focused call's channels before enabling the
    // new call's, so at most one call has active channels at a time.
    if (focus_call_ != NULL)
      focus_call_->EnableChannels(false);
    focus_call_ = call;
    if (focus_call_ != NULL)
      focus_call_->EnableChannels(true);
    SignalFocus(focus_call_, old_focus_call);
  }
}
+
void MediaSessionClient::JoinCalls(Call *call_to_join, Call *call) {
  // Move all sessions from call to call_to_join, delete call.
  // If call_to_join has focus, added sessions should have enabled channels.

  // Drop focus from the call that is about to be destroyed.
  if (focus_call_ == call)
    SetFocus(NULL);
  call_to_join->Join(call, focus_call_ == call_to_join);
  DestroyCall(call);
}
+
+Session *MediaSessionClient::CreateSession(Call *call) {
+ std::string id;
+ return CreateSession(id, call);
+}
+
// Creates an NS_JINGLE_RTP session with the given id (empty id lets the
// session manager pick one) and registers it against |call|.
Session *MediaSessionClient::CreateSession(const std::string& id, Call* call) {
  const std::string& type = NS_JINGLE_RTP;
  Session *session = session_manager_->CreateSession(id, jid().Str(), type);
  session_map_[session->id()] = call;
  return session;
}
+
+Call *MediaSessionClient::FindCallByRemoteName(const std::string &remote_name) {
+ SessionMap::const_iterator call;
+ for (call = session_map_.begin(); call != session_map_.end(); ++call) {
+ std::vector<Session *> sessions = call->second->sessions();
+ std::vector<Session *>::const_iterator session;
+ for (session = sessions.begin(); session != sessions.end(); ++session) {
+ if (remote_name == (*session)->remote_name()) {
+ return call->second;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+// TODO(pthatcher): Move all of the parsing and writing functions into
+// mediamessages.cc, with unit tests.
+bool ParseGingleAudioCodec(const buzz::XmlElement* element, AudioCodec* out) {
+ int id = GetXmlAttr(element, QN_ID, -1);
+ if (id < 0)
+ return false;
+
+ std::string name = GetXmlAttr(element, QN_NAME, buzz::STR_EMPTY);
+ int clockrate = GetXmlAttr(element, QN_CLOCKRATE, 0);
+ int bitrate = GetXmlAttr(element, QN_BITRATE, 0);
+ int channels = GetXmlAttr(element, QN_CHANNELS, 1);
+ *out = AudioCodec(id, name, clockrate, bitrate, channels, 0);
+ return true;
+}
+
+bool ParseGingleVideoCodec(const buzz::XmlElement* element, VideoCodec* out) {
+ int id = GetXmlAttr(element, QN_ID, -1);
+ if (id < 0)
+ return false;
+
+ std::string name = GetXmlAttr(element, QN_NAME, buzz::STR_EMPTY);
+ int width = GetXmlAttr(element, QN_WIDTH, 0);
+ int height = GetXmlAttr(element, QN_HEIGHT, 0);
+ int framerate = GetXmlAttr(element, QN_FRAMERATE, 0);
+
+ *out = VideoCodec(id, name, width, height, framerate, 0);
+ return true;
+}
+
+// Parses an ssrc string as a legacy stream. If it fails, returns
+// false and fills an error message.
+bool ParseSsrcAsLegacyStream(const std::string& ssrc_str,
+ std::vector<StreamParams>* streams,
+ ParseError* error) {
+ if (!ssrc_str.empty()) {
+ uint32 ssrc;
+ if (!talk_base::FromString(ssrc_str, &ssrc)) {
+ return BadParse("Missing or invalid ssrc.", error);
+ }
+
+ streams->push_back(StreamParams::CreateLegacy(ssrc));
+ }
+ return true;
+}
+
// Reads a Gingle ssrc child element named |name| (if present) and appends it
// to |media|'s streams as a legacy stream.
void ParseGingleSsrc(const buzz::XmlElement* parent_elem,
                     const buzz::QName& name,
                     MediaContentDescription* media) {
  const buzz::XmlElement* ssrc_elem = parent_elem->FirstNamed(name);
  if (ssrc_elem) {
    // NOTE(review): a parse failure is silently dropped here -- |error| is
    // local and the return value of ParseSsrcAsLegacyStream is ignored.
    ParseError error;
    ParseSsrcAsLegacyStream(
        ssrc_elem->BodyText(), &(media->mutable_streams()), &error);
  }
}
+
+bool ParseCryptoParams(const buzz::XmlElement* element,
+ CryptoParams* out,
+ ParseError* error) {
+ if (!element->HasAttr(QN_CRYPTO_SUITE)) {
+ return BadParse("crypto: crypto-suite attribute missing ", error);
+ } else if (!element->HasAttr(QN_CRYPTO_KEY_PARAMS)) {
+ return BadParse("crypto: key-params attribute missing ", error);
+ } else if (!element->HasAttr(QN_CRYPTO_TAG)) {
+ return BadParse("crypto: tag attribute missing ", error);
+ }
+
+ const std::string& crypto_suite = element->Attr(QN_CRYPTO_SUITE);
+ const std::string& key_params = element->Attr(QN_CRYPTO_KEY_PARAMS);
+ const int tag = GetXmlAttr(element, QN_CRYPTO_TAG, 0);
+ const std::string& session_params =
+ element->Attr(QN_CRYPTO_SESSION_PARAMS); // Optional.
+
+ *out = CryptoParams(tag, crypto_suite, key_params, session_params);
+ return true;
+}
+
+
// Parse the first encryption element found with a matching 'usage'
// element.
// <usage/> is specific to Gingle. In Jingle, <crypto/> is already
// scoped to a content.
// Return false if there was an encryption element and it could not be
// parsed.
bool ParseGingleEncryption(const buzz::XmlElement* desc,
                           const buzz::QName& usage,
                           MediaContentDescription* media,
                           ParseError* error) {
  for (const buzz::XmlElement* encryption = desc->FirstNamed(QN_ENCRYPTION);
       encryption != NULL;
       encryption = encryption->NextNamed(QN_ENCRYPTION)) {
    if (encryption->FirstNamed(usage) != NULL) {
      media->set_crypto_required(
          GetXmlAttr(encryption, QN_ENCRYPTION_REQUIRED, false));
      // Collect every <crypto> child of the matching encryption element.
      for (const buzz::XmlElement* crypto = encryption->FirstNamed(QN_CRYPTO);
           crypto != NULL;
           crypto = crypto->NextNamed(QN_CRYPTO)) {
        CryptoParams params;
        if (!ParseCryptoParams(crypto, &params, error)) {
          return false;
        }
        media->AddCrypto(params);
      }
      // Only the first encryption element with a matching usage is used.
      break;
    }
  }
  return true;
}
+
+void ParseBandwidth(const buzz::XmlElement* parent_elem,
+ MediaContentDescription* media) {
+ const buzz::XmlElement* bw_elem = GetXmlChild(parent_elem, LN_BANDWIDTH);
+ int bandwidth_kbps = -1;
+ if (bw_elem && talk_base::FromString(bw_elem->BodyText(), &bandwidth_kbps)) {
+ if (bandwidth_kbps >= 0) {
+ media->set_bandwidth(bandwidth_kbps * 1000);
+ }
+ }
+}
+
+bool ParseGingleAudioContent(const buzz::XmlElement* content_elem,
+ ContentDescription** content,
+ ParseError* error) {
+ AudioContentDescription* audio = new AudioContentDescription();
+
+ if (content_elem->FirstElement()) {
+ for (const buzz::XmlElement* codec_elem =
+ content_elem->FirstNamed(QN_GINGLE_AUDIO_PAYLOADTYPE);
+ codec_elem != NULL;
+ codec_elem = codec_elem->NextNamed(QN_GINGLE_AUDIO_PAYLOADTYPE)) {
+ AudioCodec codec;
+ if (ParseGingleAudioCodec(codec_elem, &codec)) {
+ audio->AddCodec(codec);
+ }
+ }
+ } else {
+ // For backward compatibility, we can assume the other client is
+ // an old version of Talk if it has no audio payload types at all.
+ audio->AddCodec(AudioCodec(103, "ISAC", 16000, -1, 1, 1));
+ audio->AddCodec(AudioCodec(0, "PCMU", 8000, 64000, 1, 0));
+ }
+
+ ParseGingleSsrc(content_elem, QN_GINGLE_AUDIO_SRCID, audio);
+
+ if (!ParseGingleEncryption(content_elem, QN_GINGLE_AUDIO_CRYPTO_USAGE,
+ audio, error)) {
+ return false;
+ }
+
+ *content = audio;
+ return true;
+}
+
+bool ParseGingleVideoContent(const buzz::XmlElement* content_elem,
+ ContentDescription** content,
+ ParseError* error) {
+ VideoContentDescription* video = new VideoContentDescription();
+
+ for (const buzz::XmlElement* codec_elem =
+ content_elem->FirstNamed(QN_GINGLE_VIDEO_PAYLOADTYPE);
+ codec_elem != NULL;
+ codec_elem = codec_elem->NextNamed(QN_GINGLE_VIDEO_PAYLOADTYPE)) {
+ VideoCodec codec;
+ if (ParseGingleVideoCodec(codec_elem, &codec)) {
+ video->AddCodec(codec);
+ }
+ }
+
+ ParseGingleSsrc(content_elem, QN_GINGLE_VIDEO_SRCID, video);
+ ParseBandwidth(content_elem, video);
+
+ if (!ParseGingleEncryption(content_elem, QN_GINGLE_VIDEO_CRYPTO_USAGE,
+ video, error)) {
+ return false;
+ }
+
+ *content = video;
+ return true;
+}
+
+void ParsePayloadTypeParameters(const buzz::XmlElement* element,
+ std::map<std::string, std::string>* paramap) {
+ for (const buzz::XmlElement* param = element->FirstNamed(QN_PARAMETER);
+ param != NULL; param = param->NextNamed(QN_PARAMETER)) {
+ std::string name = GetXmlAttr(param, QN_PAYLOADTYPE_PARAMETER_NAME,
+ buzz::STR_EMPTY);
+ std::string value = GetXmlAttr(param, QN_PAYLOADTYPE_PARAMETER_VALUE,
+ buzz::STR_EMPTY);
+ if (!name.empty() && !value.empty()) {
+ paramap->insert(make_pair(name, value));
+ }
+ }
+}
+
+void ParseFeedbackParams(const buzz::XmlElement* element,
+ FeedbackParams* params) {
+ for (const buzz::XmlElement* param = element->FirstNamed(QN_JINGLE_RTCP_FB);
+ param != NULL; param = param->NextNamed(QN_JINGLE_RTCP_FB)) {
+ std::string type = GetXmlAttr(param, QN_TYPE, buzz::STR_EMPTY);
+ std::string subtype = GetXmlAttr(param, QN_SUBTYPE, buzz::STR_EMPTY);
+ if (!type.empty()) {
+ params->Add(FeedbackParam(type, subtype));
+ }
+ }
+}
+
+void AddFeedbackParams(const FeedbackParams& additional_params,
+ FeedbackParams* params) {
+ for (size_t i = 0; i < additional_params.params().size(); ++i) {
+ params->Add(additional_params.params()[i]);
+ }
+}
+
// Looks up |key| in |map| and returns its value converted with atoi();
// returns |def| when the key is absent. Note that, per atoi() semantics,
// a present but non-numeric value yields 0.
int FindWithDefault(const std::map<std::string, std::string>& map,
                    const std::string& key, const int def) {
  std::map<std::string, std::string>::const_iterator found = map.find(key);
  if (found == map.end()) {
    return def;
  }
  return atoi(found->second.c_str());
}
+
+
// Parse the first encryption element found.
// Return false if there was an encryption element and it could not be
// parsed.
bool ParseJingleEncryption(const buzz::XmlElement* content_elem,
                           MediaContentDescription* media,
                           ParseError* error) {
  const buzz::XmlElement* encryption =
      content_elem->FirstNamed(QN_ENCRYPTION);
  // No encryption element at all is fine -- the content is simply unsecured.
  if (encryption == NULL) {
    return true;
  }

  media->set_crypto_required(
      GetXmlAttr(encryption, QN_ENCRYPTION_REQUIRED, false));

  // Every <crypto> child contributes one set of SDES parameters.
  for (const buzz::XmlElement* crypto = encryption->FirstNamed(QN_CRYPTO);
       crypto != NULL;
       crypto = crypto->NextNamed(QN_CRYPTO)) {
    CryptoParams params;
    if (!ParseCryptoParams(crypto, &params, error)) {
      return false;
    }
    media->AddCrypto(params);
  }
  return true;
}
+
+bool ParseJingleAudioCodec(const buzz::XmlElement* elem, AudioCodec* codec) {
+ int id = GetXmlAttr(elem, QN_ID, -1);
+ if (id < 0)
+ return false;
+
+ std::string name = GetXmlAttr(elem, QN_NAME, buzz::STR_EMPTY);
+ int clockrate = GetXmlAttr(elem, QN_CLOCKRATE, 0);
+ int channels = GetXmlAttr(elem, QN_CHANNELS, 1);
+
+ std::map<std::string, std::string> paramap;
+ ParsePayloadTypeParameters(elem, &paramap);
+ int bitrate = FindWithDefault(paramap, PAYLOADTYPE_PARAMETER_BITRATE, 0);
+
+ *codec = AudioCodec(id, name, clockrate, bitrate, channels, 0);
+ ParseFeedbackParams(elem, &codec->feedback_params);
+ return true;
+}
+
+bool ParseJingleVideoCodec(const buzz::XmlElement* elem, VideoCodec* codec) {
+ int id = GetXmlAttr(elem, QN_ID, -1);
+ if (id < 0)
+ return false;
+
+ std::string name = GetXmlAttr(elem, QN_NAME, buzz::STR_EMPTY);
+
+ std::map<std::string, std::string> paramap;
+ ParsePayloadTypeParameters(elem, &paramap);
+ int width = FindWithDefault(paramap, PAYLOADTYPE_PARAMETER_WIDTH, 0);
+ int height = FindWithDefault(paramap, PAYLOADTYPE_PARAMETER_HEIGHT, 0);
+ int framerate = FindWithDefault(paramap, PAYLOADTYPE_PARAMETER_FRAMERATE, 0);
+
+ *codec = VideoCodec(id, name, width, height, framerate, 0);
+ codec->params = paramap;
+ ParseFeedbackParams(elem, &codec->feedback_params);
+ return true;
+}
+
+bool ParseJingleDataCodec(const buzz::XmlElement* elem, DataCodec* codec) {
+ int id = GetXmlAttr(elem, QN_ID, -1);
+ if (id < 0)
+ return false;
+
+ std::string name = GetXmlAttr(elem, QN_NAME, buzz::STR_EMPTY);
+
+ *codec = DataCodec(id, name, 0);
+ ParseFeedbackParams(elem, &codec->feedback_params);
+ return true;
+}
+
// Fills |media|'s stream list either from Jingle stream elements or, for
// legacy endpoints, from the ssrc attribute on the description element.
bool ParseJingleStreamsOrLegacySsrc(const buzz::XmlElement* desc_elem,
                                    MediaContentDescription* media,
                                    ParseError* error) {
  if (HasJingleStreams(desc_elem)) {
    if (!ParseJingleStreams(desc_elem, &(media->mutable_streams()), error)) {
      return false;
    }
  } else {
    // Legacy path: a single ssrc attribute describes the only stream.
    const std::string ssrc_str = desc_elem->Attr(QN_SSRC);
    if (!ParseSsrcAsLegacyStream(
            ssrc_str, &(media->mutable_streams()), error)) {
      return false;
    }
  }
  return true;
}
+
// Parses a Jingle audio content element (codecs, streams, encryption,
// rtcp-mux, RTP header extensions) into a new AudioContentDescription.
// On success, ownership transfers to |*content|.
bool ParseJingleAudioContent(const buzz::XmlElement* content_elem,
                             ContentDescription** content,
                             ParseError* error) {
  // scoped_ptr guarantees the description is freed on every error path.
  talk_base::scoped_ptr<AudioContentDescription> audio(
      new AudioContentDescription());

  // Content-level feedback params apply to every codec parsed below.
  FeedbackParams content_feedback_params;
  ParseFeedbackParams(content_elem, &content_feedback_params);

  for (const buzz::XmlElement* payload_elem =
           content_elem->FirstNamed(QN_JINGLE_RTP_PAYLOADTYPE);
       payload_elem != NULL;
       payload_elem = payload_elem->NextNamed(QN_JINGLE_RTP_PAYLOADTYPE)) {
    AudioCodec codec;
    if (ParseJingleAudioCodec(payload_elem, &codec)) {
      AddFeedbackParams(content_feedback_params, &codec.feedback_params);
      audio->AddCodec(codec);
    }
  }

  if (!ParseJingleStreamsOrLegacySsrc(content_elem, audio.get(), error)) {
    return false;
  }

  if (!ParseJingleEncryption(content_elem, audio.get(), error)) {
    return false;
  }

  // rtcp-mux is signalled by the mere presence of the element.
  audio->set_rtcp_mux(content_elem->FirstNamed(QN_JINGLE_RTCP_MUX) != NULL);

  RtpHeaderExtensions hdrexts;
  if (!ParseJingleRtpHeaderExtensions(content_elem, &hdrexts, error)) {
    return false;
  }
  audio->set_rtp_header_extensions(hdrexts);

  *content = audio.release();
  return true;
}
+
// Parses a Jingle video content element (codecs, streams, bandwidth,
// encryption, rtcp-mux, RTP header extensions) into a new
// VideoContentDescription. On success, ownership transfers to |*content|.
bool ParseJingleVideoContent(const buzz::XmlElement* content_elem,
                             ContentDescription** content,
                             ParseError* error) {
  // scoped_ptr guarantees the description is freed on every error path.
  talk_base::scoped_ptr<VideoContentDescription> video(
      new VideoContentDescription());

  // Content-level feedback params apply to every codec parsed below.
  FeedbackParams content_feedback_params;
  ParseFeedbackParams(content_elem, &content_feedback_params);

  for (const buzz::XmlElement* payload_elem =
           content_elem->FirstNamed(QN_JINGLE_RTP_PAYLOADTYPE);
       payload_elem != NULL;
       payload_elem = payload_elem->NextNamed(QN_JINGLE_RTP_PAYLOADTYPE)) {
    VideoCodec codec;
    if (ParseJingleVideoCodec(payload_elem, &codec)) {
      AddFeedbackParams(content_feedback_params, &codec.feedback_params);
      video->AddCodec(codec);
    }
  }

  if (!ParseJingleStreamsOrLegacySsrc(content_elem, video.get(), error)) {
    return false;
  }
  ParseBandwidth(content_elem, video.get());

  if (!ParseJingleEncryption(content_elem, video.get(), error)) {
    return false;
  }

  // rtcp-mux is signalled by the mere presence of the element.
  video->set_rtcp_mux(content_elem->FirstNamed(QN_JINGLE_RTCP_MUX) != NULL);

  RtpHeaderExtensions hdrexts;
  if (!ParseJingleRtpHeaderExtensions(content_elem, &hdrexts, error)) {
    return false;
  }
  video->set_rtp_header_extensions(hdrexts);

  *content = video.release();
  return true;
}
+
// Parses an SCTP data content element into a new DataContentDescription
// with the SCTP protocol set. On success, ownership transfers to |*content|.
bool ParseJingleSctpDataContent(const buzz::XmlElement* content_elem,
                                ContentDescription** content,
                                ParseError* error) {
  talk_base::scoped_ptr<DataContentDescription> data(
      new DataContentDescription());
  data->set_protocol(kMediaProtocolSctp);

  for (const buzz::XmlElement* stream_elem =
           content_elem->FirstNamed(QN_JINGLE_DRAFT_SCTP_STREAM);
       stream_elem != NULL;
       stream_elem = stream_elem->NextNamed(QN_JINGLE_DRAFT_SCTP_STREAM)) {
    StreamParams stream;
    stream.groupid = stream_elem->Attr(QN_NICK);
    stream.id = stream_elem->Attr(QN_NAME);
    uint32 sid;
    if (!talk_base::FromString(stream_elem->Attr(QN_SID), &sid)) {
      return BadParse("Missing or invalid sid.", error);
    }
    if (sid > kMaxSctpSid) {
      return BadParse("SID is greater than max value.", error);
    }

    // The SCTP stream id is stored in the ssrcs list -- presumably because
    // StreamParams has no dedicated sid field; confirm against consumers.
    stream.ssrcs.push_back(sid);
    data->mutable_streams().push_back(stream);
  }

  *content = data.release();
  return true;
}
+
+bool ParseJingleRtpDataContent(const buzz::XmlElement* content_elem,
+ ContentDescription** content,
+ ParseError* error) {
+ DataContentDescription* data = new DataContentDescription();
+
+ FeedbackParams content_feedback_params;
+ ParseFeedbackParams(content_elem, &content_feedback_params);
+
+ for (const buzz::XmlElement* payload_elem =
+ content_elem->FirstNamed(QN_JINGLE_RTP_PAYLOADTYPE);
+ payload_elem != NULL;
+ payload_elem = payload_elem->NextNamed(QN_JINGLE_RTP_PAYLOADTYPE)) {
+ DataCodec codec;
+ if (ParseJingleDataCodec(payload_elem, &codec)) {
+ AddFeedbackParams(content_feedback_params, &codec.feedback_params);
+ data->AddCodec(codec);
+ }
+ }
+
+ if (!ParseJingleStreamsOrLegacySsrc(content_elem, data, error)) {
+ return false;
+ }
+ ParseBandwidth(content_elem, data);
+
+ if (!ParseJingleEncryption(content_elem, data, error)) {
+ return false;
+ }
+
+ data->set_rtcp_mux(content_elem->FirstNamed(QN_JINGLE_RTCP_MUX) != NULL);
+
+ *content = data;
+ return true;
+}
+
// Dispatches content parsing based on the signaling protocol (Gingle vs
// Jingle) and, for Jingle, on the description's namespace and media
// attribute. On success, ownership of the parsed description transfers to
// |*content|.
bool MediaSessionClient::ParseContent(SignalingProtocol protocol,
                                      const buzz::XmlElement* content_elem,
                                      ContentDescription** content,
                                      ParseError* error) {
  if (protocol == PROTOCOL_GINGLE) {
    // Gingle encodes the media kind in the element's namespace.
    const std::string& content_type = content_elem->Name().Namespace();
    if (NS_GINGLE_AUDIO == content_type) {
      return ParseGingleAudioContent(content_elem, content, error);
    } else if (NS_GINGLE_VIDEO == content_type) {
      return ParseGingleVideoContent(content_elem, content, error);
    } else {
      return BadParse("Unknown content type: " + content_type, error);
    }
  } else {
    const std::string& content_type = content_elem->Name().Namespace();
    // We use the XMLNS of the <description> element to determine if
    // it's RTP or SCTP.
    if (content_type == NS_JINGLE_DRAFT_SCTP) {
      return ParseJingleSctpDataContent(content_elem, content, error);
    }

    // For RTP contents, the media attribute selects the parser.
    std::string media;
    if (!RequireXmlAttr(content_elem, QN_JINGLE_CONTENT_MEDIA, &media, error))
      return false;

    if (media == JINGLE_CONTENT_MEDIA_AUDIO) {
      return ParseJingleAudioContent(content_elem, content, error);
    } else if (media == JINGLE_CONTENT_MEDIA_VIDEO) {
      return ParseJingleVideoContent(content_elem, content, error);
    } else if (media == JINGLE_CONTENT_MEDIA_DATA) {
      return ParseJingleRtpDataContent(content_elem, content, error);
    } else {
      return BadParse("Unknown media: " + media, error);
    }
  }
}
+
+buzz::XmlElement* CreateGingleAudioCodecElem(const AudioCodec& codec) {
+ buzz::XmlElement* payload_type =
+ new buzz::XmlElement(QN_GINGLE_AUDIO_PAYLOADTYPE, true);
+ AddXmlAttr(payload_type, QN_ID, codec.id);
+ payload_type->AddAttr(QN_NAME, codec.name);
+ if (codec.clockrate > 0)
+ AddXmlAttr(payload_type, QN_CLOCKRATE, codec.clockrate);
+ if (codec.bitrate > 0)
+ AddXmlAttr(payload_type, QN_BITRATE, codec.bitrate);
+ if (codec.channels > 1)
+ AddXmlAttr(payload_type, QN_CHANNELS, codec.channels);
+ return payload_type;
+}
+
+buzz::XmlElement* CreateGingleVideoCodecElem(const VideoCodec& codec) {
+ buzz::XmlElement* payload_type =
+ new buzz::XmlElement(QN_GINGLE_VIDEO_PAYLOADTYPE, true);
+ AddXmlAttr(payload_type, QN_ID, codec.id);
+ payload_type->AddAttr(QN_NAME, codec.name);
+ AddXmlAttr(payload_type, QN_WIDTH, codec.width);
+ AddXmlAttr(payload_type, QN_HEIGHT, codec.height);
+ AddXmlAttr(payload_type, QN_FRAMERATE, codec.framerate);
+ return payload_type;
+}
+
+buzz::XmlElement* CreateGingleSsrcElem(const buzz::QName& name, uint32 ssrc) {
+ buzz::XmlElement* elem = new buzz::XmlElement(name, true);
+ if (ssrc) {
+ SetXmlBody(elem, ssrc);
+ }
+ return elem;
+}
+
+buzz::XmlElement* CreateBandwidthElem(const buzz::QName& name, int bps) {
+ int kbps = bps / 1000;
+ buzz::XmlElement* elem = new buzz::XmlElement(name);
+ elem->AddAttr(buzz::QN_TYPE, "AS");
+ SetXmlBody(elem, kbps);
+ return elem;
+}
+
// For Jingle, usage_qname is empty.
// Serializes |cryptos| into an <encryption> element, one <crypto> child per
// entry; the required attribute is set only when |required| is true.
buzz::XmlElement* CreateJingleEncryptionElem(const CryptoParamsVec& cryptos,
                                             bool required) {
  buzz::XmlElement* encryption_elem = new buzz::XmlElement(QN_ENCRYPTION);

  if (required) {
    encryption_elem->SetAttr(QN_ENCRYPTION_REQUIRED, "true");
  }

  for (CryptoParamsVec::const_iterator i = cryptos.begin();
       i != cryptos.end();
       ++i) {
    buzz::XmlElement* crypto_elem = new buzz::XmlElement(QN_CRYPTO);

    AddXmlAttr(crypto_elem, QN_CRYPTO_TAG, i->tag);
    crypto_elem->AddAttr(QN_CRYPTO_SUITE, i->cipher_suite);
    crypto_elem->AddAttr(QN_CRYPTO_KEY_PARAMS, i->key_params);
    // session-params is optional and omitted when empty.
    if (!i->session_params.empty()) {
      crypto_elem->AddAttr(QN_CRYPTO_SESSION_PARAMS, i->session_params);
    }
    encryption_elem->AddElement(crypto_elem);
  }
  return encryption_elem;
}
+
+// Gingle variant of the encryption element: same as the Jingle one, plus
+// an empty usage child element named |usage_qname|.
+buzz::XmlElement* CreateGingleEncryptionElem(const CryptoParamsVec& cryptos,
+                                             const buzz::QName& usage_qname,
+                                             bool required) {
+  // CreateJingleEncryptionElem already writes the required='true'
+  // attribute when |required| is set, so the previous second SetAttr of
+  // QN_ENCRYPTION_REQUIRED here was redundant and has been removed.
+  buzz::XmlElement* encryption_elem =
+      CreateJingleEncryptionElem(cryptos, required);
+
+  buzz::XmlElement* usage_elem = new buzz::XmlElement(usage_qname);
+  encryption_elem->AddElement(usage_elem);
+
+  return encryption_elem;
+}
+
+// Serializes an audio description into a legacy Gingle content element:
+// the codec payload types, an optional single-ssrc child, and an optional
+// encryption element. Caller owns the returned element.
+buzz::XmlElement* CreateGingleAudioContentElem(
+    const AudioContentDescription* audio,
+    bool crypto_required) {
+  buzz::XmlElement* elem =
+      new buzz::XmlElement(QN_GINGLE_AUDIO_CONTENT, true);
+
+  for (AudioCodecs::const_iterator codec = audio->codecs().begin();
+       codec != audio->codecs().end(); ++codec) {
+    elem->AddElement(CreateGingleAudioCodecElem(*codec));
+  }
+  // Legacy Gingle signals at most one ssrc (the first one).
+  if (audio->has_ssrcs()) {
+    elem->AddElement(CreateGingleSsrcElem(
+        QN_GINGLE_AUDIO_SRCID, audio->first_ssrc()));
+  }
+
+  const CryptoParamsVec& cryptos = audio->cryptos();
+  if (!cryptos.empty()) {
+    elem->AddElement(CreateGingleEncryptionElem(cryptos,
+                                                QN_GINGLE_AUDIO_CRYPTO_USAGE,
+                                                crypto_required));
+  }
+  return elem;
+}
+
+// Serializes a video description into a legacy Gingle content element;
+// like the audio variant, but also writes a bandwidth element when the
+// description carries an explicit (non-auto) bandwidth.
+buzz::XmlElement* CreateGingleVideoContentElem(
+    const VideoContentDescription* video,
+    bool crypto_required) {
+  buzz::XmlElement* elem =
+      new buzz::XmlElement(QN_GINGLE_VIDEO_CONTENT, true);
+
+  for (VideoCodecs::const_iterator codec = video->codecs().begin();
+       codec != video->codecs().end(); ++codec) {
+    elem->AddElement(CreateGingleVideoCodecElem(*codec));
+  }
+  if (video->has_ssrcs()) {
+    elem->AddElement(CreateGingleSsrcElem(
+        QN_GINGLE_VIDEO_SRCID, video->first_ssrc()));
+  }
+  // kAutoBandwidth means "unspecified"; only explicit values are signaled.
+  if (video->bandwidth() != kAutoBandwidth) {
+    elem->AddElement(CreateBandwidthElem(QN_GINGLE_VIDEO_BANDWIDTH,
+                                         video->bandwidth()));
+  }
+
+  const CryptoParamsVec& cryptos = video->cryptos();
+  if (!cryptos.empty()) {
+    elem->AddElement(CreateGingleEncryptionElem(cryptos,
+                                                QN_GINGLE_VIDEO_CRYPTO_USAGE,
+                                                crypto_required));
+  }
+
+  return elem;
+}
+
+// Builds a <parameter name='...' value='...'/> child for a payload type.
+template <class T>
+buzz::XmlElement* CreatePayloadTypeParameterElem(
+    const std::string& name, T value) {
+  buzz::XmlElement* param_elem = new buzz::XmlElement(QN_PARAMETER);
+  param_elem->AddAttr(QN_PAYLOADTYPE_PARAMETER_NAME, name);
+  AddXmlAttr(param_elem, QN_PAYLOADTYPE_PARAMETER_VALUE, value);
+  return param_elem;
+}
+
+// Appends one <rtcp-fb/> child to |elem| per configured feedback parameter.
+void AddRtcpFeedbackElem(buzz::XmlElement* elem,
+                         const FeedbackParams& feedback_params) {
+  const std::vector<FeedbackParam>& params = feedback_params.params();
+  for (std::vector<FeedbackParam>::const_iterator param = params.begin();
+       param != params.end(); ++param) {
+    buzz::XmlElement* fb_elem = new buzz::XmlElement(QN_JINGLE_RTCP_FB);
+    fb_elem->AddAttr(QN_TYPE, param->id());
+    fb_elem->AddAttr(QN_SUBTYPE, param->param());
+    elem->AddElement(fb_elem);
+  }
+}
+
+// Serializes an audio codec as a Jingle <payload-type/> element. Unlike
+// the Gingle form, bitrate travels as a <parameter/> child rather than an
+// attribute.
+buzz::XmlElement* CreateJingleAudioCodecElem(const AudioCodec& codec) {
+  buzz::XmlElement* elem = new buzz::XmlElement(QN_JINGLE_RTP_PAYLOADTYPE);
+
+  AddXmlAttr(elem, QN_ID, codec.id);
+  elem->AddAttr(QN_NAME, codec.name);
+  // A non-positive clockrate is treated as "unspecified" and omitted.
+  if (codec.clockrate > 0) {
+    AddXmlAttr(elem, QN_CLOCKRATE, codec.clockrate);
+  }
+  if (codec.bitrate > 0) {
+    elem->AddElement(CreatePayloadTypeParameterElem(
+        PAYLOADTYPE_PARAMETER_BITRATE, codec.bitrate));
+  }
+  // Mono (one channel) is the default and is not written.
+  if (codec.channels > 1) {
+    AddXmlAttr(elem, QN_CHANNELS, codec.channels);
+  }
+
+  AddRtcpFeedbackElem(elem, codec.feedback_params);
+
+  return elem;
+}
+
+// Serializes a video codec as a Jingle <payload-type/> element with
+// width/height/framerate and any codec-specific params as <parameter/>
+// children.
+buzz::XmlElement* CreateJingleVideoCodecElem(const VideoCodec& codec) {
+  buzz::XmlElement* elem = new buzz::XmlElement(QN_JINGLE_RTP_PAYLOADTYPE);
+
+  AddXmlAttr(elem, QN_ID, codec.id);
+  elem->AddAttr(QN_NAME, codec.name);
+  elem->AddElement(CreatePayloadTypeParameterElem(
+      PAYLOADTYPE_PARAMETER_WIDTH, codec.width));
+  elem->AddElement(CreatePayloadTypeParameterElem(
+      PAYLOADTYPE_PARAMETER_HEIGHT, codec.height));
+  elem->AddElement(CreatePayloadTypeParameterElem(
+      PAYLOADTYPE_PARAMETER_FRAMERATE, codec.framerate));
+
+  AddRtcpFeedbackElem(elem, codec.feedback_params);
+
+  // Codec-specific key/value parameters follow the standard ones.
+  CodecParameterMap::const_iterator param_iter;
+  for (param_iter = codec.params.begin(); param_iter != codec.params.end();
+       ++param_iter) {
+    elem->AddElement(CreatePayloadTypeParameterElem(param_iter->first,
+                                                    param_iter->second));
+  }
+
+  return elem;
+}
+
+// Serializes a data codec as a Jingle <payload-type/> element (id, name,
+// and any RTCP feedback children only).
+buzz::XmlElement* CreateJingleDataCodecElem(const DataCodec& codec) {
+  buzz::XmlElement* payload_elem =
+      new buzz::XmlElement(QN_JINGLE_RTP_PAYLOADTYPE);
+  AddXmlAttr(payload_elem, QN_ID, codec.id);
+  payload_elem->AddAttr(QN_NAME, codec.name);
+  AddRtcpFeedbackElem(payload_elem, codec.feedback_params);
+  return payload_elem;
+}
+
+// Writes the first ssrc (if any) as a legacy ssrc attribute on |elem|.
+void WriteLegacyJingleSsrc(const MediaContentDescription* media,
+                           buzz::XmlElement* elem) {
+  if (!media->has_ssrcs()) {
+    return;
+  }
+  AddXmlAttr(elem, QN_SSRC, media->first_ssrc());
+}
+
+// Multistream descriptions get full stream markup; otherwise fall back to
+// the legacy single-ssrc attribute.
+void WriteJingleStreamsOrLegacySsrc(const MediaContentDescription* media,
+                                    buzz::XmlElement* desc_elem) {
+  if (media->multistream()) {
+    WriteJingleStreams(media->streams(), desc_elem);
+  } else {
+    WriteLegacyJingleSsrc(media, desc_elem);
+  }
+}
+
+// Serializes an audio description as a Jingle RTP <description/> element:
+// streams/ssrc, codecs, optional encryption, optional rtcp-mux marker,
+// and RTP header extensions. Caller owns the returned element.
+buzz::XmlElement* CreateJingleAudioContentElem(
+    const AudioContentDescription* audio, bool crypto_required) {
+  buzz::XmlElement* elem =
+      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT, true);
+
+  elem->SetAttr(QN_JINGLE_CONTENT_MEDIA, JINGLE_CONTENT_MEDIA_AUDIO);
+  WriteJingleStreamsOrLegacySsrc(audio, elem);
+
+  for (AudioCodecs::const_iterator codec = audio->codecs().begin();
+       codec != audio->codecs().end(); ++codec) {
+    elem->AddElement(CreateJingleAudioCodecElem(*codec));
+  }
+
+  const CryptoParamsVec& cryptos = audio->cryptos();
+  if (!cryptos.empty()) {
+    elem->AddElement(CreateJingleEncryptionElem(cryptos, crypto_required));
+  }
+
+  // <rtcp-mux/> is an empty marker element.
+  if (audio->rtcp_mux()) {
+    elem->AddElement(new buzz::XmlElement(QN_JINGLE_RTCP_MUX));
+  }
+
+  WriteJingleRtpHeaderExtensions(audio->rtp_header_extensions(), elem);
+
+  return elem;
+}
+
+// Serializes a video description as a Jingle RTP <description/> element;
+// like the audio variant, plus a bandwidth element when one has been set
+// explicitly.
+buzz::XmlElement* CreateJingleVideoContentElem(
+    const VideoContentDescription* video, bool crypto_required) {
+  buzz::XmlElement* elem =
+      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT, true);
+
+  elem->SetAttr(QN_JINGLE_CONTENT_MEDIA, JINGLE_CONTENT_MEDIA_VIDEO);
+  WriteJingleStreamsOrLegacySsrc(video, elem);
+
+  for (VideoCodecs::const_iterator codec = video->codecs().begin();
+       codec != video->codecs().end(); ++codec) {
+    elem->AddElement(CreateJingleVideoCodecElem(*codec));
+  }
+
+  const CryptoParamsVec& cryptos = video->cryptos();
+  if (!cryptos.empty()) {
+    elem->AddElement(CreateJingleEncryptionElem(cryptos, crypto_required));
+  }
+
+  if (video->rtcp_mux()) {
+    elem->AddElement(new buzz::XmlElement(QN_JINGLE_RTCP_MUX));
+  }
+
+  // kAutoBandwidth means "unspecified"; only explicit values are signaled.
+  if (video->bandwidth() != kAutoBandwidth) {
+    elem->AddElement(CreateBandwidthElem(QN_JINGLE_RTP_BANDWIDTH,
+                                         video->bandwidth()));
+  }
+
+  WriteJingleRtpHeaderExtensions(video->rtp_header_extensions(), elem);
+
+  return elem;
+}
+
+// Serializes an SCTP data content: one <stream/> child per StreamParams.
+// (Also drops a stray double semicolon from the original return line.)
+buzz::XmlElement* CreateJingleSctpDataContentElem(
+    const DataContentDescription* data) {
+  buzz::XmlElement* content_elem =
+      new buzz::XmlElement(QN_JINGLE_DRAFT_SCTP_CONTENT, true);
+  for (std::vector<StreamParams>::const_iterator
+           stream = data->streams().begin();
+       stream != data->streams().end(); ++stream) {
+    buzz::XmlElement* stream_elem =
+        new buzz::XmlElement(QN_JINGLE_DRAFT_SCTP_STREAM, false);
+    AddXmlAttrIfNonEmpty(stream_elem, QN_NICK, stream->groupid);
+    AddXmlAttrIfNonEmpty(stream_elem, QN_NAME, stream->id);
+    // The first entry in |ssrcs| is written out as the sid attribute.
+    if (!stream->ssrcs.empty()) {
+      AddXmlAttr(stream_elem, QN_SID, stream->ssrcs[0]);
+    }
+    content_elem->AddElement(stream_elem);
+  }
+  return content_elem;
+}
+
+// Serializes an RTP data description as a Jingle RTP <description/>
+// element: streams/ssrc, data codecs, optional encryption, optional
+// rtcp-mux marker, and an explicit bandwidth element when one was set.
+buzz::XmlElement* CreateJingleRtpDataContentElem(
+    const DataContentDescription* data, bool crypto_required) {
+
+  buzz::XmlElement* elem =
+      new buzz::XmlElement(QN_JINGLE_RTP_CONTENT, true);
+
+  elem->SetAttr(QN_JINGLE_CONTENT_MEDIA, JINGLE_CONTENT_MEDIA_DATA);
+  WriteJingleStreamsOrLegacySsrc(data, elem);
+
+  for (DataCodecs::const_iterator codec = data->codecs().begin();
+       codec != data->codecs().end(); ++codec) {
+    elem->AddElement(CreateJingleDataCodecElem(*codec));
+  }
+
+  const CryptoParamsVec& cryptos = data->cryptos();
+  if (!cryptos.empty()) {
+    elem->AddElement(CreateJingleEncryptionElem(cryptos, crypto_required));
+  }
+
+  if (data->rtcp_mux()) {
+    elem->AddElement(new buzz::XmlElement(QN_JINGLE_RTCP_MUX));
+  }
+
+  // kAutoBandwidth means "unspecified"; only explicit values are signaled.
+  if (data->bandwidth() != kAutoBandwidth) {
+    elem->AddElement(CreateBandwidthElem(QN_JINGLE_RTP_BANDWIDTH,
+                                         data->bandwidth()));
+  }
+
+  return elem;
+}
+
+// True when the data content negotiated an SCTP-based transport protocol
+// (plain SCTP or DTLS/SCTP).
+bool IsSctp(const DataContentDescription* data) {
+  return data->protocol() == kMediaProtocolSctp ||
+         data->protocol() == kMediaProtocolDtlsSctp;
+}
+
+// Dispatches to the SCTP or RTP serializer based on the data protocol.
+// crypto_required only applies to the RTP form.
+buzz::XmlElement* CreateJingleDataContentElem(
+    const DataContentDescription* data, bool crypto_required) {
+  return IsSctp(data)
+             ? CreateJingleSctpDataContentElem(data)
+             : CreateJingleRtpDataContentElem(data, crypto_required);
+}
+
+bool MediaSessionClient::IsWritable(SignalingProtocol protocol,
+                                    const ContentDescription* content) {
+  const MediaContentDescription* media =
+      static_cast<const MediaContentDescription*>(content);
+  // Gingle has no serialization for data contents (see WriteContent);
+  // every other protocol/type combination is writable.
+  return !(protocol == PROTOCOL_GINGLE && media->type() == MEDIA_TYPE_DATA);
+}
+
+// Serializes |content| into a protocol-appropriate XML element returned
+// via |*elem|. Returns false (setting |error|) for combinations that
+// cannot be written, e.g. data contents over Gingle.
+bool MediaSessionClient::WriteContent(SignalingProtocol protocol,
+                                      const ContentDescription* content,
+                                      buzz::XmlElement** elem,
+                                      WriteError* error) {
+  const MediaContentDescription* media =
+      static_cast<const MediaContentDescription*>(content);
+  // Cryptos are marked required on the wire only under SEC_REQUIRED.
+  bool crypto_required = secure() == SEC_REQUIRED;
+
+  if (media->type() == MEDIA_TYPE_AUDIO) {
+    const AudioContentDescription* audio =
+        static_cast<const AudioContentDescription*>(media);
+    if (protocol == PROTOCOL_GINGLE) {
+      *elem = CreateGingleAudioContentElem(audio, crypto_required);
+    } else {
+      *elem = CreateJingleAudioContentElem(audio, crypto_required);
+    }
+  } else if (media->type() == MEDIA_TYPE_VIDEO) {
+    const VideoContentDescription* video =
+        static_cast<const VideoContentDescription*>(media);
+    if (protocol == PROTOCOL_GINGLE) {
+      *elem = CreateGingleVideoContentElem(video, crypto_required);
+    } else {
+      *elem = CreateJingleVideoContentElem(video, crypto_required);
+    }
+  } else if (media->type() == MEDIA_TYPE_DATA) {
+    const DataContentDescription* data =
+        static_cast<const DataContentDescription*>(media);
+    if (protocol == PROTOCOL_GINGLE) {
+      // Mirrors IsWritable(): Gingle cannot carry data contents.
+      return BadWrite("Data channel not supported with Gingle.", error);
+    } else {
+      *elem = CreateJingleDataContentElem(data, crypto_required);
+    }
+  } else {
+    return BadWrite("Unknown content type: " +
+                    talk_base::ToString<int>(media->type()), error);
+  }
+
+  return true;
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.h b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.h
new file mode 100644
index 00000000000..1ade753f955
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient.h
@@ -0,0 +1,175 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_MEDIASESSIONCLIENT_H_
+#define TALK_SESSION_MEDIA_MEDIASESSIONCLIENT_H_
+
+#include <string>
+#include <vector>
+#include <map>
+#include <algorithm>
+#include "talk/base/messagequeue.h"
+#include "talk/base/sigslot.h"
+#include "talk/base/sigslotrepeater.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/p2p/base/session.h"
+#include "talk/p2p/base/sessionclient.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/p2p/base/sessionmanager.h"
+#include "talk/session/media/call.h"
+#include "talk/session/media/channelmanager.h"
+#include "talk/session/media/mediasession.h"
+
+namespace cricket {
+
+class Call;
+
+// XMPP SessionClient that creates and manages media Calls/Sessions and
+// (de)serializes media content descriptions to/from Gingle and Jingle XML.
+class MediaSessionClient : public SessionClient, public sigslot::has_slots<> {
+ public:
+#if !defined(DISABLE_MEDIA_ENGINE_FACTORY)
+  MediaSessionClient(const buzz::Jid& jid, SessionManager *manager);
+#endif
+  // Alternative constructor, allowing injection of media_engine
+  // and device_manager.
+  MediaSessionClient(const buzz::Jid& jid, SessionManager *manager,
+                     MediaEngineInterface* media_engine,
+                     DataEngineInterface* data_media_engine,
+                     DeviceManagerInterface* device_manager);
+  ~MediaSessionClient();
+
+  const buzz::Jid &jid() const { return jid_; }
+  SessionManager* session_manager() const { return session_manager_; }
+  ChannelManager* channel_manager() const { return channel_manager_; }
+
+  // Return mapping of call ids to Calls.
+  const std::map<uint32, Call *>& calls() const { return calls_; }
+
+  // The settings below combine with the settings on SessionManager to choose
+  // whether SDES-SRTP, DTLS-SRTP, or no security should be used. The possible
+  // combinations are shown in the following table. Note that where either DTLS
+  // or SDES is possible, DTLS is preferred. Thus to require either SDES or
+  // DTLS, but not mandate DTLS, set SDES to require and DTLS to enable.
+  //
+  //              | SDES:Disable   | SDES:Enable    | SDES:Require   |
+  // ----------------------------------------------------------------|
+  // DTLS:Disable | No SRTP        | SDES Optional  | SDES Mandatory |
+  // DTLS:Enable  | DTLS Optional  | DTLS/SDES Opt  | DTLS/SDES Mand |
+  // DTLS:Require | DTLS Mandatory | DTLS Mandatory | DTLS Mandatory |
+
+  // Control use of SDES-SRTP.
+  SecurePolicy secure() const { return desc_factory_.secure(); }
+  void set_secure(SecurePolicy s) { desc_factory_.set_secure(s); }
+
+  // Control use of multiple sessions in a call.
+  void set_multisession_enabled(bool multisession_enabled) {
+    multisession_enabled_ = multisession_enabled;
+  }
+
+  int GetCapabilities() { return channel_manager_->GetCapabilities(); }
+
+  // Call creation and destruction; created Calls are tracked in calls_.
+  Call *CreateCall();
+  void DestroyCall(Call *call);
+
+  // Get/set the currently focused call.
+  Call *GetFocus();
+  void SetFocus(Call *call);
+
+  void JoinCalls(Call *call_to_join, Call *call);
+
+  // Device enumeration, forwarded to the ChannelManager.
+  bool GetAudioInputDevices(std::vector<std::string>* names) {
+    return channel_manager_->GetAudioInputDevices(names);
+  }
+  bool GetAudioOutputDevices(std::vector<std::string>* names) {
+    return channel_manager_->GetAudioOutputDevices(names);
+  }
+  bool GetVideoCaptureDevices(std::vector<std::string>* names) {
+    return channel_manager_->GetVideoCaptureDevices(names);
+  }
+
+  // Device configuration, forwarded to the ChannelManager.
+  bool SetAudioOptions(const std::string& in_name, const std::string& out_name,
+                       int opts) {
+    return channel_manager_->SetAudioOptions(in_name, out_name, opts);
+  }
+  bool SetOutputVolume(int level) {
+    return channel_manager_->SetOutputVolume(level);
+  }
+  bool SetCaptureDevice(const std::string& cam_device) {
+    return channel_manager_->SetCaptureDevice(cam_device);
+  }
+
+  // Offer/answer creation, forwarded to the description factory.
+  SessionDescription* CreateOffer(const CallOptions& options) {
+    return desc_factory_.CreateOffer(options, NULL);
+  }
+  SessionDescription* CreateAnswer(const SessionDescription* offer,
+                                   const CallOptions& options) {
+    return desc_factory_.CreateAnswer(offer, options, NULL);
+  }
+
+  // SignalFocus fires with (old focus, new focus); SignalDevicesChange
+  // repeats device-change notifications to interested listeners.
+  sigslot::signal2<Call *, Call *> SignalFocus;
+  sigslot::signal1<Call *> SignalCallCreate;
+  sigslot::signal1<Call *> SignalCallDestroy;
+  sigslot::repeater0<> SignalDevicesChange;
+
+  // SessionClient interface: content (de)serialization.
+  virtual bool ParseContent(SignalingProtocol protocol,
+                            const buzz::XmlElement* elem,
+                            ContentDescription** content,
+                            ParseError* error);
+  virtual bool IsWritable(SignalingProtocol protocol,
+                          const ContentDescription* content);
+  virtual bool WriteContent(SignalingProtocol protocol,
+                            const ContentDescription* content,
+                            buzz::XmlElement** elem,
+                            WriteError* error);
+
+ private:
+  void Construct();
+  void OnSessionCreate(Session *session, bool received_initiate);
+  void OnSessionState(BaseSession *session, BaseSession::State state);
+  void OnSessionDestroy(Session *session);
+  Session *CreateSession(Call *call);
+  Session *CreateSession(const std::string& id, Call* call);
+  Call *FindCallByRemoteName(const std::string &remote_name);
+
+  buzz::Jid jid_;
+  SessionManager* session_manager_;
+  Call *focus_call_;
+  ChannelManager *channel_manager_;
+  MediaSessionDescriptionFactory desc_factory_;
+  bool multisession_enabled_;
+  std::map<uint32, Call *> calls_;
+
+  // Maintain a mapping of session id to call.
+  typedef std::map<std::string, Call *> SessionMap;
+  SessionMap session_map_;
+
+  friend class Call;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIASESSIONCLIENT_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient_unittest.cc
new file mode 100644
index 00000000000..1ad93722bda
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasessionclient_unittest.cc
@@ -0,0 +1,3404 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <vector>
+
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/devices/fakedevicemanager.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/client/basicportallocator.h"
+#include "talk/session/media/mediasessionclient.h"
+#include "talk/xmllite/xmlbuilder.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/xmllite/xmlprinter.h"
+#include "talk/xmpp/constants.h"
+
+using cricket::AudioCodec;
+using cricket::AudioContentDescription;
+using cricket::Codec;
+using cricket::DataCodec;
+using cricket::DataContentDescription;
+using cricket::FeedbackParam;
+using cricket::FeedbackParams;
+using cricket::VideoCodec;
+using cricket::VideoContentDescription;
+
+// The codecs that our FakeMediaEngine will support. Order is important, since
+// the tests check that our messages have codecs in the correct order.
+// NOTE(review): the trailing constructor argument descends 18..1 and looks
+// like a preference ranking — confirm against cricket::AudioCodec's decl.
+static const cricket::AudioCodec kAudioCodecs[] = {
+  cricket::AudioCodec(103, "ISAC", 16000, -1, 1, 18),
+  cricket::AudioCodec(104, "ISAC", 32000, -1, 1, 17),
+  cricket::AudioCodec(119, "ISACLC", 16000, 40000, 1, 16),
+  cricket::AudioCodec(99, "speex", 16000, 22000, 1, 15),
+  cricket::AudioCodec(97, "IPCMWB", 16000, 80000, 1, 14),
+  cricket::AudioCodec(9, "G722", 16000, 64000, 1, 13),
+  cricket::AudioCodec(102, "iLBC", 8000, 13300, 1, 12),
+  cricket::AudioCodec(98, "speex", 8000, 11000, 1, 11),
+  cricket::AudioCodec(3, "GSM", 8000, 13000, 1, 10),
+  cricket::AudioCodec(100, "EG711U", 8000, 64000, 1, 9),
+  cricket::AudioCodec(101, "EG711A", 8000, 64000, 1, 8),
+  cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 7),
+  cricket::AudioCodec(8, "PCMA", 8000, 64000, 1, 6),
+  cricket::AudioCodec(126, "CN", 32000, 0, 1, 5),
+  cricket::AudioCodec(105, "CN", 16000, 0, 1, 4),
+  cricket::AudioCodec(13, "CN", 8000, 0, 1, 3),
+  cricket::AudioCodec(117, "red", 8000, 0, 1, 2),
+  cricket::AudioCodec(106, "telephone-event", 8000, 0, 1, 1)
+};
+
+// Single video codec supported by the fake engine.
+static const cricket::VideoCodec kVideoCodecs[] = {
+  cricket::VideoCodec(96, "H264-SVC", 320, 200, 30, 1)
+};
+
+// Single data codec supported by the fake engine.
+static const cricket::DataCodec kDataCodecs[] = {
+  cricket::DataCodec(127, "google-data", 0)
+};
+
+// SDES crypto <encryption/> stanzas in both dialects; the Gingle form
+// carries an extra <usage/> child, the Jingle form does not.
+const std::string kGingleCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1'> " \
+    "  <usage/> " \
+    "  <rtp:crypto tag='145' crypto-suite='AES_CM_128_HMAC_SHA1_32'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='AES_CM_128_HMAC_SHA1_80'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+// Jingle offer does not have any <usage> element.
+const std::string kJingleCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1'> " \
+    "  <rtp:crypto tag='145' crypto-suite='AES_CM_128_HMAC_SHA1_32'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='AES_CM_128_HMAC_SHA1_80'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+// As above but with required='true' on the encryption element.
+const std::string kGingleRequiredCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1' required='true'> "\
+    "  <usage/> " \
+    "  <rtp:crypto tag='145' crypto-suite='AES_CM_128_HMAC_SHA1_32'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='AES_CM_128_HMAC_SHA1_80'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+const std::string kJingleRequiredCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1' required='true'> "\
+    "  <rtp:crypto tag='145' crypto-suite='AES_CM_128_HMAC_SHA1_32'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='AES_CM_128_HMAC_SHA1_80'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+// Offers listing only crypto suites the client does not support.
+const std::string kGingleUnsupportedCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1'> " \
+    "  <usage/> " \
+    "  <rtp:crypto tag='145' crypto-suite='NOT_SUPPORTED_1'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='NOT_SUPPORTED_2'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+const std::string kJingleUnsupportedCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1'> " \
+    "  <rtp:crypto tag='145' crypto-suite='NOT_SUPPORTED_1'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='NOT_SUPPORTED_2'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+// With unsupported but with required="true"
+const std::string kGingleRequiredUnsupportedCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1' required='true'>" \
+    "  <usage/> " \
+    "  <rtp:crypto tag='145' crypto-suite='NOT_SUPPORTED_1'" \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/>" \
+    "  <rtp:crypto tag='51' crypto-suite='NOT_SUPPORTED_2'" \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+const std::string kJingleRequiredUnsupportedCryptoOffer = \
+    "<rtp:encryption xmlns:rtp='urn:xmpp:jingle:apps:rtp:1' required='true'>" \
+    "  <rtp:crypto tag='145' crypto-suite='NOT_SUPPORTED_1' " \
+    "  key-params='inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9'/> " \
+    "  <rtp:crypto tag='51' crypto-suite='NOT_SUPPORTED_2' " \
+    "  key-params='inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy'/>" \
+    "</rtp:encryption> ";
+
+// A complete Gingle session-initiate IQ carrying the full audio codec
+// list from kAudioCodecs above, in the same order.
+const std::string kGingleInitiate(
+    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
+    "    to='user@domain.com/resource' type='set' id='123'> " \
+    "  <session xmlns='http://www.google.com/session' type='initiate'" \
+    "    id='abcdef' initiator='me@domain.com/resource'> " \
+    "    <description xmlns='http://www.google.com/session/phone'> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='103' name='ISAC' clockrate='16000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='104' name='ISAC' clockrate='32000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='119' name='ISACLC' clockrate='16000' bitrate='40000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='99' name='speex' clockrate='16000' bitrate='22000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='97' name='IPCMWB' clockrate='16000' bitrate='80000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='9' name='G722' clockrate='16000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='102' name='iLBC' clockrate='8000' bitrate='13300' />" \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='98' name='speex' clockrate='8000' bitrate='11000' />" \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='3' name='GSM' clockrate='8000' bitrate='13000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='100' name='EG711U' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='101' name='EG711A' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='0' name='PCMU' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='8' name='PCMA' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='126' name='CN' clockrate='32000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='105' name='CN' clockrate='16000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='13' name='CN' clockrate='8000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='117' name='red' clockrate='8000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='106' name='telephone-event' clockrate='8000' /> " \
+    "    </description> " \
+    "  </session> " \
+    "</iq> ");
+
+// The equivalent Jingle session-initiate; bitrates appear as <parameter/>
+// children instead of attributes.
+const std::string kJingleInitiate(
+    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
+    "    to='user@domain.com/resource' type='set' id='123'> " \
+    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
+    "          sid='abcdef' initiator='me@domain.com/resource'> " \
+    "    <content name='test audio'> " \
+    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
+    "        <payload-type id='103' name='ISAC' clockrate='16000'/> " \
+    "        <payload-type id='104' name='ISAC' clockrate='32000'/> " \
+    "        <payload-type " \
+    "          id='119' name='ISACLC' clockrate='16000'> " \
+    "          <parameter name='bitrate' value='40000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='99' name='speex' clockrate='16000'> " \
+    "          <parameter name='bitrate' value='22000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='97' name='IPCMWB' clockrate='16000'> " \
+    "          <parameter name='bitrate' value='80000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='9' name='G722' clockrate='16000'> " \
+    "          <parameter name='bitrate' value='64000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='102' name='iLBC' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='13300'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='98' name='speex' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='11000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='3' name='GSM' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='13000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='100' name='EG711U' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='64000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='101' name='EG711A' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='64000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='0' name='PCMU' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='64000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='8' name='PCMA' clockrate='8000'> " \
+    "          <parameter name='bitrate' value='64000'/> " \
+    "        </payload-type> " \
+    "        <payload-type " \
+    "          id='126' name='CN' clockrate='32000' /> " \
+    "        <payload-type " \
+    "          id='105' name='CN' clockrate='16000' /> " \
+    "        <payload-type " \
+    "          id='13' name='CN' clockrate='8000' /> " \
+    "        <payload-type " \
+    "          id='117' name='red' clockrate='8000' /> " \
+    "        <payload-type " \
+    "          id='106' name='telephone-event' clockrate='8000' /> " \
+    "      </description> " \
+    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
+    "    </content> " \
+    "  </jingle> " \
+    "</iq> ");
+
+// Initiate string with a different order of supported codecs.
+// Should accept the supported ones, but with our desired order.
+const std::string kGingleInitiateDifferentPreference(
+    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
+    "    to='user@domain.com/resource' type='set' id='123'> " \
+    "  <session xmlns='http://www.google.com/session' type='initiate'" \
+    "    id='abcdef' initiator='me@domain.com/resource'> " \
+    "    <description xmlns='http://www.google.com/session/phone'> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='104' name='ISAC' clockrate='32000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='97' name='IPCMWB' clockrate='16000' bitrate='80000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='9' name='G722' clockrate='16000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='119' name='ISACLC' clockrate='16000' bitrate='40000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='103' name='ISAC' clockrate='16000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='99' name='speex' clockrate='16000' bitrate='22000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='100' name='EG711U' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='101' name='EG711A' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='0' name='PCMU' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='8' name='PCMA' clockrate='8000' bitrate='64000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='102' name='iLBC' clockrate='8000' bitrate='13300' />" \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='3' name='GSM' clockrate='8000' bitrate='13000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='98' name='speex' clockrate='8000' bitrate='11000' />" \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='126' name='CN' clockrate='32000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='105' name='CN' clockrate='16000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='13' name='CN' clockrate='8000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='117' name='red' clockrate='8000' /> " \
+    "      <payload-type xmlns='http://www.google.com/session/phone' " \
+    "        id='106' name='telephone-event' clockrate='8000' /> " \
+    "    </description> " \
+    "  </session> " \
+    "</iq> ");
+
// Jingle (urn:xmpp:jingle:1) version of the stanza above: the same
// codec list, with bitrates carried as <parameter> children instead of
// bitrate attributes.
const std::string kJingleInitiateDifferentPreference(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='104' name='ISAC' clockrate='32000'/>    " \
    "        <payload-type  " \
    "          id='97' name='IPCMWB' clockrate='16000'>    " \
    "          <parameter name='bitrate' value='80000'/>   " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='9' name='G722' clockrate='16000'>  " \
    "          <parameter name='bitrate' value='64000'/>   " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='119' name='ISACLC' clockrate='16000'>  " \
    "          <parameter name='bitrate' value='40000'/>  " \
    "        </payload-type>  " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "        <payload-type  " \
    "          id='99' name='speex' clockrate='16000'>  " \
    "          <parameter name='bitrate' value='22000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='100' name='EG711U' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='64000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='101' name='EG711A' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='64000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='0' name='PCMU' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='64000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='8' name='PCMA' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='64000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='102' name='iLBC' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='13300'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='3' name='GSM' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='13000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='98' name='speex' clockrate='8000'>  " \
    "          <parameter name='bitrate' value='11000'/>  " \
    "        </payload-type>  " \
    "        <payload-type  " \
    "          id='126' name='CN' clockrate='32000' />  " \
    "        <payload-type  " \
    "          id='105' name='CN' clockrate='16000' />  " \
    "        <payload-type  " \
    "          id='13' name='CN' clockrate='8000' />  " \
    "        <payload-type  " \
    "          id='117' name='red' clockrate='8000' />  " \
    "        <payload-type  " \
    "          id='106' name='telephone-event' clockrate='8000' />  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate with <rtcp-fb> feedback elements, both at description level
// (video/data) and at payload-type level (audio/video).
const std::string kJingleInitiateWithRtcpFb(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'> " \
    "          <rtcp-fb type='nack'/> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <rtcp-fb type='nack'/> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <rtcp-fb type='ccm' subtype='fir'/> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test data'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='data'> " \
    "        <rtcp-fb type='nack'/> " \
    "        <payload-type id='127' name='google-data'> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Gingle video initiate: audio (session/phone) and video (session/video)
// payload types share a single session/video <description>.
const std::string kGingleVideoInitiate(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/video'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' clockrate='16000' />    " \
    "      <payload-type xmlns='http://www.google.com/session/video' " \
    "        id='99' name='H264-SVC' framerate='30' " \
    "        height='200' width='320'/> " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Jingle video initiate: separate audio and video <content> sections,
// with the H264-SVC geometry/framerate as <parameter> children.
const std::string kJingleVideoInitiate(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleVideoInitiate plus an RTP data content: a 'google-data'
// payload type with <rtcp-mux/> in the data description.
const std::string kJingleVideoInitiateWithRtpData(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test data'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='data'> " \
    "        <payload-type id='127' name='google-data'/> " \
    "        <rtcp-mux/> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleVideoInitiate plus an SCTP data content: a
// 'google:jingle:sctp' description containing one <stream sid='1'/>.
const std::string kJingleVideoInitiateWithSctpData(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test data'>  " \
    "      <description xmlns='google:jingle:sctp'> " \
    "        <stream sid='1'/> " \
    "      </description> " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleVideoInitiate, with a <bandwidth type='AS'>42</bandwidth>
// element in the video description.
const std::string kJingleVideoInitiateWithBandwidth(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "        <bandwidth type='AS'>42</bandwidth> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleVideoInitiate, with <rtcp-mux/> in the video description.
const std::string kJingleVideoInitiateWithRtcpMux(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "    <content name='test video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='video'> " \
    "        <payload-type id='99' name='H264-SVC'> " \
    "          <parameter name='height' value='200'/> " \
    "          <parameter name='width' value='320'/> " \
    "          <parameter name='framerate' value='30'/> " \
    "        </payload-type> " \
    "        <rtcp-mux/> " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate string with a combination of supported and unsupported codecs.
// Should accept the supported ones (the nonsense names like 'ASDFDS',
// '1010' and ':)' are the deliberately unsupported entries).
const std::string kGingleInitiateSomeUnsupported(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='97' name='ASDFDS' />   " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='102' name='1010' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='107' name='DFAS' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='100' name='EG711U' />  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='101' name='EG711A' />  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='0' name='PCMU' />      " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='110' name=':)' />      " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='13' name='CN' />       " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Jingle version of the stanza above.
const std::string kJingleInitiateSomeUnsupported(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'> " \
    "        <payload-type  " \
    "          id='103' name='ISAC' clockrate='16000' />    " \
    "        <payload-type  " \
    "          id='97' name='ASDFDS' />   " \
    "        <payload-type  " \
    "          id='102' name='1010' />    " \
    "        <payload-type  " \
    "          id='107' name='DFAS' />    " \
    "        <payload-type  " \
    "          id='100' name='EG711U' />  " \
    "        <payload-type  " \
    "          id='101' name='EG711A' />  " \
    "        <payload-type  " \
    "          id='0' name='PCMU' />      " \
    "        <payload-type  " \
    "          id='110' name=':)' />      " \
    "        <payload-type  " \
    "          id='13' name='CN' />       " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Gingle video initiate with a <bandwidth type='AS'>42</bandwidth>
// element in the video description.
const std::string kGingleVideoInitiateWithBandwidth(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/video'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' clockrate='16000' />    " \
    "      <payload-type xmlns='http://www.google.com/session/video' " \
    "        id='99' name='H264-SVC' framerate='30' " \
    "        height='200' width='320'/> " \
    "      <bandwidth type='AS'>42</bandwidth> " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Initiate string without any supported codecs. Should send a reject.
const std::string kGingleInitiateNoSupportedAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='123' name='Supercodec6000' />    " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
const std::string kJingleInitiateNoSupportedAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type  " \
    "          id='123' name='Supercodec6000' />    " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate string without any codecs. Assumes ancient version of Cricket
// and tries a session with ISAC and PCMU.
const std::string kGingleInitiateNoAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
const std::string kJingleInitiateNoAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// The codecs are supported, but not at the given clockrates. Should send
// a reject.
const std::string kGingleInitiateWrongClockrates(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' clockrate='8000'/>    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='97' name='IPCMWB' clockrate='1337'/>   " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='102' name='iLBC' clockrate='1982' />   " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
const std::string kJingleInitiateWrongClockrates(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type  " \
    "          id='103' name='ISAC' clockrate='8000'/>    " \
    "        <payload-type  " \
    "          id='97' name='IPCMWB' clockrate='1337'/>   " \
    "        <payload-type  " \
    "          id='102' name='iLBC' clockrate='1982' />   " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// The codecs are supported, but not with the given number of channels.
// Should send a reject.
const std::string kGingleInitiateWrongChannels(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' channels='2'/>    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='97' name='IPCMWB' channels='3'/>   " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
// NOTE(review): unlike the other Jingle stanzas, the <jingle> element
// here carries no sid/initiator attributes — confirm this is intended
// (the test expects a reject either way).
const std::string kJingleInitiateWrongChannels(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate'>  " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type  " \
    "          id='103' name='ISAC' channels='2'/>    " \
    "        <payload-type  " \
    "          id='97' name='IPCMWB' channels='3'/>   " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate with a dynamic codec not using webrtc default payload id. Should
// accept with provided payload id.
const std::string kGingleInitiateDynamicAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='123' name='speex' clockrate='16000'/>    " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
const std::string kJingleInitiateDynamicAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type  " \
    "          id='123' name='speex' clockrate='16000'/>  " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate string with nothing but static codec id's. Should accept.
const std::string kGingleInitiateStaticAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='3' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='0' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='8' />    " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");

// Jingle version of the stanza above.
const std::string kJingleInitiateStaticAudioCodecs(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type id='3' />    " \
    "        <payload-type id='0' />    " \
    "        <payload-type id='8' />    " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate with payload type-less codecs. Should reject.
// (Gingle version; the payload-type has a name/clockrate but no id.)
const std::string kGingleInitiateNoPayloadTypes(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        name='ISAC' clockrate='16000'/>    " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Jingle version of the payload-type-less initiate above. Should reject.
// Fixed: the original had a stray '>' after the action attribute, which
// closed the <jingle> tag early and demoted the sid/initiator attributes
// to character data; the stanza now matches its sibling initiates.
const std::string kJingleInitiateNoPayloadTypes(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type name='ISAC' clockrate='16000'/>    " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Initiate with unnamed dynamic codecs. Should reject.
// (Gingle version; the dynamic payload-type id='100' has no name.)
const std::string kGingleInitiateDynamicWithoutNames(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <session xmlns='http://www.google.com/session' type='initiate'" \
    "    id='abcdef' initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/phone'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='100' clockrate='16000'/>    " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Jingle version of the unnamed-dynamic-codec initiate above. Should reject.
// Fixed: the original had a stray '>' after the action attribute, which
// closed the <jingle> tag early and demoted the sid/initiator attributes
// to character data; the stanza now matches its sibling initiates.
const std::string kJingleInitiateDynamicWithoutNames(
    "<iq xmlns='jabber:client' from='me@domain.com/resource' " \
    "    to='user@domain.com/resource' type='set' id='123'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-initiate' " \
    "          sid='abcdef' initiator='me@domain.com/resource'>     " \
    "    <content name='test audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' media='audio'>" \
    "        <payload-type id='100' clockrate='16000'/>    " \
    "      </description>  " \
    "      <transport xmlns=\"http://www.google.com/transport/p2p\"/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// SSRC values (and SCTP stream id) referenced by the accept stanzas below.
const uint32 kAudioSsrc = 4294967295U;  // == 2^32 - 1: exercises the unsigned upper bound.
const uint32 kVideoSsrc = 87654321;
const uint32 kDataSsrc = 1010101;
const uint32 kDataSid = 0;
// Note that this message does not specify a session ID. It must be populated
// before use. The <src-id> values match kAudioSsrc/kVideoSsrc above.
const std::string kGingleAcceptWithSsrcs(
    "<iq xmlns='jabber:client' from='me@mydomain.com' " \
    "    to='user@domain.com/resource' type='set' id='150'>   " \
    "  <session xmlns='http://www.google.com/session' type='accept'   " \
    "    initiator='me@domain.com/resource'>            " \
    "    <description xmlns='http://www.google.com/session/video'>  " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='103' name='ISAC' clockrate='16000' />    " \
    "      <payload-type xmlns='http://www.google.com/session/phone' " \
    "        id='104' name='ISAC' clockrate='32000' />    " \
    "      <src-id xmlns='http://www.google.com/session/phone'>  " \
    "        4294967295</src-id>    " \
    "      <src-id>87654321</src-id>  " \
    "    </description>  " \
    "  </session>        " \
    "</iq> ");
+
// Jingle accept with ssrc attributes on the audio/video descriptions.
// Like kGingleAcceptWithSsrcs, no session id is specified here.
const std::string kJingleAcceptWithSsrcs(
    "<iq xmlns='jabber:client' from='me@mydomain.com' " \
    "    to='user@domain.com/resource' type='set' id='150'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-accept' " \
    "          initiator='me@domain.com/resource'>     " \
    "    <content name='audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='audio' ssrc='4294967295'>  " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "        <payload-type id='104' name='ISAC' clockrate='32000'/>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "    <content name='video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='video' ssrc='87654321'>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleAcceptWithSsrcs plus an RTP data content
// (media='data', ssrc='1010101' == kDataSsrc).
const std::string kJingleAcceptWithRtpDataSsrcs(
    "<iq xmlns='jabber:client' from='me@mydomain.com' " \
    "    to='user@domain.com/resource' type='set' id='150'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-accept' " \
    "          initiator='me@domain.com/resource'>     " \
    "    <content name='audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='audio' ssrc='4294967295'>  " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "        <payload-type id='104' name='ISAC' clockrate='32000'/>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "    <content name='video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='video' ssrc='87654321'>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "    <content name='data'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='data' ssrc='1010101'>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Like kJingleAcceptWithSsrcs plus an SCTP data content
// (google:jingle:sctp description containing <stream sid='1'/>).
const std::string kJingleAcceptWithSctpData(
    "<iq xmlns='jabber:client' from='me@mydomain.com' " \
    "    to='user@domain.com/resource' type='set' id='150'>   " \
    "  <jingle xmlns='urn:xmpp:jingle:1' action='session-accept' " \
    "          initiator='me@domain.com/resource'>     " \
    "    <content name='audio'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='audio' ssrc='4294967295'>  " \
    "        <payload-type id='103' name='ISAC' clockrate='16000'/>  " \
    "        <payload-type id='104' name='ISAC' clockrate='32000'/>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "    <content name='video'>  " \
    "      <description xmlns='urn:xmpp:jingle:apps:rtp:1' " \
    "          media='video' ssrc='87654321'>  " \
    "      </description>  " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "    <content name='data'>  " \
    "      <description xmlns='google:jingle:sctp'> " \
    "        <stream sid='1'/> " \
    "      </description> " \
    "      <transport xmlns='http://www.google.com/transport/p2p'/> " \
    "    </content>  " \
    "  </jingle>  " \
    "</iq> ");
+
// Builds the expected XML for a Jingle "session-info" view request.
// The whitespace inside the literal pieces is deliberate so the XML
// produced here matches the serializer's output byte-for-byte; every
// literal is kept verbatim and only the assembly style differs.
std::string JingleView(const std::string& ssrc,
                       const std::string& width,
                       const std::string& height,
                       const std::string& framerate) {
  std::string xml;
  // IQ envelope and <jingle> header.
  xml += "<cli:iq";
  xml += "  to='me@mydomain.com'";
  xml += "  type='set'";
  xml += "  xmlns:cli='jabber:client'>";
  xml += "<jingle";
  xml += "  xmlns='urn:xmpp:jingle:1'";
  xml += "  action='session-info'";
  xml += "  sid=''>";
  // <view> request for the given video ssrc.
  xml += "<view xmlns='google:jingle'";
  xml += "  name='video'";
  xml += "  type='static'";
  xml += "  ssrc='" + ssrc + "'>";
  xml += "<params";
  xml += "    width='" + width + "'";
  xml += "    height='" + height + "'";
  xml += "    framerate='" + framerate + "'";
  xml += "    preference='0'/>";
  xml += "</view>";
  xml += "</jingle>";
  xml += "</cli:iq>";
  return xml;
}
+
// Builds a Jingle description-info IQ that adds a stream (including an
// <ssrc> element) to the named content.  Literal pieces are kept
// byte-for-byte; only the assembly style differs.
std::string JingleStreamAdd(const std::string& content_name,
                            const std::string& nick,
                            const std::string& name,
                            const std::string& ssrc) {
  std::string xml;
  xml += "<iq";
  xml += "  xmlns='jabber:client'";
  xml += "  from='me@mydomain.com'";
  xml += "  to='user@domain.com/resource'";
  xml += "  type='set'";
  xml += "  id='150'>";
  xml += "  <jingle";
  xml += "    xmlns='urn:xmpp:jingle:1'";
  xml += "    action='description-info'>";
  xml += "    <content";
  xml += "        xmlns='urn:xmpp:jingle:1'";
  xml += "        name='" + content_name + "'>";
  xml += "      <description";
  xml += "          xmlns='urn:xmpp:jingle:apps:rtp:1'";
  xml += "          media='" + content_name + "'>";
  xml += "        <streams";
  xml += "            xmlns='google:jingle'>";
  xml += "          <stream";
  xml += "              nick='" + nick + "'";
  xml += "              name='" + name + "'>";
  xml += "            <ssrc>" + ssrc + "</ssrc>";
  xml += "          </stream>";
  xml += "        </streams>";
  xml += "      </description>";
  xml += "  </content>";
  xml += "  </jingle>";
  xml += "</iq>";
  return xml;
}
+
// Builds the expected outbound description-info IQ that removes a stream
// (no <ssrc> child) from the named content.  Literal pieces are kept
// byte-for-byte; only the assembly style differs.
std::string JingleOutboundStreamRemove(const std::string& sid,
                                       const std::string& content_name,
                                       const std::string& name) {
  std::string xml;
  xml += "<cli:iq";
  xml += "  to='me@mydomain.com'";
  xml += "  type='set'";
  xml += "  xmlns:cli='jabber:client'>";
  xml += "<jingle";
  xml += "  xmlns='urn:xmpp:jingle:1'";
  xml += "  action='description-info'";
  xml += "  sid='" + sid + "'>";
  xml += "<content";
  xml += "  name='" + content_name + "'";
  xml += "  creator='initiator'>";
  xml += "<description";
  xml += "  xmlns='urn:xmpp:jingle:apps:rtp:1'";
  xml += "  media='" + content_name + "'>";
  xml += "<streams";
  xml += "  xmlns='google:jingle'>";
  xml += "<stream";
  xml += "  name='" + name + "'>";
  xml += "</stream>";
  xml += "</streams>";
  xml += "</description>";
  xml += "</content>";
  xml += "</jingle>";
  xml += "</cli:iq>";
  return xml;
}
+
// Builds the expected outbound description-info IQ that adds a stream
// (with an <ssrc> child) to the named content.  Literal pieces are kept
// byte-for-byte; only the assembly style differs.
std::string JingleOutboundStreamAdd(const std::string& sid,
                                    const std::string& content_name,
                                    const std::string& name,
                                    const std::string& ssrc) {
  std::string xml;
  xml += "<cli:iq";
  xml += "  to='me@mydomain.com'";
  xml += "  type='set'";
  xml += "  xmlns:cli='jabber:client'>";
  xml += "<jingle";
  xml += "  xmlns='urn:xmpp:jingle:1'";
  xml += "  action='description-info'";
  xml += "  sid='" + sid + "'>";
  xml += "<content";
  xml += "  name='" + content_name + "'";
  xml += "  creator='initiator'>";
  xml += "<description";
  xml += "  xmlns='urn:xmpp:jingle:apps:rtp:1'";
  xml += "  media='" + content_name + "'>";
  xml += "<streams";
  xml += "  xmlns='google:jingle'>";
  xml += "<stream";
  xml += "  name='" + name + "'>";
  xml += "<ssrc>" + ssrc + "</ssrc>";
  xml += "</stream>";
  xml += "</streams>";
  xml += "</description>";
  xml += "</content>";
  xml += "</jingle>";
  xml += "</cli:iq>";
  return xml;
}
+
// Builds an incoming-direction Jingle description-info stanza adding a
// stream identified by nick/name but with no <ssrc> child.  The interior
// whitespace inside the literals is significant — tests compare against
// parsed XML, so the pretty-printing is preserved verbatim.
std::string JingleStreamAddWithoutSsrc(const std::string& content_name,
                                       const std::string& nick,
                                       const std::string& name) {
  return \
      "<iq"
      "  xmlns='jabber:client'"
      "  from='me@mydomain.com'"
      "  to='user@domain.com/resource'"
      "  type='set'"
      "  id='150'>"
      "  <jingle"
      "      xmlns='urn:xmpp:jingle:1'"
      "      action='description-info'>"
      "    <content"
      "        xmlns='urn:xmpp:jingle:1'"
      "        name='" + content_name + "'>"
      "      <description"
      "          xmlns='urn:xmpp:jingle:apps:rtp:1'"
      "          media='" + content_name + "'>"
      "        <streams"
      "            xmlns='google:jingle'>"
      "          <stream"
      "            nick='" + nick + "'"
      "            name='" + name + "'>"
      "          </stream>"
      "        </streams>"
      "      </description>"
      "    </content>"
      "  </jingle>"
      "</iq>";
}
+
// Builds an incoming-direction Jingle description-info stanza whose
// <stream> element is self-closing, signalling removal of the stream
// identified by nick/name from |content_name|.
std::string JingleStreamRemove(const std::string& content_name,
                               const std::string& nick,
                               const std::string& name) {
  return \
      "<iq"
      "  xmlns='jabber:client'"
      "  from='me@mydomain.com'"
      "  to='user@domain.com/resource'"
      "  type='set'"
      "  id='150'>"
      "  <jingle"
      "      xmlns='urn:xmpp:jingle:1'"
      "      action='description-info'>"
      "    <content"
      "        xmlns='urn:xmpp:jingle:1'"
      "        name='" + content_name + "'>"
      "      <description"
      "          xmlns='urn:xmpp:jingle:apps:rtp:1'"
      "          media='" + content_name + "'>"
      "        <streams"
      "            xmlns='google:jingle'>"
      "          <stream"
      "            nick='" + nick + "'"
      "            name='" + name + "'/>"
      "        </streams>"
      "      </description>"
      "    </content>"
      "  </jingle>"
      "</iq>";
}
+
+// Convenience function to get CallOptions that have audio enabled,
+// but not video or data.
+static cricket::CallOptions AudioCallOptions() {
+ cricket::CallOptions options;
+ options.has_audio = true;
+ options.has_video = false;
+ options.data_channel_type = cricket::DCT_NONE;
+ return options;
+}
+
+// Convenience function to get CallOptions that have audio and video
+// enabled, but not data.
+static cricket::CallOptions VideoCallOptions() {
+ cricket::CallOptions options;
+ options.has_audio = true;
+ options.has_video = true;
+ options.data_channel_type = cricket::DCT_NONE;
+ return options;
+}
+
+static buzz::XmlElement* CopyElement(const buzz::XmlElement* elem) {
+ return new buzz::XmlElement(*elem);
+}
+
// Inserts |encryption| immediately before every "</description>" close tag
// in |stanza| and returns the augmented stanza.  Used by the tests to graft
// <encryption> XML onto canned session stanzas.
//
// |stanza| is taken by value because it is modified in place and returned;
// |encryption| is read-only, so it is passed by const reference to avoid a
// needless copy.
static std::string AddEncryption(std::string stanza,
                                 const std::string& encryption) {
  std::string::size_type pos = stanza.find("</description>");
  while (pos != std::string::npos) {
    // insert() mutates |stanza| directly; no need to reassign its return.
    stanza.insert(pos, encryption);
    // Resume searching just past the inserted text so the close tag we
    // prefixed is not matched a second time.
    pos = stanza.find("</description>", pos + encryption.length() + 1);
  }
  return stanza;
}
+
+static int IntFromJingleCodecParameter(const buzz::XmlElement* parameter,
+ const std::string& expected_name) {
+ if (parameter) {
+ const std::string& actual_name =
+ parameter->Attr(cricket::QN_PAYLOADTYPE_PARAMETER_NAME);
+
+ EXPECT_EQ(expected_name, actual_name)
+ << "wrong parameter name. Expected '"
+ << expected_name << "'. Actually '"
+ << actual_name << "'.";
+
+ return atoi(parameter->Attr(
+ cricket::QN_PAYLOADTYPE_PARAMETER_VALUE).c_str());
+ }
+ return 0;
+}
+
+template <class CodecClass, class DescriptionClass>
+static void VerifyCodecFbParams(const FeedbackParams& expected,
+ const DescriptionClass* desc) {
+ if (!expected.params().empty()) {
+ ASSERT_TRUE(desc != NULL);
+ const std::vector<CodecClass> codecs = desc->codecs();
+ for (size_t i = 0; i < codecs.size(); ++i) {
+ EXPECT_EQ(expected, codecs[i].feedback_params);
+ }
+ }
+}
+
// Parses and extracts payload and codec info from test XML. Since
// that XML will be in various contents (Gingle and Jingle), we need an
// abstract parser with one concrete implementation per XML content.
class MediaSessionTestParser {
 public:
  // Returns the protocol-specific action element of |stanza|, or NULL.
  virtual buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) = 0;
  // Returns the first content element of |action|, or NULL.
  virtual buzz::XmlElement* ContentFromAction(buzz::XmlElement* action) = 0;
  // Advances to the content after |content| (see subclasses for caveats).
  virtual buzz::XmlElement* NextContent(buzz::XmlElement* content) = 0;
  // Payload-type traversal within a content.
  virtual buzz::XmlElement* PayloadTypeFromContent(
      buzz::XmlElement* content) = 0;
  virtual buzz::XmlElement* NextFromPayloadType(
      buzz::XmlElement* payload_type) = 0;
  // Codec construction from a payload-type element.
  virtual cricket::AudioCodec AudioCodecFromPayloadType(
      const buzz::XmlElement* payload_type) = 0;
  virtual cricket::VideoCodec VideoCodecFromPayloadType(
      const buzz::XmlElement* payload_type) = 0;
  virtual cricket::DataCodec DataCodecFromPayloadType(
      const buzz::XmlElement* payload_type) = 0;
  // Encryption, bandwidth and rtcp-mux lookup within a content.
  virtual buzz::XmlElement* EncryptionFromContent(
      buzz::XmlElement* content) = 0;
  virtual buzz::XmlElement* NextFromEncryption(
      buzz::XmlElement* encryption) = 0;
  virtual const buzz::XmlElement* BandwidthFromContent(
      buzz::XmlElement* content) = 0;
  virtual const buzz::XmlElement* RtcpMuxFromContent(
      buzz::XmlElement* content) = 0;
  // True if |action| represents a session/call termination.
  virtual bool ActionIsTerminate(const buzz::XmlElement* action) = 0;
  virtual ~MediaSessionTestParser() {}
};
+
// MediaSessionTestParser for Jingle-formatted XML (urn:xmpp:jingle:1).
class JingleSessionTestParser : public MediaSessionTestParser {
 public:
  JingleSessionTestParser() {}

  ~JingleSessionTestParser() {
  }

  // Returns the <jingle> action element of |stanza|, or NULL.
  buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) {
    return stanza->FirstNamed(cricket::QN_JINGLE);
  }

  buzz::XmlElement* ContentFromAction(buzz::XmlElement* action) {
    // We need to be able to use multiple contents, but the action
    // gets deleted before we can call NextContent, so we need to
    // stash away a copy.
    action_.reset(CopyElement(action));
    return action_->FirstNamed(cricket::QN_JINGLE_CONTENT);
  }

  // Returns the second <content> of the stashed action.  Note that
  // |content| itself is ignored and traversal restarts from the stashed
  // copy, so only the first two contents are reachable this way.
  buzz::XmlElement* NextContent(buzz::XmlElement* content) {
    // For some reason, content->NextNamed(cricket::QN_JINGLE_CONTENT)
    // doesn't work.
    return action_->FirstNamed(cricket::QN_JINGLE_CONTENT)
      ->NextNamed(cricket::QN_JINGLE_CONTENT);
  }

  // Returns the first <payload-type> of the content's RTP <description>,
  // or NULL when the content has no RTP description.
  buzz::XmlElement* PayloadTypeFromContent(buzz::XmlElement* content) {
    buzz::XmlElement* content_desc =
        content->FirstNamed(cricket::QN_JINGLE_RTP_CONTENT);
    if (!content_desc)
      return NULL;

    return content_desc->FirstNamed(cricket::QN_JINGLE_RTP_PAYLOADTYPE);
  }

  buzz::XmlElement* NextFromPayloadType(buzz::XmlElement* payload_type) {
    return payload_type->NextNamed(cricket::QN_JINGLE_RTP_PAYLOADTYPE);
  }

  // Collects every <rtcp-fb> child of |element| that has a non-empty type
  // attribute into |params| as (type, subtype) pairs.
  void ParsePayloadTypeFeedbackParameters(const buzz::XmlElement* element,
                                          FeedbackParams* params) {
    const buzz::XmlElement* param =
        element->FirstNamed(cricket::QN_JINGLE_RTCP_FB);
    for (; param != NULL;
         param = param->NextNamed(cricket::QN_JINGLE_RTCP_FB)) {
      std::string type = param->Attr(cricket::QN_TYPE);
      std::string subtype = param->Attr(cricket::QN_SUBTYPE);
      if (!type.empty()) {
        params->Add(FeedbackParam(type, subtype));
      }
    }
  }

  // Builds an AudioCodec from the payload-type's id/name/clockrate/channels
  // attributes; bitrate comes from a nested <parameter name='bitrate'>.
  // Missing attributes default to 0 (channels default to 1).
  cricket::AudioCodec AudioCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    int id = 0;
    if (payload_type->HasAttr(cricket::QN_ID))
      id = atoi(payload_type->Attr(cricket::QN_ID).c_str());

    std::string name;
    if (payload_type->HasAttr(cricket::QN_NAME))
      name = payload_type->Attr(cricket::QN_NAME);

    int clockrate = 0;
    if (payload_type->HasAttr(cricket::QN_CLOCKRATE))
      clockrate = atoi(payload_type->Attr(cricket::QN_CLOCKRATE).c_str());

    int bitrate = IntFromJingleCodecParameter(
        payload_type->FirstNamed(cricket::QN_PARAMETER), "bitrate");

    int channels = 1;
    if (payload_type->HasAttr(cricket::QN_CHANNELS))
      channels = atoi(payload_type->Attr(
          cricket::QN_CHANNELS).c_str());

    AudioCodec codec = AudioCodec(id, name, clockrate, bitrate, channels, 0);
    ParsePayloadTypeFeedbackParameters(payload_type, &codec.feedback_params);
    return codec;
  }

  // Builds a VideoCodec; width, height and framerate are read from the
  // first three <parameter> children, expected in exactly that order.
  cricket::VideoCodec VideoCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    int id = 0;
    if (payload_type->HasAttr(cricket::QN_ID))
      id = atoi(payload_type->Attr(cricket::QN_ID).c_str());

    std::string name;
    if (payload_type->HasAttr(cricket::QN_NAME))
      name = payload_type->Attr(cricket::QN_NAME);

    int width = 0;
    int height = 0;
    int framerate = 0;
    const buzz::XmlElement* param =
        payload_type->FirstNamed(cricket::QN_PARAMETER);
    if (param) {
      width = IntFromJingleCodecParameter(param, "width");
      param = param->NextNamed(cricket::QN_PARAMETER);
      if (param) {
        height = IntFromJingleCodecParameter(param, "height");
        param = param->NextNamed(cricket::QN_PARAMETER);
        if (param) {
          framerate = IntFromJingleCodecParameter(param, "framerate");
        }
      }
    }
    VideoCodec codec = VideoCodec(id, name, width, height, framerate, 0);
    ParsePayloadTypeFeedbackParameters(payload_type, &codec.feedback_params);
    return codec;
  }

  // Builds a DataCodec from the id/name attributes only.
  cricket::DataCodec DataCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    int id = 0;
    if (payload_type->HasAttr(cricket::QN_ID))
      id = atoi(payload_type->Attr(cricket::QN_ID).c_str());

    std::string name;
    if (payload_type->HasAttr(cricket::QN_NAME))
      name = payload_type->Attr(cricket::QN_NAME);

    DataCodec codec = DataCodec(id, name, 0);
    ParsePayloadTypeFeedbackParameters(payload_type, &codec.feedback_params);
    return codec;
  }

  // True when the <jingle> action attribute is "session-terminate".
  bool ActionIsTerminate(const buzz::XmlElement* action) {
    return (action->HasAttr(cricket::QN_ACTION) &&
            action->Attr(cricket::QN_ACTION) == "session-terminate");
  }

  // Returns the <encryption> element of the content's RTP description,
  // or NULL.
  buzz::XmlElement* EncryptionFromContent(buzz::XmlElement* content) {
    buzz::XmlElement* content_desc =
        content->FirstNamed(cricket::QN_JINGLE_RTP_CONTENT);
    if (!content_desc)
      return NULL;

    return content_desc->FirstNamed(cricket::QN_ENCRYPTION);
  }

  buzz::XmlElement* NextFromEncryption(buzz::XmlElement* encryption) {
    return encryption->NextNamed(cricket::QN_ENCRYPTION);
  }

  // Returns the <bandwidth> element of the content's RTP description,
  // or NULL.
  const buzz::XmlElement* BandwidthFromContent(buzz::XmlElement* content) {
    buzz::XmlElement* content_desc =
        content->FirstNamed(cricket::QN_JINGLE_RTP_CONTENT);
    if (!content_desc)
      return NULL;

    return content_desc->FirstNamed(cricket::QN_JINGLE_RTP_BANDWIDTH);
  }

  // Returns the <rtcp-mux> element directly under |content|, or NULL.
  const buzz::XmlElement* RtcpMuxFromContent(buzz::XmlElement* content) {
    return content->FirstNamed(cricket::QN_JINGLE_RTCP_MUX);
  }

 private:
  // Copy of the last action given to ContentFromAction; keeps the contents
  // alive for NextContent after the original action has been deleted.
  talk_base::scoped_ptr<buzz::XmlElement> action_;
};
+
// MediaSessionTestParser for legacy Gingle-formatted XML.  Gingle has a
// single session element rather than per-media contents, so "content"
// traversal is simulated with found_content_count_.
class GingleSessionTestParser : public MediaSessionTestParser {
 public:
  GingleSessionTestParser() : found_content_count_(0) {}

  // Returns the Gingle <session> element of |stanza|, or NULL.
  buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) {
    return stanza->FirstNamed(cricket::QN_GINGLE_SESSION);
  }

  // Returns the audio content if present, otherwise the video content.
  buzz::XmlElement* ContentFromAction(buzz::XmlElement* session) {
    buzz::XmlElement* content =
        session->FirstNamed(cricket::QN_GINGLE_AUDIO_CONTENT);
    if (content == NULL)
      content = session->FirstNamed(cricket::QN_GINGLE_VIDEO_CONTENT);
    return content;
  }

  // Assumes contents are in order of audio, and then video.
  // Bumps the counter that flips payload lookups from the audio to the
  // video namespace and returns the same element unchanged.
  buzz::XmlElement* NextContent(buzz::XmlElement* content) {
    found_content_count_++;
    return content;
  }

  // Audio payload types before NextContent has been called; video after.
  buzz::XmlElement* PayloadTypeFromContent(buzz::XmlElement* content) {
    if (found_content_count_ > 0) {
      return content->FirstNamed(cricket::QN_GINGLE_VIDEO_PAYLOADTYPE);
    } else {
      return content->FirstNamed(cricket::QN_GINGLE_AUDIO_PAYLOADTYPE);
    }
  }

  buzz::XmlElement* NextFromPayloadType(buzz::XmlElement* payload_type) {
    if (found_content_count_ > 0) {
      return payload_type->NextNamed(cricket::QN_GINGLE_VIDEO_PAYLOADTYPE);
    } else {
      return payload_type->NextNamed(cricket::QN_GINGLE_AUDIO_PAYLOADTYPE);
    }
  }

  // Builds an AudioCodec from payload-type attributes; unlike Jingle,
  // bitrate is a direct attribute rather than a nested <parameter>.
  cricket::AudioCodec AudioCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    int id = 0;
    if (payload_type->HasAttr(cricket::QN_ID))
      id = atoi(payload_type->Attr(cricket::QN_ID).c_str());

    std::string name;
    if (payload_type->HasAttr(cricket::QN_NAME))
      name = payload_type->Attr(cricket::QN_NAME);

    int clockrate = 0;
    if (payload_type->HasAttr(cricket::QN_CLOCKRATE))
      clockrate = atoi(payload_type->Attr(cricket::QN_CLOCKRATE).c_str());

    int bitrate = 0;
    if (payload_type->HasAttr(cricket::QN_BITRATE))
      bitrate = atoi(payload_type->Attr(cricket::QN_BITRATE).c_str());

    int channels = 1;
    if (payload_type->HasAttr(cricket::QN_CHANNELS))
      channels = atoi(payload_type->Attr(cricket::QN_CHANNELS).c_str());

    return cricket::AudioCodec(id, name, clockrate, bitrate, channels, 0);
  }

  // Builds a VideoCodec from direct width/height/framerate attributes.
  cricket::VideoCodec VideoCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    int id = 0;
    if (payload_type->HasAttr(cricket::QN_ID))
      id = atoi(payload_type->Attr(cricket::QN_ID).c_str());

    std::string name;
    if (payload_type->HasAttr(cricket::QN_NAME))
      name = payload_type->Attr(cricket::QN_NAME);

    int width = 0;
    if (payload_type->HasAttr(cricket::QN_WIDTH))
      width = atoi(payload_type->Attr(cricket::QN_WIDTH).c_str());

    int height = 0;
    if (payload_type->HasAttr(cricket::QN_HEIGHT))
      height = atoi(payload_type->Attr(cricket::QN_HEIGHT).c_str());

    int framerate = 1;
    if (payload_type->HasAttr(cricket::QN_FRAMERATE))
      framerate = atoi(payload_type->Attr(cricket::QN_FRAMERATE).c_str());

    return cricket::VideoCodec(id, name, width, height, framerate, 0);
  }

  cricket::DataCodec DataCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    // Gingle can't do data codecs.
    return cricket::DataCodec(0, "", 0);
  }

  buzz::XmlElement* EncryptionFromContent(
      buzz::XmlElement* content) {
    return content->FirstNamed(cricket::QN_ENCRYPTION);
  }

  buzz::XmlElement* NextFromEncryption(buzz::XmlElement* encryption) {
    return encryption->NextNamed(cricket::QN_ENCRYPTION);
  }

  // In Gingle, bandwidth lives on the video description element.
  const buzz::XmlElement* BandwidthFromContent(buzz::XmlElement* content) {
    return content->FirstNamed(cricket::QN_GINGLE_VIDEO_BANDWIDTH);
  }

  // Gingle has no rtcp-mux signaling.
  const buzz::XmlElement* RtcpMuxFromContent(buzz::XmlElement* content) {
    return NULL;
  }

  // Gingle signals termination via type='terminate' on the session.
  bool ActionIsTerminate(const buzz::XmlElement* session) {
    return (session->HasAttr(buzz::QN_TYPE) &&
            session->Attr(buzz::QN_TYPE) == "terminate");
  }

  // Number of NextContent calls so far; selects audio vs video payload
  // namespaces in the lookups above.
  int found_content_count_;
};
+
+class MediaSessionClientTest : public sigslot::has_slots<> {
+ public:
  // Builds the full client-side fixture: network manager, port allocator,
  // session manager, fake media/data engines (with nack/fir feedback params
  // attached to the static codec lists), and a MediaSessionClient wired to
  // capture outgoing stanzas and call/session lifecycle events.  All raw
  // pointers allocated here are owned by the fixture and freed in the
  // destructor.
  explicit MediaSessionClientTest(MediaSessionTestParser* parser,
                                  cricket::SignalingProtocol initial_protocol) {
    nm_ = new talk_base::BasicNetworkManager();
    pa_ = new cricket::BasicPortAllocator(nm_);
    sm_ = new cricket::SessionManager(pa_, NULL);
    fme_ = new cricket::FakeMediaEngine();
    fdme_ = new cricket::FakeDataEngine();

    // Video codecs advertise ccm/fir + nack; audio and data advertise nack.
    FeedbackParams params_nack_fir;
    params_nack_fir.Add(FeedbackParam(cricket::kRtcpFbParamCcm,
                                      cricket::kRtcpFbCcmParamFir));
    params_nack_fir.Add(FeedbackParam(cricket::kRtcpFbParamNack));
    FeedbackParams params_nack;
    params_nack.Add(FeedbackParam(cricket::kRtcpFbParamNack));

    std::vector<cricket::AudioCodec>
        audio_codecs(kAudioCodecs, kAudioCodecs + ARRAY_SIZE(kAudioCodecs));
    SetCodecFeedbackParams(&audio_codecs, params_nack);
    fme_->SetAudioCodecs(audio_codecs);
    std::vector<cricket::VideoCodec>
        video_codecs(kVideoCodecs, kVideoCodecs + ARRAY_SIZE(kVideoCodecs));
    SetCodecFeedbackParams(&video_codecs, params_nack_fir);
    fme_->SetVideoCodecs(video_codecs);
    std::vector<cricket::DataCodec>
        data_codecs(kDataCodecs, kDataCodecs + ARRAY_SIZE(kDataCodecs));
    SetCodecFeedbackParams(&data_codecs, params_nack);
    fdme_->SetDataCodecs(data_codecs);

    // Route outgoing stanzas and session/call creation into the fixture's
    // handlers so tests can inspect them.
    client_ = new cricket::MediaSessionClient(
        buzz::Jid("user@domain.com/resource"), sm_,
        fme_, fdme_, new cricket::FakeDeviceManager());
    client_->session_manager()->SignalOutgoingMessage.connect(
        this, &MediaSessionClientTest::OnSendStanza);
    client_->session_manager()->SignalSessionCreate.connect(
        this, &MediaSessionClientTest::OnSessionCreate);
    client_->SignalCallCreate.connect(
        this, &MediaSessionClientTest::OnCallCreate);
    client_->SignalCallDestroy.connect(
        this, &MediaSessionClientTest::OnCallDestroy);

    // Per-test expectation knobs; individual tests flip these before
    // driving a scenario.
    call_ = NULL;
    parser_ = parser;
    initial_protocol_ = initial_protocol;
    expect_incoming_crypto_ = false;
    expect_outgoing_crypto_ = false;
    expected_video_bandwidth_ = cricket::kAutoBandwidth;
    expected_video_rtcp_mux_ = false;
  }
+
  // Tears the fixture down in reverse order of construction.  ClearStanzas
  // releases any outgoing stanzas still held by the fixture (its definition
  // is elsewhere in this file — presumably it deletes the captured copies;
  // confirm there).
  ~MediaSessionClientTest() {
    delete client_;
    delete sm_;
    delete pa_;
    delete nm_;
    delete parser_;
    ClearStanzas();
  }
+
  // Thin wrappers that delegate to the protocol-specific parser_ so the
  // test bodies below can stay agnostic of Jingle vs Gingle XML layout.
  buzz::XmlElement* ActionFromStanza(buzz::XmlElement* stanza) {
    return parser_->ActionFromStanza(stanza);
  }

  buzz::XmlElement* ContentFromAction(buzz::XmlElement* action) {
    return parser_->ContentFromAction(action);
  }

  buzz::XmlElement* PayloadTypeFromContent(buzz::XmlElement* payload) {
    return parser_->PayloadTypeFromContent(payload);
  }

  buzz::XmlElement* NextFromPayloadType(buzz::XmlElement* payload_type) {
    return parser_->NextFromPayloadType(payload_type);
  }

  buzz::XmlElement* EncryptionFromContent(buzz::XmlElement* content) {
    return parser_->EncryptionFromContent(content);
  }

  buzz::XmlElement* NextFromEncryption(buzz::XmlElement* encryption) {
    return parser_->NextFromEncryption(encryption);
  }

  cricket::AudioCodec AudioCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    return parser_->AudioCodecFromPayloadType(payload_type);
  }

  cricket::VideoCodec VideoCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    return parser_->VideoCodecFromPayloadType(payload_type);
  }

  cricket::DataCodec DataCodecFromPayloadType(
      const buzz::XmlElement* payload_type) {
    return parser_->DataCodecFromPayloadType(payload_type);
  }
+
  // Returns the audio description of |sdesc|'s first audio content,
  // or NULL when there is no audio content.
  const AudioContentDescription* GetFirstAudioContentDescription(
      const cricket::SessionDescription* sdesc) {
    const cricket::ContentInfo* content =
        cricket::GetFirstAudioContent(sdesc);
    if (content == NULL)
      return NULL;
    return static_cast<const AudioContentDescription*>(content->description);
  }

  // Returns the video description of |sdesc|'s first video content,
  // or NULL when there is no video content.
  const cricket::VideoContentDescription* GetFirstVideoContentDescription(
      const cricket::SessionDescription* sdesc) {
    const cricket::ContentInfo* content =
        cricket::GetFirstVideoContent(sdesc);
    if (content == NULL)
      return NULL;
    return static_cast<const cricket::VideoContentDescription*>(
        content->description);
  }
+
  // Asserts the two crypto offers carried by a good incoming initiate:
  // tag 145 / AES_CM_128_HMAC_SHA1_32 and tag 51 / AES_CM_128_HMAC_SHA1_80,
  // each with the fixed inline key used by the canned stanzas.
  void CheckCryptoFromGoodIncomingInitiate(const cricket::Session* session) {
    ASSERT_TRUE(session != NULL);
    const AudioContentDescription* content =
        GetFirstAudioContentDescription(session->remote_description());
    ASSERT_TRUE(content != NULL);
    ASSERT_EQ(2U, content->cryptos().size());
    ASSERT_EQ(145, content->cryptos()[0].tag);
    ASSERT_EQ("AES_CM_128_HMAC_SHA1_32", content->cryptos()[0].cipher_suite);
    ASSERT_EQ("inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9",
              content->cryptos()[0].key_params);
    ASSERT_EQ(51, content->cryptos()[1].tag);
    ASSERT_EQ("AES_CM_128_HMAC_SHA1_80", content->cryptos()[1].cipher_suite);
    ASSERT_EQ("inline:J4lfdUL8W1F7TNJKcbuygaQuA429SJy2e9JctPUy",
              content->cryptos()[1].key_params);
  }

  // Asserts our outgoing accept answers with exactly one crypto (the
  // offered tag 145 / SHA1_32 suite) carrying a 47-char key-params blob.
  void CheckCryptoForGoodOutgoingAccept(const cricket::Session* session) {
    const AudioContentDescription* content =
        GetFirstAudioContentDescription(session->local_description());
    ASSERT_EQ(1U, content->cryptos().size());
    ASSERT_EQ(145, content->cryptos()[0].tag);
    ASSERT_EQ("AES_CM_128_HMAC_SHA1_32", content->cryptos()[0].cipher_suite);
    ASSERT_EQ(47U, content->cryptos()[0].key_params.size());
  }

  // Asserts an initiate carrying an unsupported suite is still parsed and
  // surfaced verbatim in the remote description.
  void CheckBadCryptoFromIncomingInitiate(const cricket::Session* session) {
    const AudioContentDescription* content =
        GetFirstAudioContentDescription(session->remote_description());
    ASSERT_EQ(1U, content->cryptos().size());
    ASSERT_EQ(145, content->cryptos()[0].tag);
    ASSERT_EQ("NOT_SUPPORTED", content->cryptos()[0].cipher_suite);
    ASSERT_EQ("inline:hsWuSQJxx7przmb8HM+ZkeNcG3HezSNID7LmfDa9",
              content->cryptos()[0].key_params);
  }

  // Asserts our accept contains no crypto at all.
  void CheckNoCryptoForOutgoingAccept(const cricket::Session* session) {
    const AudioContentDescription* content =
        GetFirstAudioContentDescription(session->local_description());
    ASSERT_TRUE(content->cryptos().empty());
  }
+
  // Verifies that the audio, video and data codecs in |sdesc| each carry
  // the fixture's expected rtcp-fb parameter sets (no-op per media type
  // when the corresponding expectation set is empty).
  void CheckRtcpFb(const cricket::SessionDescription* sdesc) {
    VerifyCodecFbParams<AudioCodec>(expected_audio_fb_params_,
                                    GetFirstAudioContentDescription(sdesc));

    VerifyCodecFbParams<VideoCodec>(expected_video_fb_params_,
                                    GetFirstVideoContentDescription(sdesc));

    VerifyCodecFbParams<DataCodec>(expected_data_fb_params_,
                                   GetFirstDataContentDescription(sdesc));
  }
+
  // Asserts the video content's bandwidth, if |sdesc| has a video content.
  void CheckVideoBandwidth(int expected_bandwidth,
                           const cricket::SessionDescription* sdesc) {
    const cricket::VideoContentDescription* video =
        GetFirstVideoContentDescription(sdesc);
    if (video != NULL) {
      ASSERT_EQ(expected_bandwidth, video->bandwidth());
    }
  }

  // Asserts the video content's rtcp-mux flag, if |sdesc| has video.
  void CheckVideoRtcpMux(bool expected_video_rtcp_mux,
                         const cricket::SessionDescription* sdesc) {
    const cricket::VideoContentDescription* video =
        GetFirstVideoContentDescription(sdesc);
    if (video != NULL) {
      ASSERT_EQ(expected_video_rtcp_mux, video->rtcp_mux());
    }
  }
+
  // Verifies an outgoing RTP data content: the google-data codec (id 127)
  // with the expected feedback params, rtcp-mux in both descriptions, and,
  // when crypto is expected, an <encryption> element on the next content.
  virtual void CheckRtpDataContent(buzz::XmlElement* content) {
    // NOTE(review): initial_protocol_ is a cricket::SignalingProtocol enum
    // used as a boolean here; this relies on PROTOCOL_JINGLE being the zero
    // value so that any non-Jingle protocol skips the check — confirm.
    if (initial_protocol_) {
      // Gingle can not write out data content.
      return;
    }

    buzz::XmlElement* e = PayloadTypeFromContent(content);
    ASSERT_TRUE(e != NULL);
    cricket::DataCodec codec = DataCodecFromPayloadType(e);
    EXPECT_EQ(127, codec.id);
    EXPECT_EQ("google-data", codec.name);
    EXPECT_EQ(expected_data_fb_params_, codec.feedback_params);

    CheckDataRtcpMux(true, call_->sessions()[0]->local_description());
    CheckDataRtcpMux(true, call_->sessions()[0]->remote_description());
    if (expect_outgoing_crypto_) {
      content = parser_->NextContent(content);
      buzz::XmlElement* encryption = EncryptionFromContent(content);
      ASSERT_TRUE(encryption != NULL);
      // TODO(pthatcher): Check encryption parameters?
    }
  }
+
  // Verifies an outgoing SCTP data content: unlike RTP data it must carry
  // neither payload types nor encryption.
  virtual void CheckSctpDataContent(buzz::XmlElement* content) {
    // NOTE(review): same enum-as-bool test as CheckRtpDataContent — relies
    // on PROTOCOL_JINGLE being zero; confirm.
    if (initial_protocol_) {
      // Gingle can not write out data content.
      return;
    }

    buzz::XmlElement* payload_type = PayloadTypeFromContent(content);
    ASSERT_TRUE(payload_type == NULL);
    buzz::XmlElement* encryption = EncryptionFromContent(content);
    ASSERT_TRUE(encryption == NULL);
    // TODO(pthatcher): Check for <streams>.
  }
+
  // Asserts the data content's rtcp-mux flag, if |sdesc| has a data content.
  void CheckDataRtcpMux(bool expected_data_rtcp_mux,
                        const cricket::SessionDescription* sdesc) {
    const cricket::DataContentDescription* data =
        GetFirstDataContentDescription(sdesc);
    if (data != NULL) {
      ASSERT_EQ(expected_data_rtcp_mux, data->rtcp_mux());
    }
  }
+
  // The three checks below assert that an incoming accept populated the
  // remote description with the expected first ssrc per media type.
  void CheckAudioSsrcForIncomingAccept(const cricket::Session* session) {
    const AudioContentDescription* audio =
        GetFirstAudioContentDescription(session->remote_description());
    ASSERT_TRUE(audio != NULL);
    ASSERT_EQ(kAudioSsrc, audio->first_ssrc());
  }

  void CheckVideoSsrcForIncomingAccept(const cricket::Session* session) {
    const cricket::VideoContentDescription* video =
        GetFirstVideoContentDescription(session->remote_description());
    ASSERT_TRUE(video != NULL);
    ASSERT_EQ(kVideoSsrc, video->first_ssrc());
  }

  void CheckDataSsrcForIncomingAccept(const cricket::Session* session) {
    const cricket::DataContentDescription* data =
        GetFirstDataContentDescription(session->remote_description());
    ASSERT_TRUE(data != NULL);
    ASSERT_EQ(kDataSsrc, data->first_ssrc());
  }
+
  // Drives the happy path for an incoming call: feed |initiate_string|,
  // accept with |options|, then terminate, checking session state and the
  // stanza emitted at every step.  On success *element holds a copy of the
  // first content of the outgoing accept (caller takes ownership).
  void TestGoodIncomingInitiate(const std::string& initiate_string,
                                const cricket::CallOptions& options,
                                buzz::XmlElement** element) {
    *element = NULL;

    // Deliver the initiate; the client should ack it with a result iq and
    // leave the session in RECEIVEDINITIATE.
    talk_base::scoped_ptr<buzz::XmlElement> el(
        buzz::XmlElement::ForStr(initiate_string));
    client_->session_manager()->OnIncomingMessage(el.get());
    ASSERT_TRUE(call_ != NULL);
    ASSERT_TRUE(call_->sessions()[0] != NULL);
    ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
    ClearStanzas();
    // The remote description must reflect the bandwidth/rtcp-mux/rtcp-fb
    // expectations set by the individual test.
    CheckVideoBandwidth(expected_video_bandwidth_,
                        call_->sessions()[0]->remote_description());
    CheckVideoRtcpMux(expected_video_rtcp_mux_,
                      call_->sessions()[0]->remote_description());
    CheckRtcpFb(call_->sessions()[0]->remote_description());
    if (expect_incoming_crypto_) {
      CheckCryptoFromGoodIncomingInitiate(call_->sessions()[0]);
    }

    // TODO(pthatcher): Add tests for sending <bandwidth> in accept.
    // Accepting should emit a set iq carrying our answer contents.
    call_->AcceptSession(call_->sessions()[0], options);
    ASSERT_EQ(cricket::Session::STATE_SENTACCEPT,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));

    buzz::XmlElement* e = ActionFromStanza(stanzas_[0]);
    ASSERT_TRUE(e != NULL);
    ASSERT_TRUE(ContentFromAction(e) != NULL);
    *element = CopyElement(ContentFromAction(e));
    ASSERT_TRUE(*element != NULL);
    ClearStanzas();
    if (expect_outgoing_crypto_) {
      CheckCryptoForGoodOutgoingAccept(call_->sessions()[0]);
    }

    if (options.data_channel_type == cricket::DCT_RTP) {
      CheckDataRtcpMux(true, call_->sessions()[0]->local_description());
      CheckDataRtcpMux(true, call_->sessions()[0]->remote_description());
      // TODO(pthatcher): Check rtcpmux and crypto?
    }

    // Terminating should emit a set iq whose action is a terminate.
    call_->Terminate();
    ASSERT_EQ(cricket::Session::STATE_SENTTERMINATE,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));
    e = ActionFromStanza(stanzas_[0]);
    ASSERT_TRUE(e != NULL);
    ASSERT_TRUE(parser_->ActionIsTerminate(e));
    ClearStanzas();
  }
+
  // Like TestGoodIncomingInitiate, but verifies which contents survive in
  // the accept when |options| disables some media: each enabled media type
  // must appear as a content named "test audio"/"test video"/"test data".
  // On success *element holds a copy of the accept's first content (caller
  // takes ownership).
  void TestRejectOffer(const std::string &initiate_string,
                       const cricket::CallOptions& options,
                       buzz::XmlElement** element) {
    *element = NULL;

    // Deliver the initiate and expect the automatic result-iq ack.
    talk_base::scoped_ptr<buzz::XmlElement> el(
        buzz::XmlElement::ForStr(initiate_string));
    client_->session_manager()->OnIncomingMessage(el.get());
    ASSERT_TRUE(call_ != NULL);
    ASSERT_TRUE(call_->sessions()[0] != NULL);
    ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
    ClearStanzas();

    // Accept with the (partially disabled) options and grab the contents
    // of the accept stanza.
    call_->AcceptSession(call_->sessions()[0], options);
    ASSERT_EQ(cricket::Session::STATE_SENTACCEPT,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));

    buzz::XmlElement* e = ActionFromStanza(stanzas_[0]);
    ASSERT_TRUE(e != NULL);
    ASSERT_TRUE(ContentFromAction(e) != NULL);
    *element = CopyElement(ContentFromAction(e));
    ASSERT_TRUE(*element != NULL);
    ClearStanzas();

    buzz::XmlElement* content = *element;
    // The NextContent method actually returns the second content. So we
    // can't handle the case when audio, video and data are all enabled. But
    // since we are testing rejection, it won't be the case.
    if (options.has_audio) {
      ASSERT_TRUE(content != NULL);
      ASSERT_EQ("test audio", content->Attr(buzz::QName("", "name")));
      content = parser_->NextContent(content);
    }

    if (options.has_video) {
      ASSERT_TRUE(content != NULL);
      ASSERT_EQ("test video", content->Attr(buzz::QName("", "name")));
      content = parser_->NextContent(content);
    }

    if (options.has_data()) {
      ASSERT_TRUE(content != NULL);
      ASSERT_EQ("test data", content->Attr(buzz::QName("", "name")));
      content = parser_->NextContent(content);
    }

    // Tear the call down and expect a terminate action.
    call_->Terminate();
    ASSERT_EQ(cricket::Session::STATE_SENTTERMINATE,
              call_->sessions()[0]->state());
    ASSERT_EQ(1U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));
    e = ActionFromStanza(stanzas_[0]);
    ASSERT_TRUE(e != NULL);
    ASSERT_TRUE(parser_->ActionIsTerminate(e));
    ClearStanzas();
  }
+
  // Feeds an unacceptable initiate and expects the client to auto-reject:
  // the session ends in SENTREJECT and two stanzas go out — the first an
  // iq, the second a result-type iq.
  void TestBadIncomingInitiate(const std::string& initiate_string) {
    talk_base::scoped_ptr<buzz::XmlElement> el(
        buzz::XmlElement::ForStr(initiate_string));
    client_->session_manager()->OnIncomingMessage(el.get());
    ASSERT_TRUE(call_ != NULL);
    ASSERT_TRUE(call_->sessions()[0] != NULL);
    ASSERT_EQ(cricket::Session::STATE_SENTREJECT,
              call_->sessions()[0]->state());
    ASSERT_EQ(2U, stanzas_.size());
    ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
    ASSERT_TRUE(stanzas_[1]->HasAttr(buzz::QN_TYPE));
    ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[1]->Attr(buzz::QN_TYPE));
    ClearStanzas();
  }
+
  // Asserts every basic field of |codec| plus the fixture's currently
  // expected audio rtcp-fb parameters.
  void VerifyAudioCodec(const AudioCodec& codec, int id,
                        const std::string& name, int clockrate,
                        int bitrate, int channels) {
    ASSERT_EQ(id, codec.id);
    ASSERT_EQ(name, codec.name);
    ASSERT_EQ(clockrate, codec.clockrate);
    ASSERT_EQ(bitrate, codec.bitrate);
    ASSERT_EQ(channels, codec.channels);
    ASSERT_EQ(expected_audio_fb_params_, codec.feedback_params);
  }
+
+ void TestGoodOutgoingInitiate(const cricket::CallOptions& options) {
+ if (initial_protocol_ == cricket::PROTOCOL_JINGLE) {
+ // rtcp fb is only implemented for jingle.
+ ExpectRtcpFb();
+ }
+
+ client_->CreateCall();
+ ASSERT_TRUE(call_ != NULL);
+ call_->InitiateSession(buzz::Jid("me@mydomain.com"),
+ buzz::Jid("me@mydomain.com"), options);
+ ASSERT_TRUE(call_->sessions()[0] != NULL);
+ ASSERT_EQ(cricket::Session::STATE_SENTINITIATE,
+ call_->sessions()[0]->state());
+ ASSERT_EQ(1U, stanzas_.size());
+ ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
+ ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
+ ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));
+ buzz::XmlElement* action = ActionFromStanza(stanzas_[0]);
+ ASSERT_TRUE(action != NULL);
+ buzz::XmlElement* content = ContentFromAction(action);
+ ASSERT_TRUE(content != NULL);
+
+ buzz::XmlElement* e = PayloadTypeFromContent(content);
+ ASSERT_TRUE(e != NULL);
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 103, "ISAC", 16000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 104, "ISAC", 32000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 119, "ISACLC", 16000, 40000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 99, "speex", 16000, 22000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 97, "IPCMWB", 16000, 80000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 9, "G722", 16000, 64000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 102, "iLBC", 8000, 13300, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 98, "speex", 8000, 11000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 3, "GSM", 8000, 13000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 100, "EG711U", 8000, 64000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 101, "EG711A", 8000, 64000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 0, "PCMU", 8000, 64000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 8, "PCMA", 8000, 64000, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 126, "CN", 32000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 105, "CN", 16000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 13, "CN", 8000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 117, "red", 8000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ VerifyAudioCodec(codec, 106, "telephone-event", 8000, 0, 1);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+
+ if (expect_outgoing_crypto_) {
+ buzz::XmlElement* encryption = EncryptionFromContent(content);
+ ASSERT_TRUE(encryption != NULL);
+
+ if (client_->secure() == cricket::SEC_REQUIRED) {
+ ASSERT_TRUE(cricket::GetXmlAttr(
+ encryption, cricket::QN_ENCRYPTION_REQUIRED, false));
+ }
+
+ if (content->Name().Namespace() == cricket::NS_GINGLE_AUDIO) {
+ e = encryption->FirstNamed(cricket::QN_GINGLE_AUDIO_CRYPTO_USAGE);
+ ASSERT_TRUE(e != NULL);
+ ASSERT_TRUE(
+ e->NextNamed(cricket::QN_GINGLE_AUDIO_CRYPTO_USAGE) == NULL);
+ ASSERT_TRUE(
+ e->FirstNamed(cricket::QN_GINGLE_VIDEO_CRYPTO_USAGE) == NULL);
+ }
+
+ e = encryption->FirstNamed(cricket::QN_CRYPTO);
+ ASSERT_TRUE(e != NULL);
+ ASSERT_EQ("0", e->Attr(cricket::QN_CRYPTO_TAG));
+ ASSERT_EQ("AES_CM_128_HMAC_SHA1_32", e->Attr(cricket::QN_CRYPTO_SUITE));
+ std::string key_0 = e->Attr(cricket::QN_CRYPTO_KEY_PARAMS);
+ ASSERT_EQ(47U, key_0.length());
+ ASSERT_EQ("inline:", key_0.substr(0, 7));
+
+ e = e->NextNamed(cricket::QN_CRYPTO);
+ ASSERT_TRUE(e != NULL);
+ ASSERT_EQ("1", e->Attr(cricket::QN_CRYPTO_TAG));
+ ASSERT_EQ("AES_CM_128_HMAC_SHA1_80", e->Attr(cricket::QN_CRYPTO_SUITE));
+ std::string key_1 = e->Attr(cricket::QN_CRYPTO_KEY_PARAMS);
+ ASSERT_EQ(47U, key_1.length());
+ ASSERT_EQ("inline:", key_1.substr(0, 7));
+ ASSERT_NE(key_0, key_1);
+
+ encryption = NextFromEncryption(encryption);
+ ASSERT_TRUE(encryption == NULL);
+ }
+
+ if (options.has_video) {
+ CheckVideoBandwidth(options.video_bandwidth,
+ call_->sessions()[0]->local_description());
+ CheckVideoRtcpMux(expected_video_rtcp_mux_,
+ call_->sessions()[0]->remote_description());
+ content = parser_->NextContent(content);
+ const buzz::XmlElement* bandwidth =
+ parser_->BandwidthFromContent(content);
+ if (options.video_bandwidth == cricket::kAutoBandwidth) {
+ ASSERT_TRUE(bandwidth == NULL);
+ } else {
+ ASSERT_TRUE(bandwidth != NULL);
+ ASSERT_EQ("AS", bandwidth->Attr(buzz::QName("", "type")));
+ ASSERT_EQ(talk_base::ToString(options.video_bandwidth / 1000),
+ bandwidth->BodyText());
+ }
+
+ buzz::XmlElement* e = PayloadTypeFromContent(content);
+ ASSERT_TRUE(e != NULL);
+ VideoCodec codec = VideoCodecFromPayloadType(e);
+ VideoCodec expected_codec = kVideoCodecs[0];
+ expected_codec.preference = codec.preference;
+ expected_codec.feedback_params = expected_video_fb_params_;
+ EXPECT_EQ(expected_codec, codec);
+ }
+
+ if (options.data_channel_type == cricket::DCT_RTP) {
+ content = parser_->NextContent(content);
+ CheckRtpDataContent(content);
+ }
+
+ if (options.data_channel_type == cricket::DCT_SCTP) {
+ content = parser_->NextContent(content);
+ CheckSctpDataContent(content);
+ }
+
+ ClearStanzas();
+ }
+
+ // Walks the <payload-type> list under content element |e| and asserts it
+ // advertises exactly the full supported audio codec set, in this order:
+ // ISAC@16k, ISAC@32k, ISACLC, speex@16k, IPCMWB, G722, iLBC, speex@8k,
+ // GSM, EG711U, EG711A, PCMU, PCMA, CN@32k/16k/8k, red, telephone-event —
+ // and nothing after telephone-event.  Presumably this mirrors the codec
+ // list installed on the FakeMediaEngine by the fixture setup (not visible
+ // here) — confirm against the fixture constructor.
+ void TestHasAllSupportedAudioCodecs(buzz::XmlElement* e) {
+ ASSERT_TRUE(e != NULL);
+
+ e = PayloadTypeFromContent(e);
+ ASSERT_TRUE(e != NULL);
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(103, codec.id);
+ ASSERT_EQ("ISAC", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(104, codec.id);
+ ASSERT_EQ("ISAC", codec.name);
+ ASSERT_EQ(32000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(119, codec.id);
+ ASSERT_EQ("ISACLC", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(40000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(99, codec.id);
+ ASSERT_EQ("speex", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(22000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(97, codec.id);
+ ASSERT_EQ("IPCMWB", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(80000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(9, codec.id);
+ ASSERT_EQ("G722", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(64000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(102, codec.id);
+ ASSERT_EQ("iLBC", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(13300, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(98, codec.id);
+ ASSERT_EQ("speex", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(11000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(3, codec.id);
+ ASSERT_EQ("GSM", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(13000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(100, codec.id);
+ ASSERT_EQ("EG711U", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(64000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(101, codec.id);
+ ASSERT_EQ("EG711A", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(64000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(0, codec.id);
+ ASSERT_EQ("PCMU", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(64000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(8, codec.id);
+ ASSERT_EQ("PCMA", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(64000, codec.bitrate);
+ ASSERT_EQ(1, codec.channels);
+
+ // The CN, red and telephone-event entries below are checked without a
+ // bitrate assertion, unlike the voice codecs above.
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(126, codec.id);
+ ASSERT_EQ("CN", codec.name);
+ ASSERT_EQ(32000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(105, codec.id);
+ ASSERT_EQ("CN", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(13, codec.id);
+ ASSERT_EQ("CN", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(117, codec.id);
+ ASSERT_EQ("red", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(106, codec.id);
+ ASSERT_EQ("telephone-event", codec.name);
+ ASSERT_EQ(8000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ // List must end exactly here.
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+ }
+
+ // For a video initiate, checks the first content's first audio payload
+ // type (ISAC/103 @ 16 kHz, mono), then moves to the next content and
+ // checks its first video payload type (H264-SVC/99, 320x200 @ 30 fps).
+ void TestCodecsOfVideoInitiate(buzz::XmlElement* content) {
+ ASSERT_TRUE(content != NULL);
+ buzz::XmlElement* payload_type = PayloadTypeFromContent(content);
+ ASSERT_TRUE(payload_type != NULL);
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(payload_type);
+ ASSERT_EQ(103, codec.id);
+ ASSERT_EQ("ISAC", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ // Second content section carries the video description.
+ content = parser_->NextContent(content);
+ ASSERT_TRUE(content != NULL);
+ payload_type = PayloadTypeFromContent(content);
+ ASSERT_TRUE(payload_type != NULL);
+ cricket::VideoCodec vcodec =
+ parser_->VideoCodecFromPayloadType(payload_type);
+ ASSERT_EQ(99, vcodec.id);
+ ASSERT_EQ("H264-SVC", vcodec.name);
+ ASSERT_EQ(320, vcodec.width);
+ ASSERT_EQ(200, vcodec.height);
+ ASSERT_EQ(30, vcodec.framerate);
+ }
+
+ // After an initiate offering a partly-unsupported codec list, asserts the
+ // negotiated answer contains only the mutually supported subset, in order:
+ // ISAC/103, EG711U/100, EG711A/101, PCMU/0, CN/13 — and nothing else.
+ void TestHasAudioCodecsFromInitiateSomeUnsupported(buzz::XmlElement* e) {
+ ASSERT_TRUE(e != NULL);
+ e = PayloadTypeFromContent(e);
+ ASSERT_TRUE(e != NULL);
+
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(103, codec.id);
+ ASSERT_EQ("ISAC", codec.name);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(100, codec.id);
+ ASSERT_EQ("EG711U", codec.name);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(101, codec.id);
+ ASSERT_EQ("EG711A", codec.name);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(0, codec.id);
+ ASSERT_EQ("PCMU", codec.name);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(13, codec.id);
+ ASSERT_EQ("CN", codec.name);
+
+ // No further payload types expected.
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+ }
+
+ // For an initiate using dynamic payload numbering, asserts exactly one
+ // codec survives (id 123, 16 kHz, mono).  The codec name is deliberately
+ // not asserted here — presumably because dynamic payload types are
+ // matched by name elsewhere; confirm against the negotiation code.
+ void TestHasAudioCodecsFromInitiateDynamicAudioCodecs(
+ buzz::XmlElement* e) {
+ ASSERT_TRUE(e != NULL);
+ e = PayloadTypeFromContent(e);
+ ASSERT_TRUE(e != NULL);
+
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(123, codec.id);
+ ASSERT_EQ(16000, codec.clockrate);
+ ASSERT_EQ(1, codec.channels);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+ }
+
+ // Asserts the default (fallback) audio codec list: ISAC/103 followed by
+ // PCMU/0, with no further entries.
+ void TestHasDefaultAudioCodecs(buzz::XmlElement* e) {
+ ASSERT_TRUE(e != NULL);
+ e = PayloadTypeFromContent(e);
+ ASSERT_TRUE(e != NULL);
+
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(103, codec.id);
+ ASSERT_EQ("ISAC", codec.name);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(0, codec.id);
+ ASSERT_EQ("PCMU", codec.name);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+ }
+
+ // For an initiate that uses only static payload types, asserts the answer
+ // keeps ids 3 (GSM), 0 (PCMU) and 8 (PCMA), in that order, checking ids
+ // only (names are implied by the static payload-type assignments).
+ void TestHasAudioCodecsFromInitiateStaticAudioCodecs(
+ buzz::XmlElement* e) {
+ ASSERT_TRUE(e != NULL);
+ e = PayloadTypeFromContent(e);
+ ASSERT_TRUE(e != NULL);
+
+ cricket::AudioCodec codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(3, codec.id);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(0, codec.id);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e != NULL);
+ codec = AudioCodecFromPayloadType(e);
+ ASSERT_EQ(8, codec.id);
+
+ e = NextFromPayloadType(e);
+ ASSERT_TRUE(e == NULL);
+ }
+
+ // Feeds |initiate_string| (an initiate whose crypto offer we do not
+ // support) to the session manager and verifies the session still comes
+ // up: bad crypto is flagged on the incoming initiate, the outgoing accept
+ // carries no crypto, and the call terminates cleanly.  |element| is only
+ // reset to NULL; no parsed element is handed back by this path.
+ void TestGingleInitiateWithUnsupportedCrypto(
+ const std::string &initiate_string,
+ buzz::XmlElement** element) {
+ *element = NULL;
+
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(initiate_string));
+ client_->session_manager()->OnIncomingMessage(el.get());
+
+ ASSERT_EQ(cricket::Session::STATE_RECEIVEDINITIATE,
+ call_->sessions()[0]->state());
+ ClearStanzas();
+ CheckBadCryptoFromIncomingInitiate(call_->sessions()[0]);
+
+ // Accepting must succeed even though the offered crypto was unusable.
+ call_->AcceptSession(call_->sessions()[0], cricket::CallOptions());
+ ClearStanzas();
+ CheckNoCryptoForOutgoingAccept(call_->sessions()[0]);
+
+ call_->Terminate();
+ ASSERT_EQ(cricket::Session::STATE_SENTTERMINATE,
+ call_->sessions()[0]->state());
+ ClearStanzas();
+ }
+
+ // Initiates an outgoing call, then injects |accept_string| (with the real
+ // session id patched in) as the remote accept and verifies the SSRCs in
+ // the accept are applied to the audio/video (and, for DCT_RTP, data)
+ // channels.
+ // NOTE(review): |options| is taken by non-const reference but is only
+ // read here — could be const; confirm no caller relies on mutation.
+ void TestIncomingAcceptWithSsrcs(
+ const std::string& accept_string,
+ cricket::CallOptions& options) {
+ client_->CreateCall();
+ ASSERT_TRUE(call_ != NULL);
+
+ call_->InitiateSession(buzz::Jid("me@mydomain.com"),
+ buzz::Jid("me@mydomain.com"), options);
+ ASSERT_TRUE(call_->sessions()[0] != NULL);
+ ASSERT_EQ(cricket::Session::STATE_SENTINITIATE,
+ call_->sessions()[0]->state());
+ ASSERT_EQ(1U, stanzas_.size());
+ ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
+ ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
+ ASSERT_EQ(std::string(buzz::STR_SET), stanzas_[0]->Attr(buzz::QN_TYPE));
+ buzz::XmlElement* action = ActionFromStanza(stanzas_[0]);
+ ASSERT_TRUE(action != NULL);
+ buzz::XmlElement* content = ContentFromAction(action);
+ ASSERT_TRUE(content != NULL);
+ if (initial_protocol_ == cricket::PROTOCOL_JINGLE) {
+ // Outgoing initiate must not pre-announce an ssrc.
+ buzz::XmlElement* content_desc =
+ content->FirstNamed(cricket::QN_JINGLE_RTP_CONTENT);
+ ASSERT_TRUE(content_desc != NULL);
+ ASSERT_EQ("", content_desc->Attr(cricket::QN_SSRC));
+ }
+ ClearStanzas();
+
+ // We need to insert the session ID into the session accept message.
+ talk_base::scoped_ptr<buzz::XmlElement> el(
+ buzz::XmlElement::ForStr(accept_string));
+ const std::string sid = call_->sessions()[0]->id();
+ if (initial_protocol_ == cricket::PROTOCOL_JINGLE) {
+ buzz::XmlElement* jingle = el->FirstNamed(cricket::QN_JINGLE);
+ jingle->SetAttr(cricket::QN_SID, sid);
+ } else {
+ buzz::XmlElement* session = el->FirstNamed(cricket::QN_GINGLE_SESSION);
+ session->SetAttr(cricket::QN_ID, sid);
+ }
+
+ client_->session_manager()->OnIncomingMessage(el.get());
+
+ // The accept should be acknowledged with an IQ result.
+ ASSERT_EQ(cricket::Session::STATE_RECEIVEDACCEPT,
+ call_->sessions()[0]->state());
+ ASSERT_EQ(1U, stanzas_.size());
+ ASSERT_TRUE(buzz::QN_IQ == stanzas_[0]->Name());
+ ASSERT_TRUE(stanzas_[0]->HasAttr(buzz::QN_TYPE));
+ ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
+ ClearStanzas();
+
+ CheckAudioSsrcForIncomingAccept(call_->sessions()[0]);
+ CheckVideoSsrcForIncomingAccept(call_->sessions()[0]);
+ if (options.data_channel_type == cricket::DCT_RTP) {
+ CheckDataSsrcForIncomingAccept(call_->sessions()[0]);
+ }
+ // TODO(pthatcher): Check kDataSid if DCT_SCTP.
+ }
+
+ // Deletes all captured outgoing stanzas (stanzas_ owns heap copies made
+ // in OnSendStanza) and returns how many there were.
+ size_t ClearStanzas() {
+ size_t size = stanzas_.size();
+ for (size_t i = 0; i < size; i++) {
+ delete stanzas_[i];
+ }
+ stanzas_.clear();
+ return size;
+ }
+
+ // Stamps the current session's id onto the <jingle> child of |stanza| so
+ // canned XML fixtures address the live session.  Returns |stanza| for
+ // chaining.  Assumes a <jingle> child exists (FirstNamed result is
+ // dereferenced unchecked).
+ buzz::XmlElement* SetJingleSid(buzz::XmlElement* stanza) {
+ buzz::XmlElement* jingle =
+ stanza->FirstNamed(cricket::QN_JINGLE);
+ jingle->SetAttr(cricket::QN_SID, call_->sessions()[0]->id());
+ return stanza;
+ }
+
+ // In a MUC video call, verifies SendVideoStreamUpdate emits a stream-add
+ // stanza when the stream has an ssrc, and a stream-remove stanza when its
+ // ssrc list is cleared.
+ void TestSendVideoStreamUpdate() {
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+
+ client_->CreateCall();
+ call_->InitiateSession(buzz::Jid("me@mydomain.com"),
+ buzz::Jid("me@mydomain.com"), options);
+ ClearStanzas();
+
+ cricket::StreamParams stream;
+ stream.id = "test-stream";
+ stream.ssrcs.push_back(1001);
+ // Build the exact stanzas we expect to see on the wire.
+ talk_base::scoped_ptr<buzz::XmlElement> expected_stream_add(
+ buzz::XmlElement::ForStr(
+ JingleOutboundStreamAdd(
+ call_->sessions()[0]->id(),
+ "video", stream.id, "1001")));
+ talk_base::scoped_ptr<buzz::XmlElement> expected_stream_remove(
+ buzz::XmlElement::ForStr(
+ JingleOutboundStreamRemove(
+ call_->sessions()[0]->id(),
+ "video", stream.id)));
+
+ call_->SendVideoStreamUpdate(call_->sessions()[0],
+ call_->CreateVideoStreamUpdate(stream));
+ ASSERT_EQ(1U, stanzas_.size());
+ EXPECT_EQ(expected_stream_add->Str(), stanzas_[0]->Str());
+ ClearStanzas();
+
+ // An update with no ssrcs means "remove the stream".
+ stream.ssrcs.clear();
+ call_->SendVideoStreamUpdate(call_->sessions()[0],
+ call_->CreateVideoStreamUpdate(stream));
+ ASSERT_EQ(1U, stanzas_.size());
+ EXPECT_EQ(expected_stream_remove->Str(), stanzas_[0]->Str());
+ ClearStanzas();
+ }
+
+ // End-to-end scenario for MUC stream updates and view requests:
+ //  - malformed/ssrc-less/unknown-content stream-adds are ignored,
+ //  - valid adds surface via OnMediaStreamsUpdate and create recv streams
+ //    on the fake voice/video channels,
+ //  - duplicate adds and duplicate removes are no-ops,
+ //  - SendViewRequest succeeds while the target ssrc exists and fails
+ //    after the stream is removed.
+ void TestStreamsUpdateAndViewRequests() {
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+
+ client_->CreateCall();
+ call_->InitiateSession(buzz::Jid("me@mydomain.com"),
+ buzz::Jid("me@mydomain.com"), options);
+ ASSERT_EQ(1U, ClearStanzas());
+ ASSERT_EQ(0U, last_streams_added_.audio().size());
+ ASSERT_EQ(0U, last_streams_added_.video().size());
+ ASSERT_EQ(0U, last_streams_removed_.audio().size());
+ ASSERT_EQ(0U, last_streams_removed_.video().size());
+
+ talk_base::scoped_ptr<buzz::XmlElement> accept_stanza(
+ buzz::XmlElement::ForStr(kJingleAcceptWithSsrcs));
+ SetJingleSid(accept_stanza.get());
+ client_->session_manager()->OnIncomingMessage(accept_stanza.get());
+ ASSERT_EQ(cricket::Session::STATE_RECEIVEDACCEPT,
+ call_->sessions()[0]->state());
+ ASSERT_EQ(1U, stanzas_.size());
+ ASSERT_EQ(std::string(buzz::STR_RESULT), stanzas_[0]->Attr(buzz::QN_TYPE));
+ ClearStanzas();
+ // Need to clear the added streams, because they are populated when
+ // receiving an accept message now.
+ last_streams_added_.mutable_video()->clear();
+ last_streams_added_.mutable_audio()->clear();
+
+ call_->sessions()[0]->SetState(cricket::Session::STATE_INPROGRESS);
+
+ // "ABC" is not a numeric ssrc, so this add is rejected.
+ talk_base::scoped_ptr<buzz::XmlElement> streams_stanza(
+ buzz::XmlElement::ForStr(
+ JingleStreamAdd("video", "Bob", "video1", "ABC")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ // First one is ignored because of bad syntax.
+ ASSERT_EQ(1U, stanzas_.size());
+ // TODO(pthatcher): Figure out how to make this an ERROR rather than RESULT.
+ ASSERT_EQ(std::string(buzz::STR_ERROR), stanzas_[0]->Attr(buzz::QN_TYPE));
+ ClearStanzas();
+ ASSERT_EQ(0U, last_streams_added_.audio().size());
+ ASSERT_EQ(0U, last_streams_added_.video().size());
+ ASSERT_EQ(0U, last_streams_removed_.audio().size());
+ ASSERT_EQ(0U, last_streams_removed_.video().size());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAdd("audio", "Bob", "audio1", "1234")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_added_.audio().size());
+ ASSERT_EQ("Bob", last_streams_added_.audio()[0].groupid);
+ ASSERT_EQ(1U, last_streams_added_.audio()[0].ssrcs.size());
+ ASSERT_EQ(1234U, last_streams_added_.audio()[0].first_ssrc());
+
+ // Ignores adds without ssrcs.
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAddWithoutSsrc("audio", "Bob", "audioX")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_added_.audio().size());
+ ASSERT_EQ(1234U, last_streams_added_.audio()[0].first_ssrc());
+
+ // Ignores stream updates with unknown content names. (Don't terminate).
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAddWithoutSsrc("foo", "Bob", "foo")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAdd("audio", "Joe", "audio1", "2468")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_added_.audio().size());
+ ASSERT_EQ("Joe", last_streams_added_.audio()[0].groupid);
+ ASSERT_EQ(1U, last_streams_added_.audio()[0].ssrcs.size());
+ ASSERT_EQ(2468U, last_streams_added_.audio()[0].first_ssrc());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAdd("video", "Bob", "video1", "5678")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_added_.video().size());
+ ASSERT_EQ("Bob", last_streams_added_.video()[0].groupid);
+ ASSERT_EQ(1U, last_streams_added_.video()[0].ssrcs.size());
+ ASSERT_EQ(5678U, last_streams_added_.video()[0].first_ssrc());
+
+ // We're testing that a "duplicate" is effectively ignored.
+ last_streams_added_.mutable_video()->clear();
+ last_streams_removed_.mutable_video()->clear();
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAdd("video", "Bob", "video1", "5678")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(0U, last_streams_added_.video().size());
+ ASSERT_EQ(0U, last_streams_removed_.video().size());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamAdd("video", "Bob", "video2", "5679")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_added_.video().size());
+ ASSERT_EQ("Bob", last_streams_added_.video()[0].groupid);
+ ASSERT_EQ(1U, last_streams_added_.video()[0].ssrcs.size());
+ ASSERT_EQ(5679U, last_streams_added_.video()[0].first_ssrc());
+
+ // The adds above must have materialized as recv streams on the fake
+ // media channels.
+ cricket::FakeVoiceMediaChannel* voice_channel = fme_->GetVoiceChannel(0);
+ ASSERT_TRUE(voice_channel != NULL);
+ ASSERT_TRUE(voice_channel->HasRecvStream(1234U));
+ ASSERT_TRUE(voice_channel->HasRecvStream(2468U));
+ cricket::FakeVideoMediaChannel* video_channel = fme_->GetVideoChannel(0);
+ ASSERT_TRUE(video_channel != NULL);
+ ASSERT_TRUE(video_channel->HasRecvStream(5678U));
+ ClearStanzas();
+
+ cricket::ViewRequest viewRequest;
+ cricket::StaticVideoView staticVideoView(
+ cricket::StreamSelector(5678U), 640, 480, 30);
+ viewRequest.static_video_views.push_back(staticVideoView);
+ talk_base::scoped_ptr<buzz::XmlElement> expected_view_elem(
+ buzz::XmlElement::ForStr(JingleView("5678", "640", "480", "30")));
+ SetJingleSid(expected_view_elem.get());
+
+ ASSERT_TRUE(
+ call_->SendViewRequest(call_->sessions()[0], viewRequest));
+ ASSERT_EQ(1U, stanzas_.size());
+ ASSERT_EQ(expected_view_elem->Str(), stanzas_[0]->Str());
+ ClearStanzas();
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamRemove("audio", "Bob", "audio1")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_removed_.audio().size());
+ ASSERT_EQ(1U, last_streams_removed_.audio()[0].ssrcs.size());
+ EXPECT_EQ(1234U, last_streams_removed_.audio()[0].first_ssrc());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamRemove("video", "Bob", "video1")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_removed_.video().size());
+ ASSERT_EQ(1U, last_streams_removed_.video()[0].ssrcs.size());
+ EXPECT_EQ(5678U, last_streams_removed_.video()[0].first_ssrc());
+
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamRemove("video", "Bob", "video2")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(1U, last_streams_removed_.video().size());
+ ASSERT_EQ(1U, last_streams_removed_.video()[0].ssrcs.size());
+ EXPECT_EQ(5679U, last_streams_removed_.video()[0].first_ssrc());
+
+ // Duplicate removal: should be ignored.
+ last_streams_removed_.mutable_audio()->clear();
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamRemove("audio", "Bob", "audio1")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(0U, last_streams_removed_.audio().size());
+
+ // Duplicate removal: should be ignored.
+ last_streams_removed_.mutable_video()->clear();
+ streams_stanza.reset(buzz::XmlElement::ForStr(
+ JingleStreamRemove("video", "Bob", "video1")));
+ SetJingleSid(streams_stanza.get());
+ client_->session_manager()->OnIncomingMessage(streams_stanza.get());
+ ASSERT_EQ(0U, last_streams_removed_.video().size());
+
+ voice_channel = fme_->GetVoiceChannel(0);
+ ASSERT_TRUE(voice_channel != NULL);
+ ASSERT_FALSE(voice_channel->HasRecvStream(1234U));
+ ASSERT_TRUE(voice_channel->HasRecvStream(2468U));
+ video_channel = fme_->GetVideoChannel(0);
+ ASSERT_TRUE(video_channel != NULL);
+ ASSERT_FALSE(video_channel->HasRecvStream(5678U));
+
+ // Fails because ssrc is now invalid.
+ ASSERT_FALSE(
+ call_->SendViewRequest(call_->sessions()[0], viewRequest));
+
+ ClearStanzas();
+ }
+
+ // Applies the given secure-media policy to the client under test.
+ void MakeSignalingSecure(cricket::SecureMediaPolicy secure) {
+ client_->set_secure(secure);
+ }
+
+ // Enables crypto expectations for subsequent checks.  Outgoing crypto is
+ // only expected when the build has SRTP support compiled in.
+ void ExpectCrypto(cricket::SecureMediaPolicy secure) {
+ MakeSignalingSecure(secure);
+ expect_incoming_crypto_ = true;
+#ifdef HAVE_SRTP
+ expect_outgoing_crypto_ = true;
+#endif
+ }
+
+ // Sets the video bandwidth value later checks should expect.
+ void ExpectVideoBandwidth(int bandwidth) {
+ expected_video_bandwidth_ = bandwidth;
+ }
+
+ // Sets whether later checks should expect video rtcp-mux to be on.
+ void ExpectVideoRtcpMux(bool rtcp_mux) {
+ expected_video_rtcp_mux_ = rtcp_mux;
+ }
+
+ // Copies |fb_params| onto every codec in |codecs| (works for audio,
+ // video, or data codec vectors).
+ template <class C>
+ void SetCodecFeedbackParams(std::vector<C>* codecs,
+ const FeedbackParams& fb_params) {
+ for (size_t i = 0; i < codecs->size(); ++i) {
+ codecs->at(i).feedback_params = fb_params;
+ }
+ }
+
+ // Arms rtcp-fb expectations: audio and data codecs should carry nack;
+ // video codecs should carry ccm/fir plus nack.
+ void ExpectRtcpFb() {
+ FeedbackParams params_nack_fir;
+ params_nack_fir.Add(FeedbackParam(cricket::kRtcpFbParamCcm,
+ cricket::kRtcpFbCcmParamFir));
+ params_nack_fir.Add(FeedbackParam(cricket::kRtcpFbParamNack));
+
+ FeedbackParams params_nack;
+ params_nack.Add(FeedbackParam(cricket::kRtcpFbParamNack));
+
+ expected_audio_fb_params_ = params_nack;
+ expected_video_fb_params_ = params_nack_fir;
+ expected_data_fb_params_ = params_nack;
+ }
+
+ private:
+ // Signal handlers wired up by the fixture.
+
+ // Captures every outgoing stanza (as an owned copy) into stanzas_ so
+ // tests can inspect the wire traffic; ClearStanzas() frees them.
+ void OnSendStanza(cricket::SessionManager* manager,
+ const buzz::XmlElement* stanza) {
+ LOG(LS_INFO) << stanza->Str();
+ stanzas_.push_back(new buzz::XmlElement(*stanza));
+ }
+
+ // Forces each new session onto the protocol this fixture was built with.
+ void OnSessionCreate(cricket::Session* session, bool initiate) {
+ session->set_current_protocol(initial_protocol_);
+ }
+
+ // Tracks the single active call and subscribes to its stream updates.
+ void OnCallCreate(cricket::Call *call) {
+ call_ = call;
+ call->SignalMediaStreamsUpdate.connect(
+ this, &MediaSessionClientTest::OnMediaStreamsUpdate);
+ }
+
+ // Drops the (now dangling) call pointer.
+ void OnCallDestroy(cricket::Call *call) {
+ call_ = NULL;
+ }
+
+ // Records the most recent stream add/remove notification for assertions.
+ void OnMediaStreamsUpdate(cricket::Call *call,
+ cricket::Session *session,
+ const cricket::MediaStreams& added,
+ const cricket::MediaStreams& removed) {
+ last_streams_added_.CopyFrom(added);
+ last_streams_removed_.CopyFrom(removed);
+ }
+
+ // Fixture plumbing — raw pointers; ownership/teardown is handled in the
+ // constructor/destructor, which are outside this view (confirm there).
+ talk_base::NetworkManager* nm_;
+ cricket::PortAllocator* pa_;
+ cricket::SessionManager* sm_;
+ cricket::FakeMediaEngine* fme_;
+ cricket::FakeDataEngine* fdme_;
+ cricket::MediaSessionClient* client_;
+
+ // Call under test (set/cleared by OnCallCreate/OnCallDestroy).
+ cricket::Call* call_;
+ // Owned copies of sent stanzas; freed by ClearStanzas().
+ std::vector<buzz::XmlElement* > stanzas_;
+ // Gingle- or Jingle-specific XML navigation helper.
+ MediaSessionTestParser* parser_;
+ cricket::SignalingProtocol initial_protocol_;
+ // Expectation state consumed by the Check*/Test* helpers above.
+ bool expect_incoming_crypto_;
+ bool expect_outgoing_crypto_;
+ int expected_video_bandwidth_;
+ bool expected_video_rtcp_mux_;
+ FeedbackParams expected_audio_fb_params_;
+ FeedbackParams expected_video_fb_params_;
+ FeedbackParams expected_data_fb_params_;
+ // Last OnMediaStreamsUpdate payloads.
+ cricket::MediaStreams last_streams_added_;
+ cricket::MediaStreams last_streams_removed_;
+};
+
+// Factory for a fixture speaking the legacy Gingle protocol.  Returns an
+// owning raw pointer; callers wrap it in talk_base::scoped_ptr.
+MediaSessionClientTest* GingleTest() {
+ return new MediaSessionClientTest(new GingleSessionTestParser(),
+ cricket::PROTOCOL_GINGLE);
+}
+
+// Factory for a fixture speaking the Jingle (XEP-0166) protocol.
+MediaSessionClientTest* JingleTest() {
+ return new MediaSessionClientTest(new JingleSessionTestParser(),
+ cricket::PROTOCOL_JINGLE);
+}
+
+// Good incoming Jingle initiates: each variant should be parsed and
+// accepted, with the relevant feature (rtcp-fb, bandwidth, rtcp-mux,
+// RTP/SCTP data) reflected in the negotiated state.
+TEST(MediaSessionTest, JingleGoodInitiateWithRtcpFb) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+
+ cricket::CallOptions options = VideoCallOptions();
+ options.data_channel_type = cricket::DCT_SCTP;
+ test->ExpectRtcpFb();
+ test->TestGoodIncomingInitiate(
+ kJingleInitiateWithRtcpFb, options, elem.use());
+}
+
+TEST(MediaSessionTest, JingleGoodVideoInitiate) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleVideoInitiate, VideoCallOptions(), elem.use());
+ test->TestCodecsOfVideoInitiate(elem.get());
+}
+
+TEST(MediaSessionTest, JingleGoodVideoInitiateWithBandwidth) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->ExpectVideoBandwidth(42000);
+ test->TestGoodIncomingInitiate(
+ kJingleVideoInitiateWithBandwidth, VideoCallOptions(), elem.use());
+}
+
+TEST(MediaSessionTest, JingleGoodVideoInitiateWithRtcpMux) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->ExpectVideoRtcpMux(true);
+ test->TestGoodIncomingInitiate(
+ kJingleVideoInitiateWithRtcpMux, VideoCallOptions(), elem.use());
+}
+
+TEST(MediaSessionTest, JingleGoodVideoInitiateWithRtpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ cricket::CallOptions options = VideoCallOptions();
+ options.data_channel_type = cricket::DCT_RTP;
+ // RTP data content is only valid with a crypto offer attached.
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kJingleVideoInitiateWithRtpData, kJingleCryptoOffer),
+ options,
+ elem.use());
+}
+
+TEST(MediaSessionTest, JingleGoodVideoInitiateWithSctpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ cricket::CallOptions options = VideoCallOptions();
+ options.data_channel_type = cricket::DCT_SCTP;
+ test->TestGoodIncomingInitiate(kJingleVideoInitiateWithSctpData,
+ options,
+ elem.use());
+}
+
+// Rejection cases: local CallOptions omit one or more of the offered
+// content types, so the corresponding content must be rejected.  Which
+// content is dropped is determined by the options each test builds.
+TEST(MediaSessionTest, JingleRejectAudio) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ cricket::CallOptions options = VideoCallOptions();
+ options.has_audio = false;
+ options.data_channel_type = cricket::DCT_RTP;
+ test->TestRejectOffer(kJingleVideoInitiateWithRtpData, options, elem.use());
+}
+
+TEST(MediaSessionTest, JingleRejectVideo) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ cricket::CallOptions options = AudioCallOptions();
+ options.data_channel_type = cricket::DCT_RTP;
+ test->TestRejectOffer(kJingleVideoInitiateWithRtpData, options, elem.use());
+}
+
+TEST(MediaSessionTest, JingleRejectData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestRejectOffer(
+ kJingleVideoInitiateWithRtpData, VideoCallOptions(), elem.use());
+}
+
+TEST(MediaSessionTest, JingleRejectVideoAndData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestRejectOffer(
+ kJingleVideoInitiateWithRtpData, AudioCallOptions(), elem.use());
+}
+
+// Codec-negotiation variants: each feeds a different canned initiate and
+// then checks the resulting codec list with the matching helper above.
+TEST(MediaSessionTest, JingleGoodInitiateAllSupportedAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleInitiate, AudioCallOptions(), elem.use());
+ test->TestHasAllSupportedAudioCodecs(elem.get());
+}
+
+// A differently-ordered offer must still yield the full supported set.
+TEST(MediaSessionTest, JingleGoodInitiateDifferentPreferenceAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleInitiateDifferentPreference, AudioCallOptions(), elem.use());
+ test->TestHasAllSupportedAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, JingleGoodInitiateSomeUnsupportedAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleInitiateSomeUnsupported, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateSomeUnsupported(elem.get());
+}
+
+TEST(MediaSessionTest, JingleGoodInitiateDynamicAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleInitiateDynamicAudioCodecs, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateDynamicAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, JingleGoodInitiateStaticAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ kJingleInitiateStaticAudioCodecs, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateStaticAudioCodecs(elem.get());
+}
+
+// Malformed / unacceptable initiates: each canned stanza must be rejected
+// outright by TestBadIncomingInitiate (no session proceeds).
+TEST(MediaSessionTest, JingleBadInitiateNoAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateNoAudioCodecs);
+}
+
+TEST(MediaSessionTest, JingleBadInitiateNoSupportedAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateNoSupportedAudioCodecs);
+}
+
+TEST(MediaSessionTest, JingleBadInitiateWrongClockrates) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateWrongClockrates);
+}
+
+TEST(MediaSessionTest, JingleBadInitiateWrongChannels) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateWrongChannels);
+}
+
+TEST(MediaSessionTest, JingleBadInitiateNoPayloadTypes) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateNoPayloadTypes);
+}
+
+TEST(MediaSessionTest, JingleBadInitiateDynamicWithoutNames) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(kJingleInitiateDynamicWithoutNames);
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiate) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithBandwidth) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.video_bandwidth = 42000;
+ test->TestGoodOutgoingInitiate(options);
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithRtcpMux) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.rtcp_mux_enabled = true;
+ test->TestGoodOutgoingInitiate(options);
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithRtpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options;
+ options.data_channel_type = cricket::DCT_RTP;
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodOutgoingInitiate(options);
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithSctpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options;
+ options.data_channel_type = cricket::DCT_SCTP;
+ test->TestGoodOutgoingInitiate(options);
+}
+
+// Crypto related tests.
+
+// Offer has crypto but the session is not secured, just ignore it.
+TEST(MediaSessionTest, JingleInitiateWithCryptoIsIgnoredWhenNotSecured) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kJingleVideoInitiate, kJingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has crypto required but the session is not secure, fail.
+TEST(MediaSessionTest, JingleInitiateWithCryptoRequiredWhenNotSecured) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(AddEncryption(kJingleVideoInitiate,
+ kJingleRequiredCryptoOffer));
+}
+
+// Offer has no crypto but the session is secure required, fail.
+TEST(MediaSessionTest, JingleInitiateWithNoCryptoFailsWhenSecureRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestBadIncomingInitiate(kJingleInitiate);
+}
+
+// Offer has crypto and session is secure, expect crypto in the answer.
+TEST(MediaSessionTest, JingleInitiateWithCryptoWhenSecureEnabled) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kJingleVideoInitiate, kJingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has crypto and session is secure required, expect crypto in
+// the answer.
+TEST(MediaSessionTest, JingleInitiateWithCryptoWhenSecureRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kJingleVideoInitiate, kJingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has unsupported crypto and session is secure, no crypto in
+// the answer.
+TEST(MediaSessionTest, JingleInitiateWithUnsupportedCrypto) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ test->MakeSignalingSecure(cricket::SEC_ENABLED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kJingleInitiate, kJingleUnsupportedCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has unsupported REQUIRED crypto and session is not secure, fail.
+TEST(MediaSessionTest, JingleInitiateWithRequiredUnsupportedCrypto) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestBadIncomingInitiate(
+ AddEncryption(kJingleInitiate, kJingleRequiredUnsupportedCryptoOffer));
+}
+
+// Offer has unsupported REQUIRED crypto and session is secure, fail.
+TEST(MediaSessionTest, JingleInitiateWithRequiredUnsupportedCryptoWhenSecure) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->MakeSignalingSecure(cricket::SEC_ENABLED);
+ test->TestBadIncomingInitiate(
+ AddEncryption(kJingleInitiate, kJingleRequiredUnsupportedCryptoOffer));
+}
+
+// Offer has unsupported REQUIRED crypto and session is required secure, fail.
+TEST(MediaSessionTest,
+ JingleInitiateWithRequiredUnsupportedCryptoWhenSecureRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->MakeSignalingSecure(cricket::SEC_REQUIRED);
+ test->TestBadIncomingInitiate(
+ AddEncryption(kJingleInitiate, kJingleRequiredUnsupportedCryptoOffer));
+}
+
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithCrypto) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, JingleGoodOutgoingInitiateWithCryptoRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, JingleIncomingAcceptWithSsrcs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+ test->TestIncomingAcceptWithSsrcs(kJingleAcceptWithSsrcs, options);
+}
+
+TEST(MediaSessionTest, JingleIncomingAcceptWithRtpDataSsrcs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+ options.data_channel_type = cricket::DCT_RTP;
+ test->TestIncomingAcceptWithSsrcs(kJingleAcceptWithRtpDataSsrcs, options);
+}
+
+TEST(MediaSessionTest, JingleIncomingAcceptWithSctpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+ options.data_channel_type = cricket::DCT_SCTP;
+ test->TestIncomingAcceptWithSsrcs(kJingleAcceptWithSctpData, options);
+}
+
+TEST(MediaSessionTest, JingleStreamsUpdateAndView) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestStreamsUpdateAndViewRequests();
+}
+
+TEST(MediaSessionTest, JingleSendVideoStreamUpdate) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(JingleTest());
+ test->TestSendVideoStreamUpdate();
+}
+
+// Gingle tests
+// Mirror of the Jingle suite above, driving the same client through the
+// legacy Gingle signaling dialect via GingleTest().
+
+TEST(MediaSessionTest, GingleGoodVideoInitiate) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleVideoInitiate, VideoCallOptions(), elem.use());
+ test->TestCodecsOfVideoInitiate(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodVideoInitiateWithBandwidth) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectVideoBandwidth(42000);
+ test->TestGoodIncomingInitiate(
+ kGingleVideoInitiateWithBandwidth, VideoCallOptions(), elem.use());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateAllSupportedAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiate, AudioCallOptions(), elem.use());
+ test->TestHasAllSupportedAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateAllSupportedAudioCodecsWithCrypto) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleCryptoOffer),
+ AudioCallOptions(),
+ elem.use());
+ test->TestHasAllSupportedAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateDifferentPreferenceAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiateDifferentPreference, AudioCallOptions(), elem.use());
+ test->TestHasAllSupportedAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateSomeUnsupportedAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiateSomeUnsupported, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateSomeUnsupported(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateDynamicAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiateDynamicAudioCodecs, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateDynamicAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, GingleGoodInitiateStaticAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiateStaticAudioCodecs, AudioCallOptions(), elem.use());
+ test->TestHasAudioCodecsFromInitiateStaticAudioCodecs(elem.get());
+}
+
+// Unlike Jingle (see JingleBadInitiateNoAudioCodecs, which rejects such an
+// offer), Gingle accepts an initiate listing no audio codecs and answers
+// with the default codec set.
+TEST(MediaSessionTest, GingleGoodInitiateNoAudioCodecs) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ kGingleInitiateNoAudioCodecs, AudioCallOptions(), elem.use());
+ test->TestHasDefaultAudioCodecs(elem.get());
+}
+
+TEST(MediaSessionTest, GingleBadInitiateNoSupportedAudioCodecs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(kGingleInitiateNoSupportedAudioCodecs);
+}
+
+TEST(MediaSessionTest, GingleBadInitiateWrongClockrates) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(kGingleInitiateWrongClockrates);
+}
+
+TEST(MediaSessionTest, GingleBadInitiateWrongChannels) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(kGingleInitiateWrongChannels);
+}
+
+
+TEST(MediaSessionTest, GingleBadInitiateNoPayloadTypes) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(kGingleInitiateNoPayloadTypes);
+}
+
+TEST(MediaSessionTest, GingleBadInitiateDynamicWithoutNames) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(kGingleInitiateDynamicWithoutNames);
+}
+
+TEST(MediaSessionTest, GingleGoodOutgoingInitiate) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, GingleGoodOutgoingInitiateWithBandwidth) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.video_bandwidth = 42000;
+ test->TestGoodOutgoingInitiate(options);
+}
+
+// Crypto related tests.
+
+// Offer has crypto but the session is not secured, just ignore it.
+TEST(MediaSessionTest, GingleInitiateWithCryptoIsIgnoredWhenNotSecured) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has crypto required but the session is not secure, fail.
+TEST(MediaSessionTest, GingleInitiateWithCryptoRequiredWhenNotSecured) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(AddEncryption(kGingleInitiate,
+ kGingleRequiredCryptoOffer));
+}
+
+// Offer has no crypto but the session is secure required, fail.
+TEST(MediaSessionTest, GingleInitiateWithNoCryptoFailsWhenSecureRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestBadIncomingInitiate(kGingleInitiate);
+}
+
+// Offer has crypto and session is secure, expect crypto in the answer.
+TEST(MediaSessionTest, GingleInitiateWithCryptoWhenSecureEnabled) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has crypto and session is secure required, expect crypto in
+// the answer.
+TEST(MediaSessionTest, GingleInitiateWithCryptoWhenSecureRequired) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has unsupported crypto and session is secure, no crypto in
+// the answer.
+TEST(MediaSessionTest, GingleInitiateWithUnsupportedCrypto) {
+ talk_base::scoped_ptr<buzz::XmlElement> elem;
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->MakeSignalingSecure(cricket::SEC_ENABLED);
+ test->TestGoodIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleUnsupportedCryptoOffer),
+ VideoCallOptions(),
+ elem.use());
+}
+
+// Offer has unsupported REQUIRED crypto and session is not secure, fail.
+TEST(MediaSessionTest, GingleInitiateWithRequiredUnsupportedCrypto) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->TestBadIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleRequiredUnsupportedCryptoOffer));
+}
+
+// Offer has unsupported REQUIRED crypto and session is secure, fail.
+TEST(MediaSessionTest, GingleInitiateWithRequiredUnsupportedCryptoWhenSecure) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->MakeSignalingSecure(cricket::SEC_ENABLED);
+ test->TestBadIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleRequiredUnsupportedCryptoOffer));
+}
+
+// Offer has unsupported REQUIRED crypto and session is required secure, fail.
+TEST(MediaSessionTest,
+ GingleInitiateWithRequiredUnsupportedCryptoWhenSecureRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->MakeSignalingSecure(cricket::SEC_REQUIRED);
+ test->TestBadIncomingInitiate(
+ AddEncryption(kGingleInitiate, kGingleRequiredUnsupportedCryptoOffer));
+}
+
+TEST(MediaSessionTest, GingleGoodOutgoingInitiateWithCrypto) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, GingleGoodOutgoingInitiateWithCryptoRequired) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ test->ExpectCrypto(cricket::SEC_REQUIRED);
+ test->TestGoodOutgoingInitiate(AudioCallOptions());
+}
+
+TEST(MediaSessionTest, GingleIncomingAcceptWithSsrcs) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ cricket::CallOptions options = VideoCallOptions();
+ options.is_muc = true;
+ test->TestIncomingAcceptWithSsrcs(kGingleAcceptWithSsrcs, options);
+}
+
+TEST(MediaSessionTest, GingleGoodOutgoingInitiateWithRtpData) {
+ talk_base::scoped_ptr<MediaSessionClientTest> test(GingleTest());
+ cricket::CallOptions options;
+ options.data_channel_type = cricket::DCT_RTP;
+ test->ExpectCrypto(cricket::SEC_ENABLED);
+ test->TestGoodOutgoingInitiate(options);
+}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/mediasink.h b/chromium/third_party/libjingle/source/talk/session/media/mediasink.h
new file mode 100644
index 00000000000..fb0e06be886
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/mediasink.h
@@ -0,0 +1,48 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_MEDIASINK_H_
+#define TALK_SESSION_MEDIA_MEDIASINK_H_
+
+namespace cricket {
+
+// MediaSinkInterface is a sink to handle RTP and RTCP packets that are sent or
+// received by a channel.
+class MediaSinkInterface {
+ public:
+ virtual ~MediaSinkInterface() {}
+
+ // Caps how much the sink may accumulate. Units (bytes vs. packets) are
+ // not visible from this header — defined by implementations.
+ virtual void SetMaxSize(size_t size) = 0;
+ // Turns the sink on or off; returns whether the state change succeeded.
+ virtual bool Enable(bool enable) = 0;
+ virtual bool IsEnabled() const = 0;
+ // Delivers one packet to the sink; |rtcp| distinguishes RTCP from RTP.
+ virtual void OnPacket(const void* data, size_t size, bool rtcp) = 0;
+ // Selects which packets are delivered; filter bit semantics are defined
+ // by implementations, not this interface.
+ virtual void set_packet_filter(int filter) = 0;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_MEDIASINK_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.cc b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.cc
new file mode 100644
index 00000000000..7091952fd77
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.cc
@@ -0,0 +1,132 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/rtcpmuxfilter.h"
+
+#include "talk/base/logging.h"
+
+namespace cricket {
+
+// Starts in ST_INIT with no mux-enabling offer recorded.
+RtcpMuxFilter::RtcpMuxFilter() : state_(ST_INIT), offer_enable_(false) {
+}
+
+// Mux counts as negotiated once an answer enabling it — provisional or
+// final, sent or received — has been accepted (see SetProvisionalAnswer
+// and SetAnswer for the transitions into these states).
+bool RtcpMuxFilter::IsActive() const {
+ return state_ == ST_SENTPRANSWER ||
+ state_ == ST_RECEIVEDPRANSWER ||
+ state_ == ST_ACTIVE;
+}
+
+// Records whether the offer from |src| (local or remote) enables RTCP mux
+// and moves to the corresponding offer state. Returns false — leaving
+// state untouched — when an offer is not legal in the current state
+// (see ExpectOffer).
+bool RtcpMuxFilter::SetOffer(bool offer_enable, ContentSource src) {
+ if (!ExpectOffer(offer_enable, src)) {
+ LOG(LS_ERROR) << "Invalid state for change of RTCP mux offer";
+ return false;
+ }
+
+ offer_enable_ = offer_enable;
+ state_ = (src == CS_LOCAL) ? ST_SENTOFFER : ST_RECEIVEDOFFER;
+ return true;
+}
+
+// Applies a provisional answer from |src|. If both offer and answer enable
+// mux, moves to the matching PRANSWER state (which makes IsActive() true);
+// if the provisional answer declines mux, rolls back to the post-offer
+// state so a later provisional/final answer can still decide. Fails when
+// an answer is not legal in the current state, or when the answer enables
+// mux that the offer never proposed.
+bool RtcpMuxFilter::SetProvisionalAnswer(bool answer_enable,
+ ContentSource src) {
+ if (!ExpectAnswer(src)) {
+ LOG(LS_ERROR) << "Invalid state for RTCP mux provisional answer";
+ return false;
+ }
+
+ if (offer_enable_) {
+ if (answer_enable) {
+ if (src == CS_REMOTE)
+ state_ = ST_RECEIVEDPRANSWER;
+ else // CS_LOCAL
+ state_ = ST_SENTPRANSWER;
+ } else {
+ // The provisional answer doesn't want to use RTCP mux.
+ // Go back to the original state after the offer was set and wait for next
+ // provisional or final answer.
+ // (A remote answer implies we sent the offer, and vice versa.)
+ if (src == CS_REMOTE)
+ state_ = ST_SENTOFFER;
+ else // CS_LOCAL
+ state_ = ST_RECEIVEDOFFER;
+ }
+ } else if (answer_enable) {
+ // If the offer didn't specify RTCP mux, the answer shouldn't either.
+ LOG(LS_WARNING) << "Invalid parameters in RTCP mux provisional answer";
+ return false;
+ }
+
+ return true;
+}
+
+// Applies the final answer from |src|: both sides enabling mux activates
+// the filter (ST_ACTIVE); both declining resets to ST_INIT; an answer that
+// enables mux the offer never proposed is rejected.
+bool RtcpMuxFilter::SetAnswer(bool answer_enable, ContentSource src) {
+ if (!ExpectAnswer(src)) {
+ LOG(LS_ERROR) << "Invalid state for RTCP mux answer";
+ return false;
+ }
+
+ if (offer_enable_ && answer_enable) {
+ state_ = ST_ACTIVE;
+ } else if (answer_enable) {
+ // If the offer didn't specify RTCP mux, the answer shouldn't either.
+ LOG(LS_WARNING) << "Invalid parameters in RTCP mux answer";
+ return false;
+ } else {
+ // Mux declined (or never offered): negotiation returns to the ground
+ // state. Note offer_enable_ keeps its last value.
+ state_ = ST_INIT;
+ }
+ return true;
+}
+
+bool RtcpMuxFilter::DemuxRtcp(const char* data, int len) {
+ // If we're muxing RTP/RTCP, we must inspect each packet delivered and
+ // determine whether it is RTP or RTCP. We do so by checking the packet type,
+ // and assuming RTP if type is 0-63 or 96-127. For additional details, see
+ // http://tools.ietf.org/html/rfc5761.
+ // Note that if we offer RTCP mux, we may receive muxed RTCP before we
+ // receive the answer, so we operate in that state too.
+ // Given the State enum ordering, state_ < ST_SENTOFFER admits only
+ // ST_INIT and ST_RECEIVEDOFFER — i.e. merely having *received* a mux
+ // offer is not enough to start demuxing.
+ if (!offer_enable_ || state_ < ST_SENTOFFER) {
+ return false;
+ }
+
+ // Second byte with the top (RTP marker) bit masked off; packets shorter
+ // than two bytes yield 0 and are classified as RTP.
+ int type = (len >= 2) ? (static_cast<uint8>(data[1]) & 0x7F) : 0;
+ return (type >= 64 && type < 96);
+}
+
+// An offer is legal from the ground state, as a no-change re-offer while
+// active (the mux flag must match what was negotiated), or as an update to
+// a pending offer from the same side that sent it.
+bool RtcpMuxFilter::ExpectOffer(bool offer_enable, ContentSource source) {
+ return ((state_ == ST_INIT) ||
+ (state_ == ST_ACTIVE && offer_enable == offer_enable_) ||
+ (state_ == ST_SENTOFFER && source == CS_LOCAL) ||
+ (state_ == ST_RECEIVEDOFFER && source == CS_REMOTE));
+}
+
+// An answer must come from the side opposite the pending offer; once a
+// provisional answer is in place, only the same side may finalize it.
+bool RtcpMuxFilter::ExpectAnswer(ContentSource source) {
+ return ((state_ == ST_SENTOFFER && source == CS_REMOTE) ||
+ (state_ == ST_RECEIVEDOFFER && source == CS_LOCAL) ||
+ (state_ == ST_SENTPRANSWER && source == CS_LOCAL) ||
+ (state_ == ST_RECEIVEDPRANSWER && source == CS_REMOTE));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.h b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.h
new file mode 100644
index 00000000000..a5bb85e3c02
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter.h
@@ -0,0 +1,86 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_RTCPMUXFILTER_H_
+#define TALK_SESSION_MEDIA_RTCPMUXFILTER_H_
+
+#include "talk/base/basictypes.h"
+#include "talk/p2p/base/sessiondescription.h"
+
+namespace cricket {
+
+// RTCP Muxer, as defined in RFC 5761 (http://tools.ietf.org/html/rfc5761)
+// Tracks the offer/answer negotiation of RTP/RTCP multiplexing and, once
+// our side has offered mux, classifies incoming packets as RTP or RTCP.
+class RtcpMuxFilter {
+ public:
+ RtcpMuxFilter();
+
+ // Whether the filter is active, i.e. has RTCP mux been properly negotiated.
+ bool IsActive() const;
+
+ // Specifies whether the offer indicates the use of RTCP mux.
+ bool SetOffer(bool offer_enable, ContentSource src);
+
+ // Specifies whether the provisional answer indicates the use of RTCP mux.
+ bool SetProvisionalAnswer(bool answer_enable, ContentSource src);
+
+ // Specifies whether the answer indicates the use of RTCP mux.
+ bool SetAnswer(bool answer_enable, ContentSource src);
+
+ // Determines whether the specified packet is RTCP.
+ // Returns false whenever mux demuxing is not (yet) enabled, regardless
+ // of packet contents.
+ bool DemuxRtcp(const char* data, int len);
+
+ private:
+ // Guards for legal offer/answer transitions; see the .cc for details.
+ bool ExpectOffer(bool offer_enable, ContentSource source);
+ bool ExpectAnswer(ContentSource source);
+ enum State {
+ // RTCP mux filter unused.
+ ST_INIT,
+ // Offer with RTCP mux enabled received.
+ // RTCP mux filter is not active.
+ ST_RECEIVEDOFFER,
+ // Offer with RTCP mux enabled sent.
+ // RTCP mux filter can demux incoming packets but is not active.
+ // NOTE: enumerator order matters — DemuxRtcp compares state_ against
+ // ST_SENTOFFER with operator<.
+ ST_SENTOFFER,
+ // RTCP mux filter is active but the sent answer is only provisional.
+ // When the final answer is set, the state transitions to ST_ACTIVE or
+ // ST_INIT.
+ ST_SENTPRANSWER,
+ // RTCP mux filter is active but the received answer is only provisional.
+ // When the final answer is set, the state transitions to ST_ACTIVE or
+ // ST_INIT.
+ ST_RECEIVEDPRANSWER,
+ // Offer and answer set, RTCP mux enabled. It is not possible to de-activate
+ // the filter.
+ ST_ACTIVE
+ };
+ // Current position in the offer/answer state machine above.
+ State state_;
+ // Whether the most recent offer enabled RTCP mux.
+ bool offer_enable_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_RTCPMUXFILTER_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter_unittest.cc
new file mode 100644
index 00000000000..ad3349838f1
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/rtcpmuxfilter_unittest.cc
@@ -0,0 +1,212 @@
+// libjingle
+// Copyright 2011 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "talk/session/media/rtcpmuxfilter.h"
+
+#include "talk/base/gunit.h"
+#include "talk/media/base/testutils.h"
+
+TEST(RtcpMuxFilterTest, DemuxRtcpSender) {
+ cricket::RtcpMuxFilter filter;
+ const char data[] = { 0, 73, 0, 0 };
+ const int len = 4;
+
+ // Init state - refuse to demux
+ EXPECT_FALSE(filter.DemuxRtcp(data, len));
+ // After sent offer, demux should be enabled
+ filter.SetOffer(true, cricket::CS_LOCAL);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+ // Remote accepted, demux should be enabled
+ filter.SetAnswer(true, cricket::CS_REMOTE);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+}
+
+TEST(RtcpMuxFilterTest, DemuxRtcpReceiver) {
+ cricket::RtcpMuxFilter filter;
+ const char data[] = { 0, 73, 0, 0 };
+ const int len = 4;
+
+ // Init state - refuse to demux
+ EXPECT_FALSE(filter.DemuxRtcp(data, len));
+ // After received offer, demux should not be enabled
+ filter.SetOffer(true, cricket::CS_REMOTE);
+ EXPECT_FALSE(filter.DemuxRtcp(data, len));
+ // We accept, demux is now enabled
+ filter.SetAnswer(true, cricket::CS_LOCAL);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+}
+
+TEST(RtcpMuxFilterTest, DemuxRtcpSenderProvisionalAnswer) {
+ cricket::RtcpMuxFilter filter;
+ const char data[] = { 0, 73, 0, 0 };
+ const int len = 4;
+
+ filter.SetOffer(true, cricket::CS_REMOTE);
+ // Received provisional answer without mux enabled.
+ filter.SetProvisionalAnswer(false, cricket::CS_LOCAL);
+ EXPECT_FALSE(filter.DemuxRtcp(data, len));
+ // Received provisional answer with mux enabled.
+ filter.SetProvisionalAnswer(true, cricket::CS_LOCAL);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+  // We send the final answer, demux should remain enabled.
+ filter.SetAnswer(true, cricket::CS_LOCAL);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+}
+
+TEST(RtcpMuxFilterTest, DemuxRtcpReceiverProvisionalAnswer) {
+ cricket::RtcpMuxFilter filter;
+ const char data[] = { 0, 73, 0, 0 };
+ const int len = 4;
+
+ filter.SetOffer(true, cricket::CS_LOCAL);
+ // Received provisional answer without mux enabled.
+ filter.SetProvisionalAnswer(false, cricket::CS_REMOTE);
+ // After sent offer, demux should be enabled until we have received a
+ // final answer.
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+ // Received provisional answer with mux enabled.
+ filter.SetProvisionalAnswer(true, cricket::CS_REMOTE);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+ // Remote accepted, demux should be enabled.
+ filter.SetAnswer(true, cricket::CS_REMOTE);
+ EXPECT_TRUE(filter.DemuxRtcp(data, len));
+}
+
+TEST(RtcpMuxFilterTest, IsActiveSender) {
+ cricket::RtcpMuxFilter filter;
+ // Init state - not active
+ EXPECT_FALSE(filter.IsActive());
+ // After sent offer, demux should not be active.
+ filter.SetOffer(true, cricket::CS_LOCAL);
+ EXPECT_FALSE(filter.IsActive());
+ // Remote accepted, filter is now active.
+ filter.SetAnswer(true, cricket::CS_REMOTE);
+ EXPECT_TRUE(filter.IsActive());
+}
+
+// Test that we can receive provisional answer and final answer.
+TEST(RtcpMuxFilterTest, ReceivePrAnswer) {
+ cricket::RtcpMuxFilter filter;
+ filter.SetOffer(true, cricket::CS_LOCAL);
+ // Received provisional answer with mux enabled.
+ EXPECT_TRUE(filter.SetProvisionalAnswer(true, cricket::CS_REMOTE));
+ // We are now active since both sender and receiver support mux.
+ EXPECT_TRUE(filter.IsActive());
+ // Received provisional answer with mux disabled.
+ EXPECT_TRUE(filter.SetProvisionalAnswer(false, cricket::CS_REMOTE));
+ // We are now inactive since the receiver doesn't support mux.
+ EXPECT_FALSE(filter.IsActive());
+ // Received final answer with mux enabled.
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.IsActive());
+}
+
+TEST(RtcpMuxFilterTest, IsActiveReceiver) {
+ cricket::RtcpMuxFilter filter;
+ // Init state - not active.
+ EXPECT_FALSE(filter.IsActive());
+ // After received offer, demux should not be active
+ filter.SetOffer(true, cricket::CS_REMOTE);
+ EXPECT_FALSE(filter.IsActive());
+ // We accept, filter is now active
+ filter.SetAnswer(true, cricket::CS_LOCAL);
+ EXPECT_TRUE(filter.IsActive());
+}
+
+// Test that we can send provisional answer and final answer.
+TEST(RtcpMuxFilterTest, SendPrAnswer) {
+ cricket::RtcpMuxFilter filter;
+ filter.SetOffer(true, cricket::CS_REMOTE);
+ // Send provisional answer with mux enabled.
+ EXPECT_TRUE(filter.SetProvisionalAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+  // Send provisional answer with mux disabled.
+ EXPECT_TRUE(filter.SetProvisionalAnswer(false, cricket::CS_LOCAL));
+ EXPECT_FALSE(filter.IsActive());
+ // Send final answer with mux enabled.
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+}
+
+// Test that we can enable the filter in an update.
+// We can not disable the filter later since that would mean we need to
+// recreate a rtcp transport channel.
+TEST(RtcpMuxFilterTest, EnableFilterDuringUpdate) {
+ cricket::RtcpMuxFilter filter;
+ EXPECT_FALSE(filter.IsActive());
+ EXPECT_TRUE(filter.SetOffer(false, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(false, cricket::CS_LOCAL));
+ EXPECT_FALSE(filter.IsActive());
+
+ EXPECT_TRUE(filter.SetOffer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+
+ EXPECT_FALSE(filter.SetOffer(false, cricket::CS_REMOTE));
+ EXPECT_FALSE(filter.SetAnswer(false, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+}
+
+// Test that SetOffer can be called twice.
+TEST(RtcpMuxFilterTest, SetOfferTwice) {
+ cricket::RtcpMuxFilter filter;
+
+ EXPECT_TRUE(filter.SetOffer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetOffer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+
+ cricket::RtcpMuxFilter filter2;
+ EXPECT_TRUE(filter2.SetOffer(false, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter2.SetOffer(false, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter2.SetAnswer(false, cricket::CS_REMOTE));
+ EXPECT_FALSE(filter2.IsActive());
+}
+
+// Test that the filter can be enabled twice.
+TEST(RtcpMuxFilterTest, EnableFilterTwiceDuringUpdate) {
+ cricket::RtcpMuxFilter filter;
+
+ EXPECT_TRUE(filter.SetOffer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+
+ EXPECT_TRUE(filter.SetOffer(true, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(true, cricket::CS_LOCAL));
+ EXPECT_TRUE(filter.IsActive());
+}
+
+// Test that the filter can be kept disabled during updates.
+TEST(RtcpMuxFilterTest, KeepFilterDisabledDuringUpdate) {
+ cricket::RtcpMuxFilter filter;
+
+ EXPECT_TRUE(filter.SetOffer(false, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(false, cricket::CS_LOCAL));
+ EXPECT_FALSE(filter.IsActive());
+
+ EXPECT_TRUE(filter.SetOffer(false, cricket::CS_REMOTE));
+ EXPECT_TRUE(filter.SetAnswer(false, cricket::CS_LOCAL));
+ EXPECT_FALSE(filter.IsActive());
+}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/soundclip.cc b/chromium/third_party/libjingle/source/talk/session/media/soundclip.cc
new file mode 100644
index 00000000000..44f457cda5c
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/soundclip.cc
@@ -0,0 +1,82 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/soundclip.h"
+
+namespace cricket {
+
+enum {
+ MSG_PLAYSOUND = 1,
+};
+
+struct PlaySoundMessageData : talk_base::MessageData {
+ PlaySoundMessageData(const void *c,
+ int l,
+ SoundclipMedia::SoundclipFlags f)
+ : clip(c),
+ len(l),
+ flags(f),
+ result(false) {
+ }
+
+ const void *clip;
+ int len;
+ SoundclipMedia::SoundclipFlags flags;
+ bool result;
+};
+
+Soundclip::Soundclip(talk_base::Thread *thread, SoundclipMedia *soundclip_media)
+ : worker_thread_(thread),
+ soundclip_media_(soundclip_media) {
+}
+
+bool Soundclip::PlaySound(const void *clip,
+ int len,
+ SoundclipMedia::SoundclipFlags flags) {
+ PlaySoundMessageData data(clip, len, flags);
+ worker_thread_->Send(this, MSG_PLAYSOUND, &data);
+ return data.result;
+}
+
+bool Soundclip::PlaySound_w(const void *clip,
+ int len,
+ SoundclipMedia::SoundclipFlags flags) {
+ return soundclip_media_->PlaySound(static_cast<const char *>(clip),
+ len,
+ flags);
+}
+
+void Soundclip::OnMessage(talk_base::Message *message) {
+ ASSERT(message->message_id == MSG_PLAYSOUND);
+ PlaySoundMessageData *data =
+ static_cast<PlaySoundMessageData *>(message->pdata);
+ data->result = PlaySound_w(data->clip,
+ data->len,
+ data->flags);
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/soundclip.h b/chromium/third_party/libjingle/source/talk/session/media/soundclip.h
new file mode 100644
index 00000000000..f057d8de3ee
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/soundclip.h
@@ -0,0 +1,70 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_SOUNDCLIP_H_
+#define TALK_SESSION_MEDIA_SOUNDCLIP_H_
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/mediaengine.h"
+
+namespace talk_base {
+
+class Thread;
+
+}
+
+namespace cricket {
+
+// Soundclip wraps SoundclipMedia to support marshalling calls to the proper
+// thread.
+class Soundclip : private talk_base::MessageHandler {
+ public:
+ Soundclip(talk_base::Thread* thread, SoundclipMedia* soundclip_media);
+
+ // Plays a sound out to the speakers with the given audio stream. The stream
+ // must be 16-bit little-endian 16 kHz PCM. If a stream is already playing
+ // on this Soundclip, it is stopped. If clip is NULL, nothing is played.
+ // Returns whether it was successful.
+ bool PlaySound(const void* clip,
+ int len,
+ SoundclipMedia::SoundclipFlags flags);
+
+ private:
+ bool PlaySound_w(const void* clip,
+ int len,
+ SoundclipMedia::SoundclipFlags flags);
+
+ // From MessageHandler
+ virtual void OnMessage(talk_base::Message* message);
+
+ talk_base::Thread* worker_thread_;
+ talk_base::scoped_ptr<SoundclipMedia> soundclip_media_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_SOUNDCLIP_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.cc b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.cc
new file mode 100644
index 00000000000..8e1c2c1c4fd
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.cc
@@ -0,0 +1,825 @@
+/*
+ * libjingle
+ * Copyright 2009 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef HAVE_CONFIG_H
+
+#include "talk/session/media/srtpfilter.h"
+
+#include <algorithm>
+#include <cstring>
+
+#include "talk/base/base64.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/timeutils.h"
+#include "talk/media/base/rtputils.h"
+
+// Enable this line to turn on SRTP debugging
+// #define SRTP_DEBUG
+
+#ifdef HAVE_SRTP
+#ifdef SRTP_RELATIVE_PATH
+#include "srtp.h" // NOLINT
+#else
+#include "third_party/libsrtp/include/srtp.h"
+#endif // SRTP_RELATIVE_PATH
+#ifdef _DEBUG
+extern "C" debug_module_t mod_srtp;
+extern "C" debug_module_t mod_auth;
+extern "C" debug_module_t mod_cipher;
+extern "C" debug_module_t mod_stat;
+extern "C" debug_module_t mod_alloc;
+extern "C" debug_module_t mod_aes_icm;
+extern "C" debug_module_t mod_aes_hmac;
+#endif
+#else
+// SrtpFilter needs that constant.
+#define SRTP_MASTER_KEY_LEN 30
+#endif // HAVE_SRTP
+
+namespace cricket {
+
+const char CS_AES_CM_128_HMAC_SHA1_80[] = "AES_CM_128_HMAC_SHA1_80";
+const char CS_AES_CM_128_HMAC_SHA1_32[] = "AES_CM_128_HMAC_SHA1_32";
+const int SRTP_MASTER_KEY_BASE64_LEN = SRTP_MASTER_KEY_LEN * 4 / 3;
+const int SRTP_MASTER_KEY_KEY_LEN = 16;
+const int SRTP_MASTER_KEY_SALT_LEN = 14;
+
+#ifndef HAVE_SRTP
+
+// This helper function is used on systems that don't (yet) have SRTP,
+// to log that the functions that require it won't do anything.
+namespace {
+bool SrtpNotAvailable(const char *func) {
+ LOG(LS_ERROR) << func << ": SRTP is not available on your system.";
+ return false;
+}
+} // anonymous namespace
+
+#endif // !HAVE_SRTP
+
+void EnableSrtpDebugging() {
+#ifdef HAVE_SRTP
+#ifdef _DEBUG
+ debug_on(mod_srtp);
+ debug_on(mod_auth);
+ debug_on(mod_cipher);
+ debug_on(mod_stat);
+ debug_on(mod_alloc);
+ debug_on(mod_aes_icm);
+ // debug_on(mod_aes_cbc);
+ // debug_on(mod_hmac);
+#endif
+#endif // HAVE_SRTP
+}
+
+// NOTE: This is called from ChannelManager D'tor.
+void ShutdownSrtp() {
+#ifdef HAVE_SRTP
+ // If srtp_dealloc is not executed then this will clear all existing sessions.
+ // This should be called when application is shutting down.
+ SrtpSession::Terminate();
+#endif
+}
+
+SrtpFilter::SrtpFilter()
+ : state_(ST_INIT),
+ signal_silent_time_in_ms_(0) {
+}
+
+SrtpFilter::~SrtpFilter() {
+}
+
+bool SrtpFilter::IsActive() const {
+ return state_ >= ST_ACTIVE;
+}
+
+bool SrtpFilter::SetOffer(const std::vector<CryptoParams>& offer_params,
+ ContentSource source) {
+ if (!ExpectOffer(source)) {
+ LOG(LS_ERROR) << "Wrong state to update SRTP offer";
+ return false;
+ }
+ return StoreParams(offer_params, source);
+}
+
+bool SrtpFilter::SetAnswer(const std::vector<CryptoParams>& answer_params,
+ ContentSource source) {
+ return DoSetAnswer(answer_params, source, true);
+}
+
+bool SrtpFilter::SetProvisionalAnswer(
+ const std::vector<CryptoParams>& answer_params,
+ ContentSource source) {
+ return DoSetAnswer(answer_params, source, false);
+}
+
+bool SrtpFilter::SetRtpParams(const std::string& send_cs,
+ const uint8* send_key, int send_key_len,
+ const std::string& recv_cs,
+ const uint8* recv_key, int recv_key_len) {
+ if (state_ == ST_ACTIVE) {
+ LOG(LS_ERROR) << "Tried to set SRTP Params when filter already active";
+ return false;
+ }
+ CreateSrtpSessions();
+ if (!send_session_->SetSend(send_cs, send_key, send_key_len))
+ return false;
+
+ if (!recv_session_->SetRecv(recv_cs, recv_key, recv_key_len))
+ return false;
+
+ state_ = ST_ACTIVE;
+
+ LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
+ << " send cipher_suite " << send_cs
+ << " recv cipher_suite " << recv_cs;
+
+ return true;
+}
+
+// This function is provided separately because DTLS-SRTP behaves
+// differently in RTP/RTCP mux and non-mux modes.
+//
+// - In the non-muxed case, RTP and RTCP are keyed with different
+// keys (from different DTLS handshakes), and so we need a new
+// SrtpSession.
+// - In the muxed case, they are keyed with the same keys, so
+// this function is not needed
+bool SrtpFilter::SetRtcpParams(const std::string& send_cs,
+ const uint8* send_key, int send_key_len,
+ const std::string& recv_cs,
+ const uint8* recv_key, int recv_key_len) {
+ // This can only be called once, but can be safely called after
+ // SetRtpParams
+ if (send_rtcp_session_ || recv_rtcp_session_) {
+ LOG(LS_ERROR) << "Tried to set SRTCP Params when filter already active";
+ return false;
+ }
+
+ send_rtcp_session_.reset(new SrtpSession());
+ SignalSrtpError.repeat(send_rtcp_session_->SignalSrtpError);
+ send_rtcp_session_->set_signal_silent_time(signal_silent_time_in_ms_);
+ if (!send_rtcp_session_->SetRecv(send_cs, send_key, send_key_len))
+ return false;
+
+ recv_rtcp_session_.reset(new SrtpSession());
+ SignalSrtpError.repeat(recv_rtcp_session_->SignalSrtpError);
+ recv_rtcp_session_->set_signal_silent_time(signal_silent_time_in_ms_);
+ if (!recv_rtcp_session_->SetRecv(recv_cs, recv_key, recv_key_len))
+ return false;
+
+ LOG(LS_INFO) << "SRTCP activated with negotiated parameters:"
+ << " send cipher_suite " << send_cs
+ << " recv cipher_suite " << recv_cs;
+
+ return true;
+}
+
+bool SrtpFilter::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
+ if (!IsActive()) {
+ LOG(LS_WARNING) << "Failed to ProtectRtp: SRTP not active";
+ return false;
+ }
+ return send_session_->ProtectRtp(p, in_len, max_len, out_len);
+}
+
+bool SrtpFilter::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
+ if (!IsActive()) {
+ LOG(LS_WARNING) << "Failed to ProtectRtcp: SRTP not active";
+ return false;
+ }
+ if (send_rtcp_session_) {
+ return send_rtcp_session_->ProtectRtcp(p, in_len, max_len, out_len);
+ } else {
+ return send_session_->ProtectRtcp(p, in_len, max_len, out_len);
+ }
+}
+
+bool SrtpFilter::UnprotectRtp(void* p, int in_len, int* out_len) {
+ if (!IsActive()) {
+ LOG(LS_WARNING) << "Failed to UnprotectRtp: SRTP not active";
+ return false;
+ }
+ return recv_session_->UnprotectRtp(p, in_len, out_len);
+}
+
+bool SrtpFilter::UnprotectRtcp(void* p, int in_len, int* out_len) {
+ if (!IsActive()) {
+ LOG(LS_WARNING) << "Failed to UnprotectRtcp: SRTP not active";
+ return false;
+ }
+ if (recv_rtcp_session_) {
+ return recv_rtcp_session_->UnprotectRtcp(p, in_len, out_len);
+ } else {
+ return recv_session_->UnprotectRtcp(p, in_len, out_len);
+ }
+}
+
+void SrtpFilter::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
+ signal_silent_time_in_ms_ = signal_silent_time_in_ms;
+ if (state_ == ST_ACTIVE) {
+ send_session_->set_signal_silent_time(signal_silent_time_in_ms);
+ recv_session_->set_signal_silent_time(signal_silent_time_in_ms);
+ if (send_rtcp_session_)
+ send_rtcp_session_->set_signal_silent_time(signal_silent_time_in_ms);
+ if (recv_rtcp_session_)
+ recv_rtcp_session_->set_signal_silent_time(signal_silent_time_in_ms);
+ }
+}
+
+bool SrtpFilter::ExpectOffer(ContentSource source) {
+ return ((state_ == ST_INIT) ||
+ (state_ == ST_ACTIVE) ||
+ (state_ == ST_SENTOFFER && source == CS_LOCAL) ||
+ (state_ == ST_SENTUPDATEDOFFER && source == CS_LOCAL) ||
+ (state_ == ST_RECEIVEDOFFER && source == CS_REMOTE) ||
+ (state_ == ST_RECEIVEDUPDATEDOFFER && source == CS_REMOTE));
+}
+
+bool SrtpFilter::StoreParams(const std::vector<CryptoParams>& params,
+ ContentSource source) {
+ offer_params_ = params;
+ if (state_ == ST_INIT) {
+ state_ = (source == CS_LOCAL) ? ST_SENTOFFER : ST_RECEIVEDOFFER;
+ } else { // state >= ST_ACTIVE
+ state_ =
+ (source == CS_LOCAL) ? ST_SENTUPDATEDOFFER : ST_RECEIVEDUPDATEDOFFER;
+ }
+ return true;
+}
+
+bool SrtpFilter::ExpectAnswer(ContentSource source) {
+ return ((state_ == ST_SENTOFFER && source == CS_REMOTE) ||
+ (state_ == ST_RECEIVEDOFFER && source == CS_LOCAL) ||
+ (state_ == ST_SENTUPDATEDOFFER && source == CS_REMOTE) ||
+ (state_ == ST_RECEIVEDUPDATEDOFFER && source == CS_LOCAL) ||
+ (state_ == ST_SENTPRANSWER_NO_CRYPTO && source == CS_LOCAL) ||
+ (state_ == ST_SENTPRANSWER && source == CS_LOCAL) ||
+ (state_ == ST_RECEIVEDPRANSWER_NO_CRYPTO && source == CS_REMOTE) ||
+ (state_ == ST_RECEIVEDPRANSWER && source == CS_REMOTE));
+}
+
+bool SrtpFilter::DoSetAnswer(const std::vector<CryptoParams>& answer_params,
+ ContentSource source,
+ bool final) {
+ if (!ExpectAnswer(source)) {
+ LOG(LS_ERROR) << "Invalid state for SRTP answer";
+ return false;
+ }
+
+  // If the answer doesn't request crypto, complete the negotiation of an
+ // unencrypted session.
+ // Otherwise, finalize the parameters and apply them.
+ if (answer_params.empty()) {
+ if (final) {
+ return ResetParams();
+ } else {
+ // Need to wait for the final answer to decide if
+ // we should go to Active state.
+ state_ = (source == CS_LOCAL) ? ST_SENTPRANSWER_NO_CRYPTO :
+ ST_RECEIVEDPRANSWER_NO_CRYPTO;
+ return true;
+ }
+ }
+ CryptoParams selected_params;
+ if (!NegotiateParams(answer_params, &selected_params))
+ return false;
+ const CryptoParams& send_params =
+ (source == CS_REMOTE) ? selected_params : answer_params[0];
+ const CryptoParams& recv_params =
+ (source == CS_REMOTE) ? answer_params[0] : selected_params;
+ if (!ApplyParams(send_params, recv_params)) {
+ return false;
+ }
+
+ if (final) {
+ offer_params_.clear();
+ state_ = ST_ACTIVE;
+ } else {
+ state_ =
+ (source == CS_LOCAL) ? ST_SENTPRANSWER : ST_RECEIVEDPRANSWER;
+ }
+ return true;
+}
+
+void SrtpFilter::CreateSrtpSessions() {
+ send_session_.reset(new SrtpSession());
+ applied_send_params_ = CryptoParams();
+ recv_session_.reset(new SrtpSession());
+ applied_recv_params_ = CryptoParams();
+
+ SignalSrtpError.repeat(send_session_->SignalSrtpError);
+ SignalSrtpError.repeat(recv_session_->SignalSrtpError);
+
+ send_session_->set_signal_silent_time(signal_silent_time_in_ms_);
+ recv_session_->set_signal_silent_time(signal_silent_time_in_ms_);
+}
+
+bool SrtpFilter::NegotiateParams(const std::vector<CryptoParams>& answer_params,
+ CryptoParams* selected_params) {
+ // We're processing an accept. We should have exactly one set of params,
+ // unless the offer didn't mention crypto, in which case we shouldn't be here.
+ bool ret = (answer_params.size() == 1U && !offer_params_.empty());
+ if (ret) {
+ // We should find a match between the answer params and the offered params.
+ std::vector<CryptoParams>::const_iterator it;
+ for (it = offer_params_.begin(); it != offer_params_.end(); ++it) {
+ if (answer_params[0].Matches(*it)) {
+ break;
+ }
+ }
+
+ if (it != offer_params_.end()) {
+ *selected_params = *it;
+ } else {
+ ret = false;
+ }
+ }
+
+ if (!ret) {
+ LOG(LS_WARNING) << "Invalid parameters in SRTP answer";
+ }
+ return ret;
+}
+
+bool SrtpFilter::ApplyParams(const CryptoParams& send_params,
+ const CryptoParams& recv_params) {
+ // TODO(jiayl): Split this method to apply send and receive CryptoParams
+ // independently, so that we can skip one method when either send or receive
+ // CryptoParams is unchanged.
+ if (applied_send_params_.cipher_suite == send_params.cipher_suite &&
+ applied_send_params_.key_params == send_params.key_params &&
+ applied_recv_params_.cipher_suite == recv_params.cipher_suite &&
+ applied_recv_params_.key_params == recv_params.key_params) {
+ LOG(LS_INFO) << "Applying the same SRTP parameters again. No-op.";
+
+ // We do not want to reset the ROC if the keys are the same. So just return.
+ return true;
+ }
+ // TODO(juberti): Zero these buffers after use.
+ bool ret;
+ uint8 send_key[SRTP_MASTER_KEY_LEN], recv_key[SRTP_MASTER_KEY_LEN];
+ ret = (ParseKeyParams(send_params.key_params, send_key, sizeof(send_key)) &&
+ ParseKeyParams(recv_params.key_params, recv_key, sizeof(recv_key)));
+ if (ret) {
+ CreateSrtpSessions();
+ ret = (send_session_->SetSend(send_params.cipher_suite,
+ send_key, sizeof(send_key)) &&
+ recv_session_->SetRecv(recv_params.cipher_suite,
+ recv_key, sizeof(recv_key)));
+ }
+ if (ret) {
+ LOG(LS_INFO) << "SRTP activated with negotiated parameters:"
+ << " send cipher_suite " << send_params.cipher_suite
+ << " recv cipher_suite " << recv_params.cipher_suite;
+ applied_send_params_ = send_params;
+ applied_recv_params_ = recv_params;
+ } else {
+ LOG(LS_WARNING) << "Failed to apply negotiated SRTP parameters";
+ }
+ return ret;
+}
+
+bool SrtpFilter::ResetParams() {
+ offer_params_.clear();
+ state_ = ST_INIT;
+ LOG(LS_INFO) << "SRTP reset to init state";
+ return true;
+}
+
+bool SrtpFilter::ParseKeyParams(const std::string& key_params,
+ uint8* key, int len) {
+ // example key_params: "inline:YUJDZGVmZ2hpSktMbW9QUXJzVHVWd3l6MTIzNDU2"
+
+ // Fail if key-method is wrong.
+ if (key_params.find("inline:") != 0) {
+ return false;
+ }
+
+ // Fail if base64 decode fails, or the key is the wrong size.
+ std::string key_b64(key_params.substr(7)), key_str;
+ if (!talk_base::Base64::Decode(key_b64, talk_base::Base64::DO_STRICT,
+ &key_str, NULL) ||
+ static_cast<int>(key_str.size()) != len) {
+ return false;
+ }
+
+ memcpy(key, key_str.c_str(), len);
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SrtpSession
+
+#ifdef HAVE_SRTP
+
+bool SrtpSession::inited_ = false;
+
+SrtpSession::SrtpSession()
+ : session_(NULL),
+ rtp_auth_tag_len_(0),
+ rtcp_auth_tag_len_(0),
+ srtp_stat_(new SrtpStat()),
+ last_send_seq_num_(-1) {
+ sessions()->push_back(this);
+ SignalSrtpError.repeat(srtp_stat_->SignalSrtpError);
+}
+
+SrtpSession::~SrtpSession() {
+ sessions()->erase(std::find(sessions()->begin(), sessions()->end(), this));
+ if (session_) {
+ srtp_dealloc(session_);
+ }
+}
+
+bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
+ return SetKey(ssrc_any_outbound, cs, key, len);
+}
+
+bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
+ return SetKey(ssrc_any_inbound, cs, key, len);
+}
+
+bool SrtpSession::ProtectRtp(void* p, int in_len, int max_len, int* out_len) {
+ if (!session_) {
+ LOG(LS_WARNING) << "Failed to protect SRTP packet: no SRTP Session";
+ return false;
+ }
+
+ int need_len = in_len + rtp_auth_tag_len_; // NOLINT
+ if (max_len < need_len) {
+ LOG(LS_WARNING) << "Failed to protect SRTP packet: The buffer length "
+ << max_len << " is less than the needed " << need_len;
+ return false;
+ }
+
+ *out_len = in_len;
+ int err = srtp_protect(session_, p, out_len);
+ uint32 ssrc;
+ if (GetRtpSsrc(p, in_len, &ssrc)) {
+ srtp_stat_->AddProtectRtpResult(ssrc, err);
+ }
+ int seq_num;
+ GetRtpSeqNum(p, in_len, &seq_num);
+ if (err != err_status_ok) {
+ LOG(LS_WARNING) << "Failed to protect SRTP packet, seqnum="
+ << seq_num << ", err=" << err << ", last seqnum="
+ << last_send_seq_num_;
+ return false;
+ }
+ last_send_seq_num_ = seq_num;
+ return true;
+}
+
+bool SrtpSession::ProtectRtcp(void* p, int in_len, int max_len, int* out_len) {
+ if (!session_) {
+ LOG(LS_WARNING) << "Failed to protect SRTCP packet: no SRTP Session";
+ return false;
+ }
+
+ int need_len = in_len + sizeof(uint32) + rtcp_auth_tag_len_; // NOLINT
+ if (max_len < need_len) {
+ LOG(LS_WARNING) << "Failed to protect SRTCP packet: The buffer length "
+ << max_len << " is less than the needed " << need_len;
+ return false;
+ }
+
+ *out_len = in_len;
+ int err = srtp_protect_rtcp(session_, p, out_len);
+ srtp_stat_->AddProtectRtcpResult(err);
+ if (err != err_status_ok) {
+ LOG(LS_WARNING) << "Failed to protect SRTCP packet, err=" << err;
+ return false;
+ }
+ return true;
+}
+
+bool SrtpSession::UnprotectRtp(void* p, int in_len, int* out_len) {
+ if (!session_) {
+ LOG(LS_WARNING) << "Failed to unprotect SRTP packet: no SRTP Session";
+ return false;
+ }
+
+ *out_len = in_len;
+ int err = srtp_unprotect(session_, p, out_len);
+ uint32 ssrc;
+ if (GetRtpSsrc(p, in_len, &ssrc)) {
+ srtp_stat_->AddUnprotectRtpResult(ssrc, err);
+ }
+ if (err != err_status_ok) {
+ LOG(LS_WARNING) << "Failed to unprotect SRTP packet, err=" << err;
+ return false;
+ }
+ return true;
+}
+
+// Decrypts and verifies a single RTCP compound packet in place. On success
+// |*out_len| is the packet size with the SRTCP index and auth tag stripped.
+bool SrtpSession::UnprotectRtcp(void* p, int in_len, int* out_len) {
+  if (!session_) {
+    LOG(LS_WARNING) << "Failed to unprotect SRTCP packet: no SRTP Session";
+    return false;
+  }
+
+  *out_len = in_len;
+  int err = srtp_unprotect_rtcp(session_, p, out_len);
+  // RTCP stats are not attributed to a specific SSRC (see SrtpStat).
+  srtp_stat_->AddUnprotectRtcpResult(err);
+  if (err != err_status_ok) {
+    LOG(LS_WARNING) << "Failed to unprotect SRTCP packet, err=" << err;
+    return false;
+  }
+  return true;
+}
+
+// Forwards the error-signaling silence window (in ms) to the stats collector.
+void SrtpSession::set_signal_silent_time(uint32 signal_silent_time_in_ms) {
+  srtp_stat_->set_signal_silent_time(signal_silent_time_in_ms);
+}
+
+// Creates the underlying libsrtp session with the given cipher suite and
+// master key/salt. |type| is a libsrtp ssrc_type_t value selecting which
+// SSRCs the policy applies to. Fails if the session was already created.
+bool SrtpSession::SetKey(int type, const std::string& cs,
+                         const uint8* key, int len) {
+  if (session_) {
+    LOG(LS_ERROR) << "Failed to create SRTP session: "
+                  << "SRTP session already created";
+    return false;
+  }
+
+  // Lazily performs the one-time, process-wide libsrtp initialization.
+  if (!Init()) {
+    return false;
+  }
+
+  srtp_policy_t policy;
+  memset(&policy, 0, sizeof(policy));
+
+  if (cs == CS_AES_CM_128_HMAC_SHA1_80) {
+    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtp);
+    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);
+  } else if (cs == CS_AES_CM_128_HMAC_SHA1_32) {
+    crypto_policy_set_aes_cm_128_hmac_sha1_32(&policy.rtp);   // rtp is 32,
+    crypto_policy_set_aes_cm_128_hmac_sha1_80(&policy.rtcp);  // rtcp still 80
+  } else {
+    LOG(LS_WARNING) << "Failed to create SRTP session: unsupported"
+                    << " cipher_suite " << cs.c_str();
+    return false;
+  }
+
+  // The master key blob must be exactly key + salt sized.
+  if (!key || len != SRTP_MASTER_KEY_LEN) {
+    LOG(LS_WARNING) << "Failed to create SRTP session: invalid key";
+    return false;
+  }
+
+  policy.ssrc.type = static_cast<ssrc_type_t>(type);
+  policy.ssrc.value = 0;
+  policy.key = const_cast<uint8*>(key);
+  // TODO(astor) parse window size from WSH session-param
+  policy.window_size = 1024;
+  policy.allow_repeat_tx = 1;
+  policy.next = NULL;
+
+  int err = srtp_create(&session_, &policy);
+  if (err != err_status_ok) {
+    LOG(LS_ERROR) << "Failed to create SRTP session, err=" << err;
+    return false;
+  }
+
+  // Cache the negotiated tag lengths; Protect* uses them to size-check the
+  // caller's output buffer.
+  rtp_auth_tag_len_ = policy.rtp.auth_tag_len;
+  rtcp_auth_tag_len_ = policy.rtcp.auth_tag_len;
+  return true;
+}
+
+// One-time, process-wide initialization of libsrtp, plus installation of the
+// event handler that routes libsrtp events to HandleEventThunk.
+// NOTE(review): |inited_| is an unsynchronized static flag; this assumes all
+// sessions are created from a single thread -- confirm with callers.
+bool SrtpSession::Init() {
+  if (!inited_) {
+    int err;
+    err = srtp_init();
+    if (err != err_status_ok) {
+      LOG(LS_ERROR) << "Failed to init SRTP, err=" << err;
+      return false;
+    }
+
+    err = srtp_install_event_handler(&SrtpSession::HandleEventThunk);
+    if (err != err_status_ok) {
+      LOG(LS_ERROR) << "Failed to install SRTP event handler, err=" << err;
+      return false;
+    }
+
+    inited_ = true;
+  }
+
+  return true;
+}
+
+// Shuts libsrtp down process-wide (static). If srtp_shutdown() fails,
+// |inited_| remains true, so the shutdown can be attempted again later.
+void SrtpSession::Terminate() {
+  if (inited_) {
+    int err = srtp_shutdown();
+    if (err) {
+      LOG(LS_ERROR) << "srtp_shutdown failed. err=" << err;
+      return;
+    }
+    inited_ = false;
+  }
+}
+
+// Handles a libsrtp event delivered for this session. Log-only: no recovery
+// action (e.g. re-keying on key-limit events) is taken here.
+void SrtpSession::HandleEvent(const srtp_event_data_t* ev) {
+  switch (ev->event) {
+    case event_ssrc_collision:
+      LOG(LS_INFO) << "SRTP event: SSRC collision";
+      break;
+    case event_key_soft_limit:
+      LOG(LS_INFO) << "SRTP event: reached soft key usage limit";
+      break;
+    case event_key_hard_limit:
+      LOG(LS_INFO) << "SRTP event: reached hard key usage limit";
+      break;
+    case event_packet_index_limit:
+      LOG(LS_INFO) << "SRTP event: reached hard packet limit (2^48 packets)";
+      break;
+    default:
+      LOG(LS_INFO) << "SRTP event: unknown " << ev->event;
+      break;
+  }
+}
+
+// Static libsrtp event callback. Looks up the SrtpSession that owns
+// |ev->session| in the global registry and forwards the event to it.
+void SrtpSession::HandleEventThunk(srtp_event_data_t* ev) {
+  for (std::list<SrtpSession*>::iterator it = sessions()->begin();
+       it != sessions()->end(); ++it) {
+    if ((*it)->session_ == ev->session) {
+      (*it)->HandleEvent(ev);
+      break;
+    }
+  }
+}
+
+// Returns the process-wide registry of live SrtpSession objects, created on
+// first use. Used by HandleEventThunk to dispatch libsrtp events.
+std::list<SrtpSession*>* SrtpSession::sessions() {
+  LIBJINGLE_DEFINE_STATIC_LOCAL(std::list<SrtpSession*>, sessions, ());
+  return &sessions;
+}
+
+#else // !HAVE_SRTP
+
+// On some systems, SRTP is not (yet) available.
+
+// Stub implementation compiled when libsrtp is unavailable (HAVE_SRTP unset).
+// Every operation fails via SrtpNotAvailable(), which logs the caller's name.
+SrtpSession::SrtpSession() {
+  LOG(WARNING) << "SRTP implementation is missing.";
+}
+
+SrtpSession::~SrtpSession() {
+}
+
+bool SrtpSession::SetSend(const std::string& cs, const uint8* key, int len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+bool SrtpSession::SetRecv(const std::string& cs, const uint8* key, int len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+bool SrtpSession::ProtectRtp(void* data, int in_len, int max_len,
+                             int* out_len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+bool SrtpSession::ProtectRtcp(void* data, int in_len, int max_len,
+                              int* out_len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+bool SrtpSession::UnprotectRtp(void* data, int in_len, int* out_len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+bool SrtpSession::UnprotectRtcp(void* data, int in_len, int* out_len) {
+  return SrtpNotAvailable(__FUNCTION__);
+}
+
+void SrtpSession::set_signal_silent_time(uint32 signal_silent_time) {
+  // Do nothing; there are no stats to configure without libsrtp.
+}
+
+#endif // HAVE_SRTP
+
+///////////////////////////////////////////////////////////////////////////////
+// SrtpStat
+
+#ifdef HAVE_SRTP
+
+// By default, each distinct error is signaled at most once per 1000 ms.
+SrtpStat::SrtpStat()
+    : signal_silent_time_(1000) {
+}
+
+// Maps a libsrtp protect result code to a (ssrc, PROTECT, error) key and
+// feeds it to the rate-limited signaling handler.
+void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
+  FailureKey key;
+  key.ssrc = ssrc;
+  key.mode = SrtpFilter::PROTECT;
+  switch (result) {
+    case err_status_ok:
+      key.error = SrtpFilter::ERROR_NONE;
+      break;
+    case err_status_auth_fail:
+      key.error = SrtpFilter::ERROR_AUTH;
+      break;
+    default:
+      // Any other libsrtp error is reported as a generic failure.
+      key.error = SrtpFilter::ERROR_FAIL;
+  }
+  HandleSrtpResult(key);
+}
+
+// Maps a libsrtp unprotect result code to a (ssrc, UNPROTECT, error) key and
+// feeds it to the rate-limited signaling handler. Unlike protection, replay
+// failures are possible here and get their own error category.
+void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
+  FailureKey key;
+  key.ssrc = ssrc;
+  key.mode = SrtpFilter::UNPROTECT;
+  switch (result) {
+    case err_status_ok:
+      key.error = SrtpFilter::ERROR_NONE;
+      break;
+    case err_status_auth_fail:
+      key.error = SrtpFilter::ERROR_AUTH;
+      break;
+    case err_status_replay_fail:
+    case err_status_replay_old:
+      key.error = SrtpFilter::ERROR_REPLAY;
+      break;
+    default:
+      key.error = SrtpFilter::ERROR_FAIL;
+  }
+  HandleSrtpResult(key);
+}
+
+// RTCP results reuse the RTP handlers with a sentinel SSRC of 0.
+void SrtpStat::AddProtectRtcpResult(int result) {
+  AddProtectRtpResult(0U, result);
+}
+
+void SrtpStat::AddUnprotectRtcpResult(int result) {
+  AddUnprotectRtpResult(0U, result);
+}
+
+// Signals an error the first time it is seen; afterwards the same
+// (ssrc, mode, error) key is silenced for |signal_silent_time_| ms
+// (default 1 sec). Successful results (ERROR_NONE) are ignored.
+void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
+  if (key.error != SrtpFilter::ERROR_NONE) {
+    // operator[] default-constructs the stat on first sight of this key.
+    FailureStat* stat = &(failures_[key]);
+    uint32 current_time = talk_base::Time();
+    // last_signal_time == 0 means this key has never been signaled.
+    if (stat->last_signal_time == 0 ||
+        talk_base::TimeDiff(current_time, stat->last_signal_time) >
+        static_cast<int>(signal_silent_time_)) {
+      SignalSrtpError(key.ssrc, key.mode, key.error);
+      stat->last_signal_time = current_time;
+    }
+  }
+}
+
+#else // !HAVE_SRTP
+
+// On some systems, SRTP is not (yet) available.
+
+// Stub implementation compiled when libsrtp is unavailable (HAVE_SRTP unset).
+SrtpStat::SrtpStat()
+    : signal_silent_time_(1000) {
+  LOG(WARNING) << "SRTP implementation is missing.";
+}
+
+void SrtpStat::AddProtectRtpResult(uint32 ssrc, int result) {
+  SrtpNotAvailable(__FUNCTION__);
+}
+
+void SrtpStat::AddUnprotectRtpResult(uint32 ssrc, int result) {
+  SrtpNotAvailable(__FUNCTION__);
+}
+
+void SrtpStat::AddProtectRtcpResult(int result) {
+  SrtpNotAvailable(__FUNCTION__);
+}
+
+void SrtpStat::AddUnprotectRtcpResult(int result) {
+  SrtpNotAvailable(__FUNCTION__);
+}
+
+void SrtpStat::HandleSrtpResult(const SrtpStat::FailureKey& key) {
+  SrtpNotAvailable(__FUNCTION__);
+}
+
+#endif // HAVE_SRTP
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.h b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.h
new file mode 100644
index 00000000000..9b48dcd957c
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter.h
@@ -0,0 +1,308 @@
+/*
+ * libjingle
+ * Copyright 2009 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_SRTPFILTER_H_
+#define TALK_SESSION_MEDIA_SRTPFILTER_H_
+
+#include <list>
+#include <map>
+#include <string>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/sigslotrepeater.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/p2p/base/sessiondescription.h"
+
+// Forward declaration to avoid pulling in libsrtp headers here
+struct srtp_event_data_t;
+struct srtp_ctx_t;
+typedef srtp_ctx_t* srtp_t;
+struct srtp_policy_t;
+
+namespace cricket {
+
+// Cipher suite to use for SRTP. Typically an 80-bit HMAC will be used, except
+// in applications (voice) where the additional bandwidth may be significant.
+// An 80-bit HMAC is always used for SRTCP.
+// 128-bit AES with 80-bit SHA-1 HMAC.
+extern const char CS_AES_CM_128_HMAC_SHA1_80[];
+// 128-bit AES with 32-bit SHA-1 HMAC.
+extern const char CS_AES_CM_128_HMAC_SHA1_32[];
+// Key is 128 bits and salt is 112 bits == 30 bytes. B64 bloat => 40 bytes.
+extern const int SRTP_MASTER_KEY_BASE64_LEN;
+
+// Needed for DTLS-SRTP
+extern const int SRTP_MASTER_KEY_KEY_LEN;
+extern const int SRTP_MASTER_KEY_SALT_LEN;
+
+class SrtpSession;
+class SrtpStat;
+
+void EnableSrtpDebugging();
+void ShutdownSrtp();
+
+// Class to transform SRTP to/from RTP.
+// Initialize by calling SetSend with the local security params, then call
+// SetRecv once the remote security params are received. At that point
+// Protect/UnprotectRt(c)p can be called to encrypt/decrypt data.
+// TODO: Figure out concurrency policy for SrtpFilter.
+class SrtpFilter {
+ public:
+  // Direction of the SRTP operation that produced an error.
+  enum Mode {
+    PROTECT,
+    UNPROTECT
+  };
+  enum Error {
+    ERROR_NONE,
+    ERROR_FAIL,    // Generic failure.
+    ERROR_AUTH,    // Authentication (HMAC) failure.
+    ERROR_REPLAY,  // Replay-protection failure (unprotect only).
+  };
+
+  SrtpFilter();
+  ~SrtpFilter();
+
+  // Whether the filter is active (i.e. crypto has been properly negotiated).
+  bool IsActive() const;
+
+  // Indicates which crypto algorithms and keys were contained in the offer.
+  // offer_params should contain a list of available parameters to use, or none,
+  // if crypto is not desired. This must be called before SetAnswer.
+  bool SetOffer(const std::vector<CryptoParams>& offer_params,
+                ContentSource source);
+  // Same as SetAnswer. But multiple calls are allowed to SetProvisionalAnswer
+  // after a call to SetOffer.
+  bool SetProvisionalAnswer(const std::vector<CryptoParams>& answer_params,
+                            ContentSource source);
+  // Indicates which crypto algorithms and keys were contained in the answer.
+  // answer_params should contain the negotiated parameters, which may be none,
+  // if crypto was not desired or could not be negotiated (and not required).
+  // This must be called after SetOffer. If crypto negotiation completes
+  // successfully, this will advance the filter to the active state.
+  bool SetAnswer(const std::vector<CryptoParams>& answer_params,
+                 ContentSource source);
+
+  // Just set up both sets of keys directly.
+  // Used with DTLS-SRTP.
+  bool SetRtpParams(const std::string& send_cs,
+                    const uint8* send_key, int send_key_len,
+                    const std::string& recv_cs,
+                    const uint8* recv_key, int recv_key_len);
+  bool SetRtcpParams(const std::string& send_cs,
+                     const uint8* send_key, int send_key_len,
+                     const std::string& recv_cs,
+                     const uint8* recv_key, int recv_key_len);
+
+  // Encrypts/signs an individual RTP/RTCP packet, in-place.
+  // If an HMAC is used, this will increase the packet size.
+  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
+  bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
+  // Decrypts/verifies an individual RTP/RTCP packet.
+  // If an HMAC is used, this will decrease the packet size.
+  bool UnprotectRtp(void* data, int in_len, int* out_len);
+  bool UnprotectRtcp(void* data, int in_len, int* out_len);
+
+  // Update the silent threshold (in ms) for signaling errors.
+  void set_signal_silent_time(uint32 signal_silent_time_in_ms);
+
+  // Repeater for SRTP error reports (ssrc, mode, error).
+  sigslot::repeater3<uint32, Mode, Error> SignalSrtpError;
+
+ protected:
+  bool ExpectOffer(ContentSource source);
+  bool StoreParams(const std::vector<CryptoParams>& params,
+                   ContentSource source);
+  bool ExpectAnswer(ContentSource source);
+  bool DoSetAnswer(const std::vector<CryptoParams>& answer_params,
+                   ContentSource source,
+                   bool final);
+  void CreateSrtpSessions();
+  bool NegotiateParams(const std::vector<CryptoParams>& answer_params,
+                       CryptoParams* selected_params);
+  bool ApplyParams(const CryptoParams& send_params,
+                   const CryptoParams& recv_params);
+  bool ResetParams();
+  static bool ParseKeyParams(const std::string& params, uint8* key, int len);
+
+ private:
+  // Offer/answer negotiation state machine.
+  enum State {
+    ST_INIT,  // SRTP filter unused.
+    ST_SENTOFFER,  // Offer with SRTP parameters sent.
+    ST_RECEIVEDOFFER,  // Offer with SRTP parameters received.
+    ST_SENTPRANSWER_NO_CRYPTO,  // Sent provisional answer without crypto.
+    // Received provisional answer without crypto.
+    ST_RECEIVEDPRANSWER_NO_CRYPTO,
+    ST_ACTIVE,  // Offer and answer set.
+    // SRTP filter is active but new parameters are offered.
+    // When the answer is set, the state transitions to ST_ACTIVE or ST_INIT.
+    ST_SENTUPDATEDOFFER,
+    // SRTP filter is active but new parameters are received.
+    // When the answer is set, the state transitions back to ST_ACTIVE.
+    ST_RECEIVEDUPDATEDOFFER,
+    // SRTP filter is active but the sent answer is only provisional.
+    // When the final answer is set, the state transitions to ST_ACTIVE or
+    // ST_INIT.
+    ST_SENTPRANSWER,
+    // SRTP filter is active but the received answer is only provisional.
+    // When the final answer is set, the state transitions to ST_ACTIVE or
+    // ST_INIT.
+    ST_RECEIVEDPRANSWER
+  };
+  State state_;
+  uint32 signal_silent_time_in_ms_;  // Error-signal rate limit, in ms.
+  std::vector<CryptoParams> offer_params_;  // Params stored by SetOffer.
+  // Send/receive sessions for RTP; the *_rtcp_* pair is presumably used when
+  // RTCP has separate keys (SetRtcpParams) -- confirm against srtpfilter.cc.
+  talk_base::scoped_ptr<SrtpSession> send_session_;
+  talk_base::scoped_ptr<SrtpSession> recv_session_;
+  talk_base::scoped_ptr<SrtpSession> send_rtcp_session_;
+  talk_base::scoped_ptr<SrtpSession> recv_rtcp_session_;
+  CryptoParams applied_send_params_;
+  CryptoParams applied_recv_params_;
+};
+
+// Class that wraps a libSRTP session.
+class SrtpSession {
+ public:
+  SrtpSession();
+  ~SrtpSession();
+
+  // Configures the session for sending data using the specified
+  // cipher-suite and key. Receiving must be done by a separate session.
+  bool SetSend(const std::string& cs, const uint8* key, int len);
+  // Configures the session for receiving data using the specified
+  // cipher-suite and key. Sending must be done by a separate session.
+  bool SetRecv(const std::string& cs, const uint8* key, int len);
+
+  // Encrypts/signs an individual RTP/RTCP packet, in-place.
+  // If an HMAC is used, this will increase the packet size.
+  bool ProtectRtp(void* data, int in_len, int max_len, int* out_len);
+  bool ProtectRtcp(void* data, int in_len, int max_len, int* out_len);
+  // Decrypts/verifies an individual RTP/RTCP packet.
+  // If an HMAC is used, this will decrease the packet size.
+  bool UnprotectRtp(void* data, int in_len, int* out_len);
+  bool UnprotectRtcp(void* data, int in_len, int* out_len);
+
+  // Update the silent threshold (in ms) for signaling errors.
+  void set_signal_silent_time(uint32 signal_silent_time_in_ms);
+
+  // Calls srtp_shutdown if it's initialized.
+  static void Terminate();
+
+  // Repeater for SRTP error reports (ssrc, mode, error).
+  sigslot::repeater3<uint32, SrtpFilter::Mode, SrtpFilter::Error>
+      SignalSrtpError;
+
+ private:
+  // Creates the libsrtp session; |type| is a libsrtp ssrc_type_t value
+  // selecting which SSRCs the policy applies to.
+  bool SetKey(int type, const std::string& cs, const uint8* key, int len);
+  // One-time, process-wide libsrtp initialization.
+  static bool Init();
+  // Logs libsrtp events (key usage limits, SSRC collisions, ...).
+  void HandleEvent(const srtp_event_data_t* ev);
+  // Static libsrtp callback; dispatches to the session owning |ev->session|.
+  static void HandleEventThunk(srtp_event_data_t* ev);
+  // Registry of live sessions, used by HandleEventThunk for dispatch.
+  static std::list<SrtpSession*>* sessions();
+
+  srtp_t session_;
+  // Cached auth-tag sizes, used by Protect* for output-buffer size checks.
+  int rtp_auth_tag_len_;
+  int rtcp_auth_tag_len_;
+  talk_base::scoped_ptr<SrtpStat> srtp_stat_;
+  static bool inited_;  // Whether process-wide libsrtp init has run.
+  // Sequence number of the last successfully protected RTP packet.
+  int last_send_seq_num_;
+  DISALLOW_COPY_AND_ASSIGN(SrtpSession);
+};
+
+// Class that collects failures of SRTP.
+class SrtpStat {
+ public:
+ SrtpStat();
+
+ // Report RTP protection results to the handler.
+ void AddProtectRtpResult(uint32 ssrc, int result);
+ // Report RTP unprotection results to the handler.
+ void AddUnprotectRtpResult(uint32 ssrc, int result);
+ // Report RTCP protection results to the handler.
+ void AddProtectRtcpResult(int result);
+ // Report RTCP unprotection results to the handler.
+ void AddUnprotectRtcpResult(int result);
+
+ // Get silent time (in ms) for SRTP statistics handler.
+ uint32 signal_silent_time() const { return signal_silent_time_; }
+ // Set silent time (in ms) for SRTP statistics handler.
+ void set_signal_silent_time(uint32 signal_silent_time) {
+ signal_silent_time_ = signal_silent_time;
+ }
+
+ // Sigslot for reporting errors.
+ sigslot::signal3<uint32, SrtpFilter::Mode, SrtpFilter::Error>
+ SignalSrtpError;
+
+ private:
+ // For each different ssrc and error, we collect statistics separately.
+ struct FailureKey {
+ FailureKey()
+ : ssrc(0),
+ mode(SrtpFilter::PROTECT),
+ error(SrtpFilter::ERROR_NONE) {
+ }
+ FailureKey(uint32 in_ssrc, SrtpFilter::Mode in_mode,
+ SrtpFilter::Error in_error)
+ : ssrc(in_ssrc),
+ mode(in_mode),
+ error(in_error) {
+ }
+ bool operator <(const FailureKey& key) const {
+ return ssrc < key.ssrc || mode < key.mode || error < key.error;
+ }
+ uint32 ssrc;
+ SrtpFilter::Mode mode;
+ SrtpFilter::Error error;
+ };
+ // For tracing conditions for signaling, currently we only use
+ // last_signal_time. Wrap this as a struct so that later on, if we need any
+ // other improvements, it will be easier.
+ struct FailureStat {
+ FailureStat()
+ : last_signal_time(0) {
+ }
+ explicit FailureStat(uint32 in_last_signal_time)
+ : last_signal_time(in_last_signal_time) {
+ }
+ void Reset() {
+ last_signal_time = 0;
+ }
+ uint32 last_signal_time;
+ };
+
+ // Inspect SRTP result and signal error if needed.
+ void HandleSrtpResult(const FailureKey& key);
+
+ std::map<FailureKey, FailureStat> failures_;
+ // Threshold in ms to silent the signaling errors.
+ uint32 signal_silent_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(SrtpStat);
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_SRTPFILTER_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/srtpfilter_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter_unittest.cc
new file mode 100644
index 00000000000..1b4aef2796e
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/srtpfilter_unittest.cc
@@ -0,0 +1,863 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/byteorder.h"
+#include "talk/base/gunit.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/cryptoparams.h"
+#include "talk/media/base/fakertp.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/session/media/srtpfilter.h"
+#ifdef SRTP_RELATIVE_PATH
+#include "crypto/include/err.h"
+#else
+#include "third_party/libsrtp/crypto/include/err.h"
+#endif
+
+using cricket::CS_AES_CM_128_HMAC_SHA1_80;
+using cricket::CS_AES_CM_128_HMAC_SHA1_32;
+using cricket::CryptoParams;
+using cricket::CS_LOCAL;
+using cricket::CS_REMOTE;
+
+static const uint8 kTestKey1[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234";
+static const uint8 kTestKey2[] = "4321ZYXWVUTSRQPONMLKJIHGFEDCBA";
+static const int kTestKeyLen = 30;
+static const std::string kTestKeyParams1 =
+ "inline:WVNfX19zZW1jdGwgKCkgewkyMjA7fQp9CnVubGVz";
+static const std::string kTestKeyParams2 =
+ "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR";
+static const std::string kTestKeyParams3 =
+ "inline:1234X19zZW1jdGwgKCkgewkyMjA7fQp9CnVubGVz";
+static const std::string kTestKeyParams4 =
+ "inline:4567QCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR";
+static const cricket::CryptoParams kTestCryptoParams1(
+ 1, "AES_CM_128_HMAC_SHA1_80", kTestKeyParams1, "");
+static const cricket::CryptoParams kTestCryptoParams2(
+ 1, "AES_CM_128_HMAC_SHA1_80", kTestKeyParams2, "");
+
+// Expected auth-tag sizes, in bytes, appended by each cipher suite. The _32
+// suite shortens only the RTP tag to 4 bytes; RTCP always carries the full
+// 10-byte (80-bit) tag, so rtcp_auth_tag_len ignores its argument.
+static int rtp_auth_tag_len(const std::string& cs) {
+  return (cs == CS_AES_CM_128_HMAC_SHA1_32) ? 4 : 10;
+}
+static int rtcp_auth_tag_len(const std::string& cs) {
+  return 10;
+}
+
+class SrtpFilterTest : public testing::Test {
+ protected:
+  SrtpFilterTest()
+      // Need to initialize |sequence_number_|, the value does not matter.
+      : sequence_number_(1) {
+  }
+  // Wraps one CryptoParams in a single-element vector for Set{Offer,Answer}.
+  static std::vector<CryptoParams> MakeVector(const CryptoParams& params) {
+    std::vector<CryptoParams> vec;
+    vec.push_back(params);
+    return vec;
+  }
+  // Runs a full offer/answer exchange between |f1_| (offerer) and |f2_|
+  // (answerer), expecting |f1_| to end up active.
+  void TestSetParams(const std::vector<CryptoParams>& params1,
+                     const std::vector<CryptoParams>& params2) {
+    EXPECT_TRUE(f1_.SetOffer(params1, CS_LOCAL));
+    EXPECT_TRUE(f2_.SetOffer(params1, CS_REMOTE));
+    EXPECT_TRUE(f2_.SetAnswer(params2, CS_LOCAL));
+    EXPECT_TRUE(f1_.SetAnswer(params2, CS_REMOTE));
+    EXPECT_TRUE(f1_.IsActive());
+  }
+  // Round-trips an RTP and an RTCP packet in both directions, checking that
+  // protection grows each packet by the expected auth-tag size (|cs1| for
+  // f1_'s sending direction, |cs2| for f2_'s) and that unprotection restores
+  // the original bytes.
+  void TestProtectUnprotect(const std::string& cs1, const std::string& cs2) {
+    char rtp_packet[sizeof(kPcmuFrame) + 10];
+    char original_rtp_packet[sizeof(kPcmuFrame)];
+    // RTCP needs extra room for the 4-byte SRTCP index plus the auth tag.
+    char rtcp_packet[sizeof(kRtcpReport) + 4 + 10];
+    int rtp_len = sizeof(kPcmuFrame), rtcp_len = sizeof(kRtcpReport), out_len;
+    memcpy(rtp_packet, kPcmuFrame, rtp_len);
+    // In order to be able to run this test function multiple times we can not
+    // use the same sequence number twice. Increase the sequence number by one.
+    talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet) + 2,
+                       ++sequence_number_);
+    memcpy(original_rtp_packet, rtp_packet, rtp_len);
+    memcpy(rtcp_packet, kRtcpReport, rtcp_len);
+
+    // RTP round trip: f1_ protects, f2_ unprotects.
+    EXPECT_TRUE(f1_.ProtectRtp(rtp_packet, rtp_len,
+                               sizeof(rtp_packet), &out_len));
+    EXPECT_EQ(out_len, rtp_len + rtp_auth_tag_len(cs1));
+    EXPECT_NE(0, memcmp(rtp_packet, original_rtp_packet, rtp_len));
+    EXPECT_TRUE(f2_.UnprotectRtp(rtp_packet, out_len, &out_len));
+    EXPECT_EQ(rtp_len, out_len);
+    EXPECT_EQ(0, memcmp(rtp_packet, original_rtp_packet, rtp_len));
+
+    // RTP round trip in the opposite direction.
+    EXPECT_TRUE(f2_.ProtectRtp(rtp_packet, rtp_len,
+                               sizeof(rtp_packet), &out_len));
+    EXPECT_EQ(out_len, rtp_len + rtp_auth_tag_len(cs2));
+    EXPECT_NE(0, memcmp(rtp_packet, original_rtp_packet, rtp_len));
+    EXPECT_TRUE(f1_.UnprotectRtp(rtp_packet, out_len, &out_len));
+    EXPECT_EQ(rtp_len, out_len);
+    EXPECT_EQ(0, memcmp(rtp_packet, original_rtp_packet, rtp_len));
+
+    // RTCP round trip: f1_ protects, f2_ unprotects.
+    EXPECT_TRUE(f1_.ProtectRtcp(rtcp_packet, rtcp_len,
+                                sizeof(rtcp_packet), &out_len));
+    EXPECT_EQ(out_len, rtcp_len + 4 + rtcp_auth_tag_len(cs1));  // NOLINT
+    EXPECT_NE(0, memcmp(rtcp_packet, kRtcpReport, rtcp_len));
+    EXPECT_TRUE(f2_.UnprotectRtcp(rtcp_packet, out_len, &out_len));
+    EXPECT_EQ(rtcp_len, out_len);
+    EXPECT_EQ(0, memcmp(rtcp_packet, kRtcpReport, rtcp_len));
+
+    // RTCP round trip in the opposite direction.
+    EXPECT_TRUE(f2_.ProtectRtcp(rtcp_packet, rtcp_len,
+                                sizeof(rtcp_packet), &out_len));
+    EXPECT_EQ(out_len, rtcp_len + 4 + rtcp_auth_tag_len(cs2));  // NOLINT
+    EXPECT_NE(0, memcmp(rtcp_packet, kRtcpReport, rtcp_len));
+    EXPECT_TRUE(f1_.UnprotectRtcp(rtcp_packet, out_len, &out_len));
+    EXPECT_EQ(rtcp_len, out_len);
+    EXPECT_EQ(0, memcmp(rtcp_packet, kRtcpReport, rtcp_len));
+  }
+  cricket::SrtpFilter f1_;
+  cricket::SrtpFilter f2_;
+  // Incremented per TestProtectUnprotect call to avoid SRTP replay rejection.
+  int sequence_number_;
+};
+
+// Test that we can set up the session and keys properly.
+TEST_F(SrtpFilterTest, TestGoodSetupOneCipherSuite) {
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_TRUE(f1_.IsActive());
+}
+
+// Test that we can set up things with multiple params.
+TEST_F(SrtpFilterTest, TestGoodSetupMultipleCipherSuites) {
+ std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+ std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+ offer.push_back(kTestCryptoParams1);
+ offer[1].tag = 2;
+ offer[1].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+ answer[0].tag = 2;
+ answer[0].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+ EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+ EXPECT_TRUE(f1_.IsActive());
+}
+
+// Test that we handle the cases where crypto is not desired.
+TEST_F(SrtpFilterTest, TestGoodSetupNoCipherSuites) {
+ std::vector<CryptoParams> offer, answer;
+ EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+ EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we handle the cases where crypto is not desired by the remote side.
+TEST_F(SrtpFilterTest, TestGoodSetupNoAnswerCipherSuites) {
+ std::vector<CryptoParams> answer;
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+ EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail if we call the functions the wrong way.
+TEST_F(SrtpFilterTest, TestBadSetup) {
+ std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+ std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+ EXPECT_FALSE(f1_.SetAnswer(answer, CS_LOCAL));
+ EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+ EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+ EXPECT_FALSE(f1_.SetAnswer(answer, CS_LOCAL));
+ EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we can set offer multiple times from the same source.
+TEST_F(SrtpFilterTest, TestGoodSetupMultipleOffers) {
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_TRUE(f1_.IsActive());
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+ EXPECT_TRUE(f1_.SetAnswer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams1), CS_REMOTE));
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_TRUE(f2_.SetAnswer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+ EXPECT_TRUE(f2_.IsActive());
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams1), CS_REMOTE));
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_TRUE(f2_.SetAnswer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+}
+// Test that we can't set offer multiple times from different sources.
+TEST_F(SrtpFilterTest, TestBadSetupMultipleOffers) {
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_FALSE(f1_.SetOffer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_TRUE(f1_.SetAnswer(MakeVector(kTestCryptoParams1), CS_REMOTE));
+ EXPECT_TRUE(f1_.IsActive());
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+ EXPECT_FALSE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_REMOTE));
+ EXPECT_TRUE(f1_.SetAnswer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_FALSE(f2_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f2_.SetAnswer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+ EXPECT_TRUE(f2_.IsActive());
+ EXPECT_TRUE(f2_.SetOffer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_FALSE(f2_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_TRUE(f2_.SetAnswer(MakeVector(kTestCryptoParams2), CS_LOCAL));
+}
+
+// Test that we fail if we have params in the answer when none were offered.
+TEST_F(SrtpFilterTest, TestNoAnswerCipherSuites) {
+ std::vector<CryptoParams> offer;
+ EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+ EXPECT_FALSE(f1_.SetAnswer(MakeVector(kTestCryptoParams2), CS_REMOTE));
+ EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail if we have too many params in our answer.
+TEST_F(SrtpFilterTest, TestMultipleAnswerCipherSuites) {
+ std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+ answer.push_back(kTestCryptoParams2);
+ answer[1].tag = 2;
+ answer[1].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+ EXPECT_TRUE(f1_.SetOffer(MakeVector(kTestCryptoParams1), CS_LOCAL));
+ EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+ EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail if we don't support the cipher-suite.
+TEST_F(SrtpFilterTest, TestInvalidCipherSuite) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  offer[0].cipher_suite = answer[0].cipher_suite = "FOO";  // Unknown suite.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail if we can't agree on a tag.
+TEST_F(SrtpFilterTest, TestNoMatchingTag) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].tag = 99;  // No crypto in the offer uses tag 99.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail if we can't agree on a cipher-suite.
+TEST_F(SrtpFilterTest, TestNoMatchingCipherSuite) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].tag = 2;
+  answer[0].cipher_suite = "FOO";  // Tag and suite both fail to match.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail keys with bad base64 content.
+TEST_F(SrtpFilterTest, TestInvalidKeyData) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].key_params = "inline:!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";  // '!' is not base64.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail keys with the wrong key-method.
+TEST_F(SrtpFilterTest, TestWrongKeyMethod) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].key_params = "outline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR";  // Only "inline" is valid.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail keys that are too short.
+TEST_F(SrtpFilterTest, TestKeyTooShort) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].key_params = "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtx";  // Short of a full key+salt.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail keys that are too long.
+TEST_F(SrtpFilterTest, TestKeyTooLong) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].key_params = "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBRABCD";  // Extra key bytes.
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we fail keys with lifetime or MKI set (since we support neither).
+TEST_F(SrtpFilterTest, TestUnsupportedOptions) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  answer[0].key_params =
+      "inline:PS1uQCVeeCFCanVmcjkpPywjNWhcYD0mXXtxaVBR|2^20|1:4";  // lifetime|MKI appended
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_FALSE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());
+}
+
+// Test that we can encrypt/decrypt after setting the same CryptoParams again on
+// one side.
+TEST_F(SrtpFilterTest, TestSettingSameKeyOnOneSide) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  TestSetParams(offer, answer);
+
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80,
+                       CS_AES_CM_128_HMAC_SHA1_80);
+
+  // Re-apply the same keys on one end; this should not reset the ROC.
+  EXPECT_TRUE(f2_.SetOffer(offer, CS_REMOTE));
+  EXPECT_TRUE(f2_.SetAnswer(answer, CS_LOCAL));
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+}
+
+// Test that we can encrypt/decrypt after negotiating AES_CM_128_HMAC_SHA1_80.
+TEST_F(SrtpFilterTest, TestProtect_AES_CM_128_HMAC_SHA1_80) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  offer.push_back(kTestCryptoParams1);  // Offer a second crypto under tag 2.
+  offer[1].tag = 2;
+  offer[1].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+  TestSetParams(offer, answer);
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+}
+
+// Test that we can encrypt/decrypt after negotiating AES_CM_128_HMAC_SHA1_32.
+TEST_F(SrtpFilterTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+  offer.push_back(kTestCryptoParams1);  // Offer a second crypto under tag 2.
+  offer[1].tag = 2;
+  offer[1].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+  answer[0].tag = 2;
+  answer[0].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;  // Answer picks the tag-2 crypto.
+  TestSetParams(offer, answer);
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_32, CS_AES_CM_128_HMAC_SHA1_32);
+}
+
+// Test that we can change encryption parameters.
+TEST_F(SrtpFilterTest, TestChangeParameters) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+
+  TestSetParams(offer, answer);
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+
+  // Change the key parameters and cipher_suite.
+  offer[0].key_params = kTestKeyParams3;
+  offer[0].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+  answer[0].key_params = kTestKeyParams4;
+  answer[0].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_TRUE(f2_.SetOffer(offer, CS_REMOTE));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());  // Was a duplicated f1_ check; verify f2_ too.
+
+  // Test that the old keys are valid until the negotiation is complete.
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+
+  // Complete the negotiation and test that we can still understand each other.
+  EXPECT_TRUE(f2_.SetAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_32, CS_AES_CM_128_HMAC_SHA1_32);
+}
+
+// Test that we can send and receive provisional answers with crypto enabled.
+// Also test that we can change the crypto.
+TEST_F(SrtpFilterTest, TestProvisionalAnswer) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  offer.push_back(kTestCryptoParams1);  // Offer a second crypto under tag 2.
+  offer[1].tag = 2;
+  offer[1].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_TRUE(f2_.SetOffer(offer, CS_REMOTE));
+  EXPECT_TRUE(f2_.SetProvisionalAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetProvisionalAnswer(answer, CS_REMOTE));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+
+  answer[0].key_params = kTestKeyParams4;
+  answer[0].tag = 2;
+  answer[0].cipher_suite = CS_AES_CM_128_HMAC_SHA1_32;  // Final answer switches to tag 2.
+  EXPECT_TRUE(f2_.SetAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_32, CS_AES_CM_128_HMAC_SHA1_32);
+}
+
+// Test that a provisional answer doesn't need to contain a crypto.
+TEST_F(SrtpFilterTest, TestProvisionalAnswerWithoutCrypto) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer;
+
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_TRUE(f2_.SetOffer(offer, CS_REMOTE));
+  EXPECT_TRUE(f2_.SetProvisionalAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetProvisionalAnswer(answer, CS_REMOTE));
+  EXPECT_FALSE(f1_.IsActive());  // No crypto yet, so SRTP is not active.
+  EXPECT_FALSE(f2_.IsActive());
+
+  answer.push_back(kTestCryptoParams2);  // Final answer supplies the crypto.
+  EXPECT_TRUE(f2_.SetAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+}
+
+// Test that we can disable encryption.
+TEST_F(SrtpFilterTest, TestDisableEncryption) {
+  std::vector<CryptoParams> offer(MakeVector(kTestCryptoParams1));
+  std::vector<CryptoParams> answer(MakeVector(kTestCryptoParams2));
+
+  TestSetParams(offer, answer);
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+
+  offer.clear();  // Renegotiate with no cryptos to turn SRTP off.
+  answer.clear();
+  EXPECT_TRUE(f1_.SetOffer(offer, CS_LOCAL));
+  EXPECT_TRUE(f2_.SetOffer(offer, CS_REMOTE));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+
+  // Test that the old keys are valid until the negotiation is complete.
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+
+  // Complete the negotiation.
+  EXPECT_TRUE(f2_.SetAnswer(answer, CS_LOCAL));
+  EXPECT_TRUE(f1_.SetAnswer(answer, CS_REMOTE));
+
+  EXPECT_FALSE(f1_.IsActive());
+  EXPECT_FALSE(f2_.IsActive());
+}
+
+// Test directly setting the params with AES_CM_128_HMAC_SHA1_80
+TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_80) {
+  EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                               kTestKey1, kTestKeyLen,
+                               CS_AES_CM_128_HMAC_SHA1_80,
+                               kTestKey2, kTestKeyLen));
+  EXPECT_TRUE(f2_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                               kTestKey2, kTestKeyLen,
+                               CS_AES_CM_128_HMAC_SHA1_80,
+                               kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey1, kTestKeyLen,
+                                CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey2, kTestKeyLen));
+  EXPECT_TRUE(f2_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey2, kTestKeyLen,
+                                CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_80, CS_AES_CM_128_HMAC_SHA1_80);
+}
+
+// Test directly setting the params with AES_CM_128_HMAC_SHA1_32
+TEST_F(SrtpFilterTest, TestProtect_SetParamsDirect_AES_CM_128_HMAC_SHA1_32) {
+  EXPECT_TRUE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
+                               kTestKey1, kTestKeyLen,
+                               CS_AES_CM_128_HMAC_SHA1_32,
+                               kTestKey2, kTestKeyLen));
+  EXPECT_TRUE(f2_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_32,
+                               kTestKey2, kTestKeyLen,
+                               CS_AES_CM_128_HMAC_SHA1_32,
+                               kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
+                                kTestKey1, kTestKeyLen,
+                                CS_AES_CM_128_HMAC_SHA1_32,
+                                kTestKey2, kTestKeyLen));
+  EXPECT_TRUE(f2_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_32,
+                                kTestKey2, kTestKeyLen,
+                                CS_AES_CM_128_HMAC_SHA1_32,
+                                kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(f1_.IsActive());
+  EXPECT_TRUE(f2_.IsActive());
+  TestProtectUnprotect(CS_AES_CM_128_HMAC_SHA1_32, CS_AES_CM_128_HMAC_SHA1_32);
+}
+
+// Test directly setting the params with bogus keys
+TEST_F(SrtpFilterTest, TestSetParamsKeyTooShort) {
+  EXPECT_FALSE(f1_.SetRtpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey1, kTestKeyLen - 1,  // One byte short.
+                                CS_AES_CM_128_HMAC_SHA1_80,
+                                kTestKey1, kTestKeyLen - 1));
+  EXPECT_FALSE(f1_.SetRtcpParams(CS_AES_CM_128_HMAC_SHA1_80,
+                                 kTestKey1, kTestKeyLen - 1,
+                                 CS_AES_CM_128_HMAC_SHA1_80,
+                                 kTestKey1, kTestKeyLen - 1));
+}
+
+class SrtpSessionTest : public testing::Test {
+ protected:
+  virtual void SetUp() {  // Start each test from fresh plaintext packets.
+    rtp_len_ = sizeof(kPcmuFrame);
+    rtcp_len_ = sizeof(kRtcpReport);
+    memcpy(rtp_packet_, kPcmuFrame, rtp_len_);
+    memcpy(rtcp_packet_, kRtcpReport, rtcp_len_);
+  }
+  // Encrypts rtp_packet_ in place with s1_ and checks the length grew by
+  // the auth tag and the payload changed.
+  void TestProtectRtp(const std::string& cs) {
+    int out_len = 0;
+    EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
+                               sizeof(rtp_packet_), &out_len));
+    EXPECT_EQ(out_len, rtp_len_ + rtp_auth_tag_len(cs));
+    EXPECT_NE(0, memcmp(rtp_packet_, kPcmuFrame, rtp_len_));
+    rtp_len_ = out_len;
+  }
+  // Encrypts rtcp_packet_ in place with s1_; RTCP adds a 4-byte index as
+  // well as the auth tag.
+  void TestProtectRtcp(const std::string& cs) {
+    int out_len = 0;
+    EXPECT_TRUE(s1_.ProtectRtcp(rtcp_packet_, rtcp_len_,
+                                sizeof(rtcp_packet_), &out_len));
+    EXPECT_EQ(out_len, rtcp_len_ + 4 + rtcp_auth_tag_len(cs));  // NOLINT
+    EXPECT_NE(0, memcmp(rtcp_packet_, kRtcpReport, rtcp_len_));
+    rtcp_len_ = out_len;
+  }
+  // Decrypts rtp_packet_ with s2_ and checks it round-trips to the original.
+  void TestUnprotectRtp(const std::string& cs) {
+    int out_len = 0, expected_len = sizeof(kPcmuFrame);
+    EXPECT_TRUE(s2_.UnprotectRtp(rtp_packet_, rtp_len_, &out_len));
+    EXPECT_EQ(expected_len, out_len);
+    EXPECT_EQ(0, memcmp(rtp_packet_, kPcmuFrame, out_len));
+  }
+  // Decrypts rtcp_packet_ with s2_ and checks it round-trips to the original.
+  void TestUnprotectRtcp(const std::string& cs) {
+    int out_len = 0, expected_len = sizeof(kRtcpReport);
+    EXPECT_TRUE(s2_.UnprotectRtcp(rtcp_packet_, rtcp_len_, &out_len));
+    EXPECT_EQ(expected_len, out_len);
+    EXPECT_EQ(0, memcmp(rtcp_packet_, kRtcpReport, out_len));
+  }
+  cricket::SrtpSession s1_;  // Sending (protect) session.
+  cricket::SrtpSession s2_;  // Receiving (unprotect) session.
+  char rtp_packet_[sizeof(kPcmuFrame) + 10];  // Extra room for the auth tag.
+  char rtcp_packet_[sizeof(kRtcpReport) + 4 + 10];  // Room for index + tag.
+  int rtp_len_;
+  int rtcp_len_;
+};
+
+// Test that we can set up the session and keys properly.
+TEST_F(SrtpSessionTest, TestGoodSetup) {
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+}
+
+// Test that we can't change the keys once set.
+TEST_F(SrtpSessionTest, TestBadSetup) {
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_FALSE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey2, kTestKeyLen));  // Rekey rejected.
+  EXPECT_FALSE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey2, kTestKeyLen));
+}
+
+// Test that we fail keys of the wrong length.
+TEST_F(SrtpSessionTest, TestKeysTooShort) {
+  EXPECT_FALSE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, 1));  // 1-byte key.
+  EXPECT_FALSE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, 1));
+}
+
+// Test that we can encrypt and decrypt RTP/RTCP using AES_CM_128_HMAC_SHA1_80.
+TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_80) {
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_80);
+  TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_80);
+  TestUnprotectRtp(CS_AES_CM_128_HMAC_SHA1_80);
+  TestUnprotectRtcp(CS_AES_CM_128_HMAC_SHA1_80);
+}
+
+// Test that we can encrypt and decrypt RTP/RTCP using AES_CM_128_HMAC_SHA1_32.
+TEST_F(SrtpSessionTest, TestProtect_AES_CM_128_HMAC_SHA1_32) {
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_32, kTestKey1, kTestKeyLen));
+  TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_32);
+  TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_32);
+  TestUnprotectRtp(CS_AES_CM_128_HMAC_SHA1_32);
+  TestUnprotectRtcp(CS_AES_CM_128_HMAC_SHA1_32);
+}
+
+// Test that we fail to unprotect if someone tampers with the RTP/RTCP payloads.
+TEST_F(SrtpSessionTest, TestTamperReject) {
+  int out_len;
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  TestProtectRtp(CS_AES_CM_128_HMAC_SHA1_80);
+  TestProtectRtcp(CS_AES_CM_128_HMAC_SHA1_80);
+  rtp_packet_[0] = 0x12;  // Corrupt the first RTP header byte.
+  rtcp_packet_[1] = 0x34;  // Corrupt the second RTCP header byte.
+  EXPECT_FALSE(s2_.UnprotectRtp(rtp_packet_, rtp_len_, &out_len));
+  EXPECT_FALSE(s2_.UnprotectRtcp(rtcp_packet_, rtcp_len_, &out_len));
+}
+
+// Test that we fail to unprotect if the payloads are not authenticated.
+TEST_F(SrtpSessionTest, TestUnencryptReject) {
+  int out_len;
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_FALSE(s2_.UnprotectRtp(rtp_packet_, rtp_len_, &out_len));  // Plaintext input.
+  EXPECT_FALSE(s2_.UnprotectRtcp(rtcp_packet_, rtcp_len_, &out_len));
+}
+
+// Test that we fail when using buffers that are too small.
+TEST_F(SrtpSessionTest, TestBuffersTooSmall) {
+  int out_len;
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_FALSE(s1_.ProtectRtp(rtp_packet_, rtp_len_,
+                              sizeof(rtp_packet_) - 10, &out_len));  // No room for tag.
+  EXPECT_FALSE(s1_.ProtectRtcp(rtcp_packet_, rtcp_len_,
+                               sizeof(rtcp_packet_) - 14, &out_len));  // No room for index+tag.
+}
+
+TEST_F(SrtpSessionTest, TestReplay) {
+  static const uint16 kMaxSeqnum = static_cast<uint16>(-1);  // 65535.
+  static const uint16 seqnum_big = 62275;
+  static const uint16 seqnum_small = 10;
+  static const uint16 replay_window = 1024;
+  int out_len;
+
+  EXPECT_TRUE(s1_.SetSend(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+  EXPECT_TRUE(s2_.SetRecv(CS_AES_CM_128_HMAC_SHA1_80, kTestKey1, kTestKeyLen));
+
+  // Initial sequence number.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2, seqnum_big);
+  EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                             &out_len));
+
+  // Replay within the 1024 window should succeed.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2,
+                     seqnum_big - replay_window + 1);
+  EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                             &out_len));
+
+  // Replay outside of the 1024 window should fail.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2,
+                     seqnum_big - replay_window - 1);
+  EXPECT_FALSE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                              &out_len));
+
+  // Increment sequence number to a small number.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2, seqnum_small);
+  EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                             &out_len));
+
+  // Replay around 0 but outside of the 1024 window should fail.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2,
+                     kMaxSeqnum + seqnum_small - replay_window - 1);
+  EXPECT_FALSE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                              &out_len));
+
+  // Replay around 0 but within the 1024 window should succeed.
+  for (uint16 seqnum = 65000; seqnum < 65003; ++seqnum) {
+    talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2, seqnum);
+    EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                               &out_len));
+  }
+
+  // Go back to normal sequence number.
+  // NOTE: without the fix in libsrtp, this would fail. This is because
+  // without the fix, the loop above would keep incrementing local sequence
+  // number in libsrtp, eventually the new sequence number would go out side
+  // of the window.
+  talk_base::SetBE16(reinterpret_cast<uint8*>(rtp_packet_) + 2,
+                     seqnum_small + 1);
+  EXPECT_TRUE(s1_.ProtectRtp(rtp_packet_, rtp_len_, sizeof(rtp_packet_),
+                             &out_len));
+}
+
+class SrtpStatTest
+    : public testing::Test,
+      public sigslot::has_slots<> {
+ public:
+  SrtpStatTest()
+      : ssrc_(0U),
+        mode_(-1),
+        error_(cricket::SrtpFilter::ERROR_NONE) {
+    srtp_stat_.SignalSrtpError.connect(this, &SrtpStatTest::OnSrtpError);
+    srtp_stat_.set_signal_silent_time(200);  // Mute repeated errors for 200ms.
+  }
+
+ protected:
+  // Records the most recently signaled error for the assertions below.
+  void OnSrtpError(uint32 ssrc, cricket::SrtpFilter::Mode mode,
+                   cricket::SrtpFilter::Error error) {
+    ssrc_ = ssrc;
+    mode_ = mode;
+    error_ = error;
+  }
+  void Reset() {
+    ssrc_ = 0U;
+    mode_ = -1;
+    error_ = cricket::SrtpFilter::ERROR_NONE;
+  }
+
+  cricket::SrtpStat srtp_stat_;
+  uint32 ssrc_;  // Last signaled SSRC (0 = none).
+  int mode_;  // Last signaled mode (-1 = none).
+  cricket::SrtpFilter::Error error_;  // Last signaled error.
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SrtpStatTest);
+};
+
+TEST_F(SrtpStatTest, TestProtectRtpError) {  // Protect errors signal, then rate-limit.
+  Reset();
+  srtp_stat_.AddProtectRtpResult(1, err_status_ok);  // Success signals nothing.
+  EXPECT_EQ(0U, ssrc_);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  Reset();
+  srtp_stat_.AddProtectRtpResult(1, err_status_auth_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_AUTH, error_);
+  Reset();
+  srtp_stat_.AddProtectRtpResult(1, err_status_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+  // Within 200ms, the error will not be triggered.
+  Reset();
+  srtp_stat_.AddProtectRtpResult(1, err_status_fail);
+  EXPECT_EQ(0U, ssrc_);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  // Now the error will be triggered again.
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);  // Outlast the silent window.
+  srtp_stat_.AddProtectRtpResult(1, err_status_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+}
+
+TEST_F(SrtpStatTest, TestUnprotectRtpError) {  // Unprotect errors, incl. replay variants.
+  Reset();
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_ok);
+  EXPECT_EQ(0U, ssrc_);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_auth_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_AUTH, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_replay_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_REPLAY, error_);
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_replay_old);  // Also maps to ERROR_REPLAY.
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_REPLAY, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+  // Within 200ms, the error will not be triggered.
+  Reset();
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_fail);
+  EXPECT_EQ(0U, ssrc_);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  // Now the error will be triggered again.
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);
+  srtp_stat_.AddUnprotectRtpResult(1, err_status_fail);
+  EXPECT_EQ(1U, ssrc_);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+}
+
+TEST_F(SrtpStatTest, TestProtectRtcpError) {  // RTCP results carry no SSRC argument.
+  Reset();
+  srtp_stat_.AddProtectRtcpResult(err_status_ok);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  Reset();
+  srtp_stat_.AddProtectRtcpResult(err_status_auth_fail);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_AUTH, error_);
+  Reset();
+  srtp_stat_.AddProtectRtcpResult(err_status_fail);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+  // Within 200ms, the error will not be triggered.
+  Reset();
+  srtp_stat_.AddProtectRtcpResult(err_status_fail);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  // Now the error will be triggered again.
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);  // Outlast the silent window.
+  srtp_stat_.AddProtectRtcpResult(err_status_fail);
+  EXPECT_EQ(cricket::SrtpFilter::PROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+}
+
+TEST_F(SrtpStatTest, TestUnprotectRtcpError) {  // RTCP results carry no SSRC argument.
+  Reset();
+  srtp_stat_.AddUnprotectRtcpResult(err_status_ok);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtcpResult(err_status_auth_fail);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_AUTH, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtcpResult(err_status_replay_fail);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_REPLAY, error_);
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);
+  srtp_stat_.AddUnprotectRtcpResult(err_status_replay_fail);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_REPLAY, error_);
+  Reset();
+  srtp_stat_.AddUnprotectRtcpResult(err_status_fail);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+  // Within 200ms, the error will not be triggered.
+  Reset();
+  srtp_stat_.AddUnprotectRtcpResult(err_status_fail);
+  EXPECT_EQ(-1, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_NONE, error_);
+  // Now the error will be triggered again.
+  Reset();
+  talk_base::Thread::Current()->SleepMs(210);
+  srtp_stat_.AddUnprotectRtcpResult(err_status_fail);
+  EXPECT_EQ(cricket::SrtpFilter::UNPROTECT, mode_);
+  EXPECT_EQ(cricket::SrtpFilter::ERROR_FAIL, error_);
+}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.cc b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.cc
new file mode 100644
index 00000000000..638167d18e7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.cc
@@ -0,0 +1,93 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/ssrcmuxfilter.h"
+
+#include <algorithm>
+
+#include "talk/base/logging.h"
+#include "talk/media/base/rtputils.h"
+
+namespace cricket {
+
+static const uint32 kSsrc01 = 0x01;  // Special SSRC used for generic RTCP feedback.
+
+SsrcMuxFilter::SsrcMuxFilter() {
+}
+
+SsrcMuxFilter::~SsrcMuxFilter() {
+}
+
+bool SsrcMuxFilter::IsActive() const {
+  return !streams_.empty();  // Active once at least one recv stream is added.
+}
+
+bool SsrcMuxFilter::DemuxPacket(const char* data, size_t len, bool rtcp) {
+  uint32 ssrc = 0;
+  if (!rtcp) {
+    GetRtpSsrc(data, len, &ssrc);  // On parse failure ssrc stays 0 and matches nothing.
+  } else {
+    int pl_type = 0;
+    if (!GetRtcpType(data, len, &pl_type)) return false;
+    if (pl_type == kRtcpTypeSDES) {
+      // SDES packet parsing not supported.
+      LOG(LS_INFO) << "SDES packet received for demux.";
+      return true;
+    } else {
+      if (!GetRtcpSsrc(data, len, &ssrc)) return false;
+      if (ssrc == kSsrc01) {
+        // SSRC 1 has a special meaning and indicates generic feedback on
+        // some systems and should never be dropped. If it is forwarded
+        // incorrectly it will be ignored by lower layers anyway.
+        return true;
+      }
+    }
+  }
+  return FindStream(ssrc);  // Accept only explicitly registered SSRCs.
+}
+
+bool SsrcMuxFilter::AddStream(const StreamParams& stream) {
+  if (GetStreamBySsrc(streams_, stream.first_ssrc(), NULL)) {  // Duplicate check by first SSRC.
+    LOG(LS_WARNING) << "Stream already added to filter";
+    return false;
+  }
+  streams_.push_back(stream);
+  return true;
+}
+
+bool SsrcMuxFilter::RemoveStream(uint32 ssrc) {
+  return RemoveStreamBySsrc(&streams_, ssrc);  // True if a stream was removed.
+}
+
+bool SsrcMuxFilter::FindStream(uint32 ssrc) const {
+  if (ssrc == 0) {  // 0 means "no SSRC parsed"; never matches.
+    return false;
+  }
+  return (GetStreamBySsrc(streams_, ssrc, NULL));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.h b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.h
new file mode 100644
index 00000000000..9420f54cceb
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter.h
@@ -0,0 +1,67 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_SSRCMUXFILTER_H_
+#define TALK_SESSION_MEDIA_SSRCMUXFILTER_H_
+
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/media/base/streamparams.h"
+
+namespace cricket {
+
+// This class maintains a list of recv SSRCs destined for cricket::BaseChannel.
+// In the case of a single RTP session and a single transport channel, all
+// session (or media) channels share a common transport channel. Hence they all
+// get SignalReadPacket when a packet is received on the transport channel.
+// This requires cricket::BaseChannel to know all the valid sources, else the
+// media channel will decode invalid packets.
+class SsrcMuxFilter {
+ public:
+  SsrcMuxFilter();
+  ~SsrcMuxFilter();
+
+  // Whether the RTP mux is active for an SDP session.
+  // Returns true if the filter contains a stream.
+  bool IsActive() const;
+  // Determines whether a packet belongs to a valid cricket::BaseChannel.
+  bool DemuxPacket(const char* data, size_t len, bool rtcp);
+  // Adds a valid source to the filter.
+  bool AddStream(const StreamParams& stream);
+  // Removes a source from the filter.
+  bool RemoveStream(uint32 ssrc);
+  // Utility method added for unittests.
+  bool FindStream(uint32 ssrc) const;
+
+ private:
+  std::vector<StreamParams> streams_;  // Registered recv streams.
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_SSRCMUXFILTER_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter_unittest.cc
new file mode 100644
index 00000000000..85a4dbe50da
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/ssrcmuxfilter_unittest.cc
@@ -0,0 +1,184 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "talk/base/gunit.h"
+#include "talk/session/media/ssrcmuxfilter.h"
+
+static const int kSsrc1 = 0x1111;
+static const int kSsrc2 = 0x2222;
+static const int kSsrc3 = 0x3333;
+
+using cricket::StreamParams;
+
+// SSRC = 0x1111
+static const unsigned char kRtpPacketSsrc1[] = {
+ 0x80, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x11,
+};
+
+// SSRC = 0x2222
+static const unsigned char kRtpPacketSsrc2[] = {
+ 0x80, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x22,
+};
+
+// SSRC = 0
+static const unsigned char kRtpPacketInvalidSsrc[] = {
+ 0x80, 0x80, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+// invalid size
+static const unsigned char kRtpPacketTooSmall[] = {
+ 0x80, 0x80, 0x00, 0x00,
+};
+
+// PT = 200 = SR, len = 28, SSRC of sender = 0x0001
+// NTP TS = 0, RTP TS = 0, packet count = 0
+static const unsigned char kRtcpPacketSrSsrc01[] = {
+ 0x80, 0xC8, 0x00, 0x1B, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+// PT = 200 = SR, len = 28, SSRC of sender = 0x2222
+// NTP TS = 0, RTP TS = 0, packet count = 0
+static const unsigned char kRtcpPacketSrSsrc2[] = {
+ 0x80, 0xC8, 0x00, 0x1B, 0x00, 0x00, 0x22, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+};
+
+// First packet - SR = PT = 200, len = 0, SSRC of sender = 0x1111
+// NTP TS = 0, RTP TS = 0, packet count = 0
+// second packet - SDES = PT = 202, count = 0, SSRC = 0x1111, cname len = 0
+static const unsigned char kRtcpPacketCompoundSrSdesSsrc1[] = {
+ 0x80, 0xC8, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x81, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x11, 0x11, 0x01, 0x00,
+};
+
+// SDES = PT = 202, count = 0, SSRC = 0x2222, cname len = 0
+static const unsigned char kRtcpPacketSdesSsrc2[] = {
+ 0x81, 0xCA, 0x00, 0x00, 0x00, 0x00, 0x22, 0x22, 0x01, 0x00,
+};
+
+// Packet has only mandatory fixed RTCP header
+static const unsigned char kRtcpPacketFixedHeaderOnly[] = {
+ 0x80, 0xC8, 0x00, 0x00,
+};
+
+// Packet too small to contain an SSRC for demuxing.
+static const unsigned char kRtcpPacketTooSmall[] = {
+ 0x80, 0xC8, 0x00, 0x00, 0x00, 0x00,
+};
+
+// PT = 206, FMT = 1, Sender SSRC = 0x1111, Media SSRC = 0x1111
+// No FCI information is needed for PLI.
+static const unsigned char kRtcpPacketNonCompoundRtcpPliFeedback[] = {
+ 0x81, 0xCE, 0x00, 0x0C, 0x00, 0x00, 0x11, 0x11, 0x00, 0x00, 0x11, 0x11,
+};
+
+TEST(SsrcMuxFilterTest, AddRemoveStreamTest) {
+ cricket::SsrcMuxFilter ssrc_filter;
+ EXPECT_FALSE(ssrc_filter.IsActive());
+ EXPECT_TRUE(ssrc_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
+ StreamParams stream2;
+ stream2.ssrcs.push_back(kSsrc2);
+ stream2.ssrcs.push_back(kSsrc3);
+ EXPECT_TRUE(ssrc_filter.AddStream(stream2));
+
+ EXPECT_TRUE(ssrc_filter.IsActive());
+ EXPECT_TRUE(ssrc_filter.FindStream(kSsrc1));
+ EXPECT_TRUE(ssrc_filter.FindStream(kSsrc2));
+ EXPECT_TRUE(ssrc_filter.FindStream(kSsrc3));
+ EXPECT_TRUE(ssrc_filter.RemoveStream(kSsrc1));
+ EXPECT_FALSE(ssrc_filter.FindStream(kSsrc1));
+ EXPECT_TRUE(ssrc_filter.RemoveStream(kSsrc3));
+ EXPECT_FALSE(ssrc_filter.RemoveStream(kSsrc2)); // Already removed.
+ EXPECT_FALSE(ssrc_filter.IsActive());
+}
+
+TEST(SsrcMuxFilterTest, RtpPacketTest) {
+ cricket::SsrcMuxFilter ssrc_filter;
+ EXPECT_TRUE(ssrc_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtpPacketSsrc1),
+ sizeof(kRtpPacketSsrc1), false));
+ EXPECT_TRUE(ssrc_filter.AddStream(StreamParams::CreateLegacy(kSsrc2)));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtpPacketSsrc2),
+ sizeof(kRtpPacketSsrc2), false));
+ EXPECT_TRUE(ssrc_filter.RemoveStream(kSsrc2));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtpPacketSsrc2),
+ sizeof(kRtpPacketSsrc2), false));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtpPacketInvalidSsrc),
+ sizeof(kRtpPacketInvalidSsrc), false));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtpPacketTooSmall),
+ sizeof(kRtpPacketTooSmall), false));
+}
+
+TEST(SsrcMuxFilterTest, RtcpPacketTest) {
+ cricket::SsrcMuxFilter ssrc_filter;
+ EXPECT_TRUE(ssrc_filter.AddStream(StreamParams::CreateLegacy(kSsrc1)));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketCompoundSrSdesSsrc1),
+ sizeof(kRtcpPacketCompoundSrSdesSsrc1), true));
+ EXPECT_TRUE(ssrc_filter.AddStream(StreamParams::CreateLegacy(kSsrc2)));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketSrSsrc2),
+ sizeof(kRtcpPacketSrSsrc2), true));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketSdesSsrc2),
+ sizeof(kRtcpPacketSdesSsrc2), true));
+ EXPECT_TRUE(ssrc_filter.RemoveStream(kSsrc2));
+ // RTCP Packets other than SR and RR are demuxed regardless of SSRC.
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketSdesSsrc2),
+ sizeof(kRtcpPacketSdesSsrc2), true));
+  // RTCP packets with the 'special' SSRC 0x01 are also demuxed.
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketSrSsrc01),
+ sizeof(kRtcpPacketSrSsrc01), true));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketSrSsrc2),
+ sizeof(kRtcpPacketSrSsrc2), true));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketFixedHeaderOnly),
+ sizeof(kRtcpPacketFixedHeaderOnly), true));
+ EXPECT_FALSE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketTooSmall),
+ sizeof(kRtcpPacketTooSmall), true));
+ EXPECT_TRUE(ssrc_filter.DemuxPacket(
+ reinterpret_cast<const char*>(kRtcpPacketNonCompoundRtcpPliFeedback),
+ sizeof(kRtcpPacketNonCompoundRtcpPliFeedback), true));
+}
diff --git a/chromium/third_party/libjingle/source/talk/session/media/typewrapping.h.pump b/chromium/third_party/libjingle/source/talk/session/media/typewrapping.h.pump
new file mode 100644
index 00000000000..3b529277fce
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/typewrapping.h.pump
@@ -0,0 +1,297 @@
+// To generate typewrapping.h from typewrapping.h.pump, execute:
+// /home/build/google3/third_party/gtest/scripts/pump.py typewrapping.h.pump
+
+// Copyright 2009 Google Inc.
+// Author: tschmelcher@google.com (Tristan Schmelcher)
+//
+// A template meta-programming framework for customizable rule-based
+// type-checking of type wrappers and wrapper functions.
+//
+// This framework is useful in a scenario where there are a set of types that
+// you choose to "wrap" by implementing new preferred types such that the new
+// and the old can be converted back and forth in some way, but you already have
+// a library of functions that expect the original types. Example:
+//
+// Type A wraps X
+// Type B wraps Y
+// Type C wraps Z
+//
+// And function X Foo(Y, Z) exists.
+//
+// Since A, B, and C are preferred, you choose to implement a wrapper function
+// with this interface:
+//
+// A Foo2(B, C)
+//
+// However, this can lead to subtle discrepancies, because if the interface to
+// Foo ever changes then Foo2 may become out-of-sync. e.g., Foo might have
+// originally returned void, but later is changed to return an error code. If
+// the programmer forgets to change Foo2, the code will probably still work, but
+// with an implicit cast to void inserted by the compiler, potentially leading
+// to run-time errors or errors in usage.
+//
+// The purpose of this library is to prevent these discrepancies from occurring.
+// You use it as follows:
+//
+// First, declare a new wrapping ruleset:
+//
+// DECLARE_WRAPPING_RULESET(ruleset_name)
+//
+// Then declare rules on what types wrap which other types and how to convert
+// them:
+//
+// DECLARE_WRAPPER(ruleset_name, A, X, variable_name, wrapping_code,
+// unwrapping_code)
+//
+// Where wrapping_code and unwrapping_code are expressions giving the code to
+// use to wrap and unwrap a variable with the name "variable_name". There are
+// also some helper macros to declare common wrapping schemes.
+//
+// Then implement your wrapped functions like this:
+//
+// A Foo_Wrapped(B b, C c) {
+// return WRAP_CALL2(ruleset_name, A, Foo, B, b, C, c);
+// }
+//
+// WRAP_CALL2 will unwrap b and c (if B and C are wrapped types) and call Foo,
+// then wrap the result to type A if different from the return type. More
+// importantly, if the types in Foo's interface do not _exactly_ match the
+// unwrapped forms of A, B, and C (after typedef-equivalence), then you will get
+// a compile-time error for a static_cast from the real function type to the
+// expected one (except on Mac where this check is infeasible), and with no icky
+// template instantiation errors either!
+//
+// There are also macros to wrap/unwrap individual values according to whichever
+// rule applies to their types:
+//
+// WRAP(ruleset_name, A, X, value) // Compile-time error if no associated rule.
+//
+// UNWRAP(ruleset_name, A, value) // Infers X. If A is not a wrapper, no change.
+//
+// UNWRAP_TYPE(ruleset_name, A) // Evaluates to X.
+//
+//
+// Essentially, the library works by "storing" the DECLARE_WRAPPER calls in
+// template specializations. When the wrapper or unwrapper is invoked, the
+// normal C++ template system essentially "looks up" the rule for the given
+// type(s).
+//
+// All of the auto-generated code can be inlined to produce zero impact on
+// run-time performance and code size (though some compilers may require
+// gentle encouragement in order for them to do so).
+
+#ifndef TALK_SESSION_PHONE_TYPEWRAPPING_H_
+#define TALK_SESSION_PHONE_TYPEWRAPPING_H_
+
+#include "talk/base/common.h"
+
+#ifdef OSX
+// XCode's GCC doesn't respect typedef-equivalence when casting function pointer
+// types, so we can't enforce that the wrapped function signatures strictly
+// match the expected types. Instead we have to forego the nice user-friendly
+// static_cast check (because it will spuriously fail) and make the Call()
+// function into a member template below.
+#define CAST_FUNCTION_(function, ...) \
+ function
+#else
+#define CAST_FUNCTION_(function, ...) \
+ static_cast<__VA_ARGS__>(function)
+#endif
+
+// Internal helper macros.
+#define SMART_WRAPPER_(wrapper, toType, fromType, from) \
+ (wrapper<toType, fromType>::Wrap(from))
+
+#define SMART_UNWRAPPER_(unwrapper, fromType, from) \
+ (unwrapper<fromType>::Unwrap(from))
+
+#define SMART_UNWRAPPER_TYPE_(unwrapper, fromType) \
+ typename unwrapper<fromType>::ToType
+
+$var n = 27
+$range i 0..n
+
+$for i [[
+$range j 1..i
+
+// The code that follows wraps calls to $i-argument functions, unwrapping the
+// arguments and wrapping the return value as needed.
+
+// The usual case.
+template<
+ template <typename ToType, typename FromType> class Wrapper,
+ template <typename FromType> class Unwrapper,
+ typename ReturnType$for j [[,
+ typename ArgType$j]]>
+class SmartFunctionWrapper$i {
+ public:
+ typedef SMART_UNWRAPPER_TYPE_(Unwrapper, ReturnType) OriginalReturnType;
+
+$for j [[
+ typedef SMART_UNWRAPPER_TYPE_(Unwrapper, ArgType$j) OriginalArgType$j;
+
+]]
+ typedef OriginalReturnType (*OriginalFunctionType)($for j , [[
+
+ OriginalArgType$j]]);
+
+#ifdef OSX
+ template <typename F>
+ static FORCE_INLINE ReturnType Call(F function
+#else
+ static FORCE_INLINE ReturnType Call(OriginalFunctionType function
+#endif
+ $for j [[,
+ ArgType$j v$j]]) {
+ return SMART_WRAPPER_(Wrapper, ReturnType, OriginalReturnType,
+ (*function)($for j , [[
+
+ SMART_UNWRAPPER_(Unwrapper, ArgType$j, v$j)]]));
+ }
+};
+
+// Special case for functions that return void. (SMART_WRAPPER_ involves
+// passing the unwrapped value in a function call, which is not a legal thing to
+// do with void, so we need a special case here that doesn't call
+// SMART_WRAPPER_()).
+template<
+ template <typename ToType, typename FromType> class Wrapper,
+ template <typename FromType> class Unwrapper$for j [[,
+ typename ArgType$j]]>
+class SmartFunctionWrapper$i<
+ Wrapper,
+ Unwrapper,
+ void$for j [[,
+ ArgType$j]]> {
+ public:
+ typedef void OriginalReturnType;
+
+$for j [[
+ typedef SMART_UNWRAPPER_TYPE_(Unwrapper, ArgType$j) OriginalArgType$j;
+
+]]
+ typedef OriginalReturnType (*OriginalFunctionType)($for j , [[
+
+ OriginalArgType$j]]);
+
+#ifdef OSX
+ template <typename F>
+ static FORCE_INLINE void Call(F function
+#else
+ static FORCE_INLINE void Call(OriginalFunctionType function
+#endif
+ $for j [[,
+ ArgType$j v$j]]) {
+ (*function)($for j , [[
+
+ SMART_UNWRAPPER_(Unwrapper, ArgType$j, v$j)]]);
+ }
+};
+
+
+]]
+// Programmer interface follows. Only macros below here should be used outside
+// this file.
+
+#define DECLARE_WRAPPING_RULESET(ruleSet) \
+ namespace ruleSet { \
+\
+ /* SmartWrapper is for wrapping values. */ \
+ template<typename ToType, typename FromType> \
+ class SmartWrapper; \
+\
+ /* Special case where the types are the same. */ \
+ template<typename T1> \
+ class SmartWrapper<T1, T1> { \
+ public: \
+ static FORCE_INLINE T1 Wrap(T1 from) { \
+ return from; \
+ } \
+ }; \
+\
+ /* Class for unwrapping (i.e., going to the original value). This is done
+ function-style rather than predicate-style. The default rule is to leave
+ the type unchanged. */ \
+ template<typename FromType> \
+ class SmartUnwrapper { \
+ public: \
+ typedef FromType ToType; \
+ static FORCE_INLINE ToType Unwrap(FromType from) { \
+ return from; \
+ } \
+ }; \
+\
+ }
+
+// Declares a wrapping rule.
+#define DECLARE_WRAPPER(ruleSet, wrappedType, unwrappedType, var, wrapCode, unwrapCode) \
+ namespace ruleSet { \
+\
+ template<> \
+ class SmartWrapper<wrappedType, unwrappedType> { \
+ public: \
+ static FORCE_INLINE wrappedType Wrap(unwrappedType var) { \
+ return wrapCode; \
+ } \
+ }; \
+\
+ template<> \
+ class SmartUnwrapper<wrappedType> { \
+ public: \
+ typedef unwrappedType ToType; \
+ static FORCE_INLINE unwrappedType Unwrap(wrappedType var) { \
+ return unwrapCode; \
+ } \
+ }; \
+\
+ }
+
+// Helper macro for declaring a wrapper that wraps/unwraps with reinterpret_cast<>.
+#define DECLARE_WRAPPER_BY_REINTERPRET_CAST(ruleSet, wrappedType, unwrappedType) \
+ DECLARE_WRAPPER(ruleSet, wrappedType, unwrappedType, FROM, reinterpret_cast<wrappedType>(FROM), reinterpret_cast<unwrappedType>(FROM))
+
+// Helper macro for declaring a wrapper that wraps/unwraps implicitly.
+#define DECLARE_WRAPPER_BY_IMPLICIT_CAST(ruleSet, wrappedType, unwrappedType) \
+ DECLARE_WRAPPER(ruleSet, wrappedType, unwrappedType, FROM, FROM, FROM)
+
+// Helper macro for declaring that the pointer types for one type wrap the pointer types for another type.
+#define DECLARE_POINTER_WRAPPER(ruleSet, wrappedType, unwrappedType) \
+ DECLARE_WRAPPER_BY_REINTERPRET_CAST(ruleSet, wrappedType*, unwrappedType*) \
+ DECLARE_WRAPPER_BY_REINTERPRET_CAST(ruleSet, const wrappedType*, const unwrappedType*) \
+ DECLARE_WRAPPER_BY_REINTERPRET_CAST(ruleSet, wrappedType* const, unwrappedType* const) \
+ DECLARE_WRAPPER_BY_REINTERPRET_CAST(ruleSet, const wrappedType* const, const unwrappedType* const) \
+
+// Macro to wrap a single value.
+#define WRAP(ruleSet, toType, fromType, from) \
+ SMART_WRAPPER_(ruleSet::SmartWrapper, toType, fromType, from)
+
+// Macro to unwrap a single value.
+#define UNWRAP(ruleSet, fromType, from) \
+ SMART_UNWRAPPER_(ruleSet::SmartUnwrapper, fromType, from)
+
+// Macro to get the unwrapped form of a type.
+#define UNWRAP_TYPE(ruleSet, fromType) \
+ SMART_UNWRAPPER_TYPE_(ruleSet::SmartUnwrapper, from)
+
+// Macros to wrap function calls.
+
+$for i [[
+$range j 1..i
+#define WRAP_CALL$i(ruleSet, toType, function$for j [[, argType$j, arg$j]]) \
+ (SmartFunctionWrapper$i< \
+ ruleSet::SmartWrapper, \
+ ruleSet::SmartUnwrapper, \
+ toType$for j [[, \
+ argType$j]]>::Call( \
+ CAST_FUNCTION_( \
+ &function, \
+ SmartFunctionWrapper$i< \
+ ruleSet::SmartWrapper, \
+ ruleSet::SmartUnwrapper, \
+ toType$for j [[, \
+ argType$j]]>::OriginalFunctionType)$for j [[, \
+ arg$j]]))
+
+]]
+
+#endif // TALK_SESSION_PHONE_TYPEWRAPPINGHELPERS_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.cc b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.cc
new file mode 100644
index 00000000000..3c5d387b835
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.cc
@@ -0,0 +1,123 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/session/media/typingmonitor.h"
+
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+#include "talk/session/media/channel.h"
+
+namespace cricket {
+
+TypingMonitor::TypingMonitor(VoiceChannel* channel,
+ talk_base::Thread* worker_thread,
+ const TypingMonitorOptions& settings)
+ : channel_(channel),
+ worker_thread_(worker_thread),
+ mute_period_(settings.mute_period),
+ muted_at_(0),
+ has_pending_unmute_(false) {
+ channel_->media_channel()->SignalMediaError.connect(
+ this, &TypingMonitor::OnVoiceChannelError);
+ channel_->media_channel()->SetTypingDetectionParameters(
+ settings.time_window, settings.cost_per_typing,
+ settings.reporting_threshold, settings.penalty_decay,
+ settings.type_event_delay);
+}
+
+TypingMonitor::~TypingMonitor() {
+ // Shortcut any pending unmutes.
+ if (has_pending_unmute_) {
+ talk_base::MessageList messages;
+ worker_thread_->Clear(this, 0, &messages);
+ ASSERT(messages.size() == 1);
+ channel_->MuteStream(0, false);
+ SignalMuted(channel_, false);
+ }
+}
+
+void TypingMonitor::OnVoiceChannelError(uint32 ssrc,
+ VoiceMediaChannel::Error error) {
+ if (error == VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED &&
+ !channel_->IsStreamMuted(0)) {
+ // Please be careful and cognizant about threading issues when editing this
+ // code. The MuteStream() call below is a ::Send and is synchronous as well
+ // as the muted signal that comes from this. This function can be called
+ // from any thread.
+
+ // TODO(perkj): Refactor TypingMonitor and the MediaChannel to handle
+ // multiple sending audio streams. SSRC 0 means the default sending audio
+ // channel.
+ channel_->MuteStream(0, true);
+ SignalMuted(channel_, true);
+ has_pending_unmute_ = true;
+ muted_at_ = talk_base::Time();
+
+ worker_thread_->PostDelayed(mute_period_, this, 0);
+ LOG(LS_INFO) << "Muting for at least " << mute_period_ << "ms.";
+ }
+}
+
+/**
+ * If we mute due to detected typing and the user also mutes during our waiting
+ * period, we don't want to undo their mute. So, clear our callback. Should
+ * be called on the worker_thread.
+ */
+void TypingMonitor::OnChannelMuted() {
+ if (has_pending_unmute_) {
+ talk_base::MessageList removed;
+ worker_thread_->Clear(this, 0, &removed);
+ ASSERT(removed.size() == 1);
+ has_pending_unmute_ = false;
+ }
+}
+
+/**
+ * When the specified mute period has elapsed, unmute, or, if the user kept
+ * typing after the initial warning fired, wait for the remainder of time to
+ * elapse since they finished and try to unmute again. Should be called on the
+ * worker thread.
+ */
+void TypingMonitor::OnMessage(talk_base::Message* msg) {
+ if (!channel_->IsStreamMuted(0) || !has_pending_unmute_) return;
+ int silence_period = channel_->media_channel()->GetTimeSinceLastTyping();
+ int expiry_time = mute_period_ - silence_period;
+ if (silence_period < 0 || expiry_time < 50) {
+ LOG(LS_INFO) << "Mute timeout hit, last typing " << silence_period
+ << "ms ago, unmuting after " << talk_base::TimeSince(muted_at_)
+ << "ms total.";
+ has_pending_unmute_ = false;
+ channel_->MuteStream(0, false);
+ SignalMuted(channel_, false);
+ } else {
+ LOG(LS_INFO) << "Mute timeout hit, last typing " << silence_period
+ << "ms ago, check again in " << expiry_time << "ms.";
+ talk_base::Thread::Current()->PostDelayed(expiry_time, this, 0);
+ }
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.h b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.h
new file mode 100644
index 00000000000..c9b64e79c3d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor.h
@@ -0,0 +1,84 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_MEDIA_TYPINGMONITOR_H_
+#define TALK_SESSION_MEDIA_TYPINGMONITOR_H_
+
+#include "talk/base/messagehandler.h"
+#include "talk/media/base/mediachannel.h"
+
+namespace talk_base {
+class Thread;
+}
+
+namespace cricket {
+
+class VoiceChannel;
+class BaseChannel;
+
+struct TypingMonitorOptions {
+ int cost_per_typing;
+ int mute_period;
+ int penalty_decay;
+ int reporting_threshold;
+ int time_window;
+ int type_event_delay;
+ size_t min_participants;
+};
+
+/**
+ * An object that observes a channel and listens for typing detection warnings,
+ * which can be configured to mute audio capture of that channel for some period
+ * of time. The purpose is to automatically mute someone if they are disturbing
+ * a conference with loud keystroke audio signals.
+ */
+class TypingMonitor
+ : public talk_base::MessageHandler, public sigslot::has_slots<> {
+ public:
+ TypingMonitor(VoiceChannel* channel, talk_base::Thread* worker_thread,
+ const TypingMonitorOptions& params);
+ ~TypingMonitor();
+
+ sigslot::signal2<BaseChannel*, bool> SignalMuted;
+
+ void OnChannelMuted();
+
+ private:
+ void OnVoiceChannelError(uint32 ssrc, VoiceMediaChannel::Error error);
+ void OnMessage(talk_base::Message* msg);
+
+ VoiceChannel* channel_;
+ talk_base::Thread* worker_thread_;
+ int mute_period_;
+ int muted_at_;
+ bool has_pending_unmute_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_MEDIA_TYPINGMONITOR_H_
+
diff --git a/chromium/third_party/libjingle/source/talk/session/media/typingmonitor_unittest.cc b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor_unittest.cc
new file mode 100644
index 00000000000..eb8c5bc542a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/typingmonitor_unittest.cc
@@ -0,0 +1,92 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/gunit.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channel.h"
+#include "talk/session/media/currentspeakermonitor.h"
+#include "talk/session/media/typingmonitor.h"
+
+namespace cricket {
+
+class TypingMonitorTest : public testing::Test {
+ protected:
+ TypingMonitorTest() : session_(true) {
+ vc_.reset(new VoiceChannel(talk_base::Thread::Current(), &engine_,
+ engine_.CreateChannel(), &session_, "", false));
+ engine_.GetVoiceChannel(0)->set_time_since_last_typing(1000);
+
+ TypingMonitorOptions settings = {10, 20, 30, 40, 50};
+ monitor_.reset(new TypingMonitor(vc_.get(),
+ talk_base::Thread::Current(),
+ settings));
+ }
+
+ void TearDown() {
+ vc_.reset();
+ }
+
+ talk_base::scoped_ptr<TypingMonitor> monitor_;
+ talk_base::scoped_ptr<VoiceChannel> vc_;
+ FakeMediaEngine engine_;
+ FakeSession session_;
+};
+
+TEST_F(TypingMonitorTest, TestTriggerMute) {
+ EXPECT_FALSE(vc_->IsStreamMuted(0));
+ EXPECT_FALSE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+
+ engine_.GetVoiceChannel(0)->TriggerError(0, VoiceMediaChannel::ERROR_OTHER);
+ EXPECT_FALSE(vc_->IsStreamMuted(0));
+ EXPECT_FALSE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+
+ engine_.GetVoiceChannel(0)->TriggerError(
+ 0, VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED);
+ EXPECT_TRUE(vc_->IsStreamMuted(0));
+ EXPECT_TRUE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+
+ EXPECT_TRUE_WAIT(!vc_->IsStreamMuted(0) &&
+ !engine_.GetVoiceChannel(0)->IsStreamMuted(0), 100);
+}
+
+TEST_F(TypingMonitorTest, TestResetMonitor) {
+ engine_.GetVoiceChannel(0)->set_time_since_last_typing(1000);
+ EXPECT_FALSE(vc_->IsStreamMuted(0));
+ EXPECT_FALSE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+
+ engine_.GetVoiceChannel(0)->TriggerError(
+ 0, VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED);
+ EXPECT_TRUE(vc_->IsStreamMuted(0));
+ EXPECT_TRUE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+
+ monitor_.reset();
+ EXPECT_FALSE(vc_->IsStreamMuted(0));
+ EXPECT_FALSE(engine_.GetVoiceChannel(0)->IsStreamMuted(0));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/media/voicechannel.h b/chromium/third_party/libjingle/source/talk/session/media/voicechannel.h
new file mode 100644
index 00000000000..6c1b6afdd55
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/media/voicechannel.h
@@ -0,0 +1,33 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VOICECHANNEL_H_
+#define _VOICECHANNEL_H_
+
+#include "talk/session/media/channel.h"
+
+#endif // _VOICECHANNEL_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.cc b/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.cc
new file mode 100644
index 00000000000..8b9a19f0b32
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.cc
@@ -0,0 +1,600 @@
+/*
+ * libjingle
+ * Copyright 2004--2006, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include "talk/base/basictypes.h"
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stringutils.h"
+#include "talk/p2p/base/candidate.h"
+#include "talk/p2p/base/transportchannel.h"
+#include "pseudotcpchannel.h"
+
+using namespace talk_base;
+
+namespace cricket {
+
+extern const talk_base::ConstantLabel SESSION_STATES[];
+
+// MSG_WK_* - worker thread messages
+// MSG_ST_* - stream thread messages
+// MSG_SI_* - signal thread messages
+
+// Message IDs posted between threads. The prefix names the thread that
+// handles the message: MSG_WK_* on the worker thread, MSG_ST_* on the
+// stream thread, MSG_SI_* on the signal thread.
+enum {
+  MSG_WK_CLOCK = 1,
+  MSG_WK_PURGE,
+  MSG_ST_EVENT,
+  MSG_SI_DESTROYCHANNEL,
+  MSG_SI_DESTROY,
+};
+
+// Payload for MSG_ST_EVENT: a StreamInterface event bitmask (SE_*) plus an
+// optional error code, delivered to the stream consumer on the stream thread.
+struct EventData : public MessageData {
+  int event, error;
+  EventData(int ev, int err = 0) : event(ev), error(err) { }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// PseudoTcpChannel::InternalStream
+///////////////////////////////////////////////////////////////////////////////
+
+// Thin StreamInterface facade handed out by PseudoTcpChannel::GetStream().
+// Every call is forwarded to the owning PseudoTcpChannel until Close()
+// severs the link; after that the stream reports closed/not-connected.
+class PseudoTcpChannel::InternalStream : public StreamInterface {
+public:
+  InternalStream(PseudoTcpChannel* parent);
+  virtual ~InternalStream();
+
+  virtual StreamState GetState() const;
+  virtual StreamResult Read(void* buffer, size_t buffer_len,
+                            size_t* read, int* error);
+  virtual StreamResult Write(const void* data, size_t data_len,
+                             size_t* written, int* error);
+  virtual void Close();
+
+private:
+  // parent_ is accessed and modified exclusively on the event thread, to
+  // avoid thread contention. This means that the PseudoTcpChannel cannot go
+  // away until after it receives a Close() from TunnelStream.
+  PseudoTcpChannel* parent_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// PseudoTcpChannel
+// Member object lifetime summaries:
+// session_ - passed in constructor, cleared when channel_ goes away.
+// channel_ - created in Connect, destroyed when session_ or tcp_ goes away.
+// tcp_ - created in Connect, destroyed when channel_ goes away, or connection
+// closes.
+// worker_thread_ - created when channel_ is created, purged when channel_ is
+// destroyed.
+// stream_ - created in GetStream, destroyed by owner at arbitrary time.
+// this - created in constructor, destroyed when worker_thread_ and stream_
+// are both gone.
+///////////////////////////////////////////////////////////////////////////////
+
+//
+// Signal thread methods
+//
+
+// Constructed on the signal thread. The worker thread is not known until
+// Connect(); channel_/tcp_/stream_ are all created lazily later on.
+PseudoTcpChannel::PseudoTcpChannel(Thread* stream_thread, Session* session)
+  : signal_thread_(session->session_manager()->signaling_thread()),
+    worker_thread_(NULL),
+    stream_thread_(stream_thread),
+    session_(session), channel_(NULL), tcp_(NULL), stream_(NULL),
+    stream_readable_(false), pending_read_event_(false),
+    ready_to_connect_(false) {
+  ASSERT(signal_thread_->IsCurrent());
+  ASSERT(NULL != session_);
+}
+
+// Runs via MSG_SI_DESTROY once both the stream consumer and the worker
+// thread are done with this object; by then every member must be cleared.
+PseudoTcpChannel::~PseudoTcpChannel() {
+  ASSERT(signal_thread_->IsCurrent());
+  ASSERT(worker_thread_ == NULL);
+  ASSERT(session_ == NULL);
+  ASSERT(channel_ == NULL);
+  ASSERT(stream_ == NULL);
+  ASSERT(tcp_ == NULL);
+}
+
+// Creates the transport channel and the PseudoTcp state machine for this
+// tunnel. Returns false if Connect() was already called (channel_ set).
+// Signal thread only.
+bool PseudoTcpChannel::Connect(const std::string& content_name,
+                               const std::string& channel_name,
+                               int component) {
+  ASSERT(signal_thread_->IsCurrent());
+  CritScope lock(&cs_);
+
+  if (channel_)
+    return false;
+
+  ASSERT(session_ != NULL);
+  worker_thread_ = session_->session_manager()->worker_thread();
+  content_name_ = content_name;
+  channel_ = session_->CreateChannel(
+      content_name, channel_name, component);
+  channel_name_ = channel_name;
+  channel_->SetOption(Socket::OPT_DONTFRAGMENT, 1);
+
+  // Wire up the channel callbacks before PseudoTcp exists so no
+  // notification can be missed.
+  channel_->SignalDestroyed.connect(this,
+    &PseudoTcpChannel::OnChannelDestroyed);
+  channel_->SignalWritableState.connect(this,
+    &PseudoTcpChannel::OnChannelWritableState);
+  channel_->SignalReadPacket.connect(this,
+    &PseudoTcpChannel::OnChannelRead);
+  channel_->SignalRouteChange.connect(this,
+    &PseudoTcpChannel::OnChannelConnectionChanged);
+
+  ASSERT(tcp_ == NULL);
+  tcp_ = new PseudoTcp(this, 0);
+  if (session_->initiator()) {
+    // Since we may try several protocols and network adapters that won't
+    // work, wait until we get our first writable notification before
+    // initiating TCP negotiation.
+    ready_to_connect_ = true;
+  }
+
+  return true;
+}
+
+// Returns the stream facade for this tunnel, creating it on first request.
+// The caller owns the returned stream (see header notes). Signal thread only.
+StreamInterface* PseudoTcpChannel::GetStream() {
+  ASSERT(signal_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  ASSERT(NULL != session_);
+  if (stream_ == NULL) {
+    stream_ = new PseudoTcpChannel::InternalStream(this);
+  }
+  return stream_;
+}
+
+// The transport channel is going away: stop all worker-thread activity,
+// notify the stream consumer, and shut down PseudoTcp. Signal thread only.
+void PseudoTcpChannel::OnChannelDestroyed(TransportChannel* channel) {
+  LOG_F(LS_INFO) << "(" << channel->component() << ")";
+  ASSERT(signal_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  ASSERT(channel == channel_);
+  signal_thread_->Clear(this, MSG_SI_DESTROYCHANNEL);
+  // When MSG_WK_PURGE is received, we know there will be no more messages from
+  // the worker thread.
+  worker_thread_->Clear(this, MSG_WK_CLOCK);
+  worker_thread_->Post(this, MSG_WK_PURGE);
+  session_ = NULL;
+  channel_ = NULL;
+  // Tell the stream consumer about the close unless TCP already reported it.
+  if ((stream_ != NULL)
+      && ((tcp_ == NULL) || (tcp_->State() != PseudoTcp::TCP_CLOSED)))
+    stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_CLOSE, 0));
+  if (tcp_) {
+    tcp_->Close(true);
+    AdjustClock();
+  }
+  SignalChannelClosed(this);
+}
+
+// Handles the session dying before Connect() ever created a channel; in
+// that case no channel-destroyed callback will fire, so close the stream
+// here. Safe to call from any thread holding no locks.
+void PseudoTcpChannel::OnSessionTerminate(Session* session) {
+  // When the session terminates before we even connected
+  CritScope lock(&cs_);
+  if (session_ != NULL && channel_ == NULL) {
+    ASSERT(session == session_);
+    ASSERT(worker_thread_ == NULL);
+    ASSERT(tcp_ == NULL);
+    LOG(LS_INFO) << "Destroying unconnected PseudoTcpChannel";
+    session_ = NULL;
+    if (stream_ != NULL)
+      stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_CLOSE, -1));
+  }
+
+  // Even though session_ is being destroyed, we mustn't clear the pointer,
+  // since we'll need it to tear down channel_.
+  //
+  // TODO: Is it always the case that if channel_ != NULL then we'll get
+  // a channel-destroyed notification?
+}
+
+// Reads a PseudoTcp tuning option; Connect() must have run (tcp_ != NULL).
+void PseudoTcpChannel::GetOption(PseudoTcp::Option opt, int* value) {
+  ASSERT(signal_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  ASSERT(tcp_ != NULL);
+  tcp_->GetOption(opt, value);
+}
+
+// Sets a PseudoTcp tuning option; Connect() must have run (tcp_ != NULL).
+void PseudoTcpChannel::SetOption(PseudoTcp::Option opt, int value) {
+  ASSERT(signal_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  ASSERT(tcp_ != NULL);
+  tcp_->SetOption(opt, value);
+}
+
+//
+// Stream thread methods
+//
+
+// Maps the channel's lifecycle and the PseudoTcp state machine onto the
+// coarser StreamState values. Stream thread only.
+StreamState PseudoTcpChannel::GetState() const {
+  ASSERT(stream_ != NULL && stream_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  // The channel has already been torn down.
+  if (session_ == NULL)
+    return SS_CLOSED;
+  // Connect() hasn't created PseudoTcp yet; still negotiating.
+  if (tcp_ == NULL)
+    return SS_OPENING;
+  if (tcp_->State() == PseudoTcp::TCP_ESTABLISHED)
+    return SS_OPEN;
+  if (tcp_->State() == PseudoTcp::TCP_LISTEN ||
+      tcp_->State() == PseudoTcp::TCP_SYN_SENT ||
+      tcp_->State() == PseudoTcp::TCP_SYN_RECEIVED)
+    return SS_OPENING;
+  return SS_CLOSED;  // TCP_CLOSED or anything unexpected.
+}
+
+// Reads buffered data out of PseudoTcp. Returns SR_BLOCK until the TCP
+// session exists and has data; on success re-posts an SE_READ event because
+// PseudoTcp itself only signals Readable once. Stream thread only.
+StreamResult PseudoTcpChannel::Read(void* buffer, size_t buffer_len,
+                                    size_t* read, int* error) {
+  ASSERT(stream_ != NULL && stream_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  if (!tcp_)
+    return SR_BLOCK;
+
+  stream_readable_ = false;
+  int result = tcp_->Recv(static_cast<char*>(buffer), buffer_len);
+  //LOG_F(LS_VERBOSE) << "Recv returned: " << result;
+  if (result > 0) {
+    if (read)
+      *read = result;
+    // PseudoTcp doesn't currently support repeated Readable signals. Simulate
+    // them here.
+    stream_readable_ = true;
+    if (!pending_read_event_) {
+      pending_read_event_ = true;
+      stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_READ), true);
+    }
+    return SR_SUCCESS;
+  } else if (IsBlockingError(tcp_->GetError())) {
+    return SR_BLOCK;
+  } else {
+    if (error)
+      *error = tcp_->GetError();
+    return SR_ERROR;
+  }
+  // This spot is never reached.
+}
+
+// Queues data into PseudoTcp's send buffer. Returns SR_BLOCK before the TCP
+// session exists or when the buffer is full. Stream thread only.
+StreamResult PseudoTcpChannel::Write(const void* data, size_t data_len,
+                                     size_t* written, int* error) {
+  ASSERT(stream_ != NULL && stream_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  if (tcp_ == NULL)
+    return SR_BLOCK;
+  const int sent = tcp_->Send(static_cast<const char*>(data), data_len);
+  if (sent > 0) {
+    if (written != NULL)
+      *written = sent;
+    return SR_SUCCESS;
+  }
+  // Send failed: distinguish a transient would-block from a hard error.
+  const int err = tcp_->GetError();
+  if (IsBlockingError(err))
+    return SR_BLOCK;
+  if (error != NULL)
+    *error = err;
+  return SR_ERROR;
+}
+
+// The stream consumer is done: detach the stream, start a graceful TCP
+// shutdown (or self-destruct immediately if TCP is already gone).
+// Stream thread only.
+void PseudoTcpChannel::Close() {
+  ASSERT(stream_ != NULL && stream_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  stream_ = NULL;
+  // Clear out any pending event notifications
+  stream_thread_->Clear(this, MSG_ST_EVENT);
+  if (tcp_) {
+    tcp_->Close(false);
+    AdjustClock();
+  } else {
+    CheckDestroy();
+  }
+}
+
+//
+// Worker thread methods
+//
+
+// First writable notification from the transport: if we are the initiator
+// waiting to connect, kick off the PseudoTcp handshake. Worker thread only.
+void PseudoTcpChannel::OnChannelWritableState(TransportChannel* channel) {
+  LOG_F(LS_VERBOSE) << "[" << channel_name_ << "]";
+  ASSERT(worker_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  if (!channel_) {
+    LOG_F(LS_WARNING) << "NULL channel";
+    return;
+  }
+  ASSERT(channel == channel_);
+  if (!tcp_) {
+    LOG_F(LS_WARNING) << "NULL tcp";
+    return;
+  }
+  if (!ready_to_connect_ || !channel->writable())
+    return;
+
+  // Only initiate once; further writable transitions are ignored.
+  ready_to_connect_ = false;
+  tcp_->Connect();
+  AdjustClock();
+}
+
+// Incoming packet from the transport: feed it to PseudoTcp and reschedule
+// its retransmission clock. Worker thread only.
+void PseudoTcpChannel::OnChannelRead(TransportChannel* channel,
+                                     const char* data, size_t size, int flags) {
+  //LOG_F(LS_VERBOSE) << "(" << size << ")";
+  ASSERT(worker_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  if (!channel_) {
+    LOG_F(LS_WARNING) << "NULL channel";
+    return;
+  }
+  ASSERT(channel == channel_);
+  if (!tcp_) {
+    LOG_F(LS_WARNING) << "NULL tcp";
+    return;
+  }
+  tcp_->NotifyPacket(data, size);
+  AdjustClock();
+}
+
+// The transport's route changed: re-estimate the path MTU with a throwaway
+// UDP socket connected to the new remote candidate, falling back to a safe
+// default on failure, and tell PseudoTcp. Worker thread only.
+void PseudoTcpChannel::OnChannelConnectionChanged(TransportChannel* channel,
+                                                  const Candidate& candidate) {
+  LOG_F(LS_VERBOSE) << "[" << channel_name_ << "]";
+  ASSERT(worker_thread_->IsCurrent());
+  CritScope lock(&cs_);
+  if (!channel_) {
+    LOG_F(LS_WARNING) << "NULL channel";
+    return;
+  }
+  ASSERT(channel == channel_);
+  if (!tcp_) {
+    LOG_F(LS_WARNING) << "NULL tcp";
+    return;
+  }
+
+  uint16 mtu = 1280;  // safe default
+  int family = candidate.address().family();
+  Socket* socket =
+      worker_thread_->socketserver()->CreateAsyncSocket(family, SOCK_DGRAM);
+  // scoped_ptr frees the probe socket on every exit path below.
+  talk_base::scoped_ptr<Socket> mtu_socket(socket);
+  if (socket == NULL) {
+    LOG_F(LS_WARNING) << "Couldn't create socket while estimating MTU.";
+  } else {
+    if (mtu_socket->Connect(candidate.address()) < 0 ||
+        mtu_socket->EstimateMTU(&mtu) < 0) {
+      LOG_F(LS_WARNING) << "Failed to estimate MTU, error="
+                        << mtu_socket->GetError();
+    }
+  }
+
+  LOG_F(LS_VERBOSE) << "Using MTU of " << mtu << " bytes";
+  tcp_->NotifyMTU(mtu);
+  AdjustClock();
+}
+
+// TCP handshake completed: tell the stream consumer it can now read and
+// write. Worker thread, called with cs_ already held by PseudoTcp's caller.
+void PseudoTcpChannel::OnTcpOpen(PseudoTcp* tcp) {
+  LOG_F(LS_VERBOSE) << "[" << channel_name_ << "]";
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(worker_thread_->IsCurrent());
+  ASSERT(tcp == tcp_);
+  if (stream_) {
+    stream_readable_ = true;
+    pending_read_event_ = true;
+    stream_thread_->Post(this, MSG_ST_EVENT,
+                         new EventData(SE_OPEN | SE_READ | SE_WRITE));
+  }
+}
+
+// Data arrived in PseudoTcp's receive buffer: post a single SE_READ to the
+// stream thread, coalescing with any SE_READ already in flight
+// (pending_read_event_). Worker thread, cs_ held.
+void PseudoTcpChannel::OnTcpReadable(PseudoTcp* tcp) {
+  //LOG_F(LS_VERBOSE);
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(worker_thread_->IsCurrent());
+  ASSERT(tcp == tcp_);
+  if (stream_) {
+    stream_readable_ = true;
+    if (!pending_read_event_) {
+      pending_read_event_ = true;
+      stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_READ));
+    }
+  }
+}
+
+// Send buffer drained: let the stream consumer know it can write again.
+// Worker thread, cs_ held.
+void PseudoTcpChannel::OnTcpWriteable(PseudoTcp* tcp) {
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(worker_thread_->IsCurrent());
+  ASSERT(tcp == tcp_);
+  if (stream_ == NULL)
+    return;
+  stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_WRITE));
+}
+
+// The TCP session ended (gracefully or with nError != 0): relay the close
+// and its error code to the stream consumer. Worker thread, cs_ held.
+void PseudoTcpChannel::OnTcpClosed(PseudoTcp* tcp, uint32 nError) {
+  LOG_F(LS_VERBOSE) << "[" << channel_name_ << "]";
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(worker_thread_->IsCurrent());
+  ASSERT(tcp == tcp_);
+  if (stream_ == NULL)
+    return;
+  stream_thread_->Post(this, MSG_ST_EVENT, new EventData(SE_CLOSE, nError));
+}
+
+//
+// Multi-thread methods
+//
+
+// Central dispatcher for all cross-thread messages; each branch asserts it
+// runs on the thread its MSG_ prefix names (WK = worker, ST = stream,
+// SI = signal).
+void PseudoTcpChannel::OnMessage(Message* pmsg) {
+  if (pmsg->message_id == MSG_WK_CLOCK) {
+
+    ASSERT(worker_thread_->IsCurrent());
+    //LOG(LS_INFO) << "PseudoTcpChannel::OnMessage(MSG_WK_CLOCK)";
+    CritScope lock(&cs_);
+    // Drive PseudoTcp's retransmission timers, then schedule the next tick.
+    if (tcp_) {
+      tcp_->NotifyClock(PseudoTcp::Now());
+      AdjustClock(false);
+    }
+
+  } else if (pmsg->message_id == MSG_WK_PURGE) {
+
+    ASSERT(worker_thread_->IsCurrent());
+    LOG_F(LS_INFO) << "(MSG_WK_PURGE)";
+    // At this point, we know there are no additional worker thread messages.
+    CritScope lock(&cs_);
+    ASSERT(NULL == session_);
+    ASSERT(NULL == channel_);
+    worker_thread_ = NULL;
+    CheckDestroy();
+
+  } else if (pmsg->message_id == MSG_ST_EVENT) {
+
+    ASSERT(stream_thread_->IsCurrent());
+    //LOG(LS_INFO) << "PseudoTcpChannel::OnMessage(MSG_ST_EVENT, "
+    //             << data->event << ", " << data->error << ")";
+    ASSERT(stream_ != NULL);
+    EventData* data = static_cast<EventData*>(pmsg->pdata);
+    // Consuming an SE_READ clears the coalescing flag so the worker thread
+    // may post another one.
+    if (data->event & SE_READ) {
+      CritScope lock(&cs_);
+      pending_read_event_ = false;
+    }
+    stream_->SignalEvent(stream_, data->event, data->error);
+    delete data;
+
+  } else if (pmsg->message_id == MSG_SI_DESTROYCHANNEL) {
+
+    ASSERT(signal_thread_->IsCurrent());
+    LOG_F(LS_INFO) << "(MSG_SI_DESTROYCHANNEL)";
+    ASSERT(session_ != NULL);
+    ASSERT(channel_ != NULL);
+    // This triggers OnChannelDestroyed, which performs the real teardown.
+    session_->DestroyChannel(content_name_, channel_->component());
+
+  } else if (pmsg->message_id == MSG_SI_DESTROY) {
+
+    ASSERT(signal_thread_->IsCurrent());
+    LOG_F(LS_INFO) << "(MSG_SI_DESTROY)";
+    // The message queue is empty, so it is safe to destroy ourselves.
+    delete this;
+
+  } else {
+    ASSERT(false);
+  }
+}
+
+// Outgoing-packet hook for PseudoTcp: push the packet onto the transport
+// channel. A would-block is reported as success because PseudoTcp will
+// retransmit; EMSGSIZE asks PseudoTcp to shrink its segment size.
+IPseudoTcpNotify::WriteResult PseudoTcpChannel::TcpWritePacket(
+    PseudoTcp* tcp, const char* buffer, size_t len) {
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(tcp == tcp_);
+  ASSERT(NULL != channel_);
+  int sent = channel_->SendPacket(buffer, len);
+  if (sent > 0) {
+    //LOG_F(LS_VERBOSE) << "(" << sent << ") Sent";
+    return IPseudoTcpNotify::WR_SUCCESS;
+  } else if (IsBlockingError(channel_->GetError())) {
+    LOG_F(LS_VERBOSE) << "Blocking";
+    return IPseudoTcpNotify::WR_SUCCESS;
+  } else if (channel_->GetError() == EMSGSIZE) {
+    LOG_F(LS_ERROR) << "EMSGSIZE";
+    return IPseudoTcpNotify::WR_TOO_LARGE;
+  } else {
+    PLOG(LS_ERROR, channel_->GetError()) << "PseudoTcpChannel::TcpWritePacket";
+    ASSERT(false);
+    return IPseudoTcpNotify::WR_FAIL;
+  }
+}
+
+// Reschedules the next MSG_WK_CLOCK tick according to PseudoTcp's timers.
+// If PseudoTcp reports no next clock, the session is over: delete tcp_ and
+// ask the signal thread to tear down the transport channel. `clear` is
+// false only when called from the clock tick itself (nothing to cancel).
+void PseudoTcpChannel::AdjustClock(bool clear) {
+  ASSERT(cs_.CurrentThreadIsOwner());
+  ASSERT(NULL != tcp_);
+
+  long timeout = 0;
+  if (tcp_->GetNextClock(PseudoTcp::Now(), timeout)) {
+    ASSERT(NULL != channel_);
+    // Reset the next clock, by clearing the old and setting a new one.
+    if (clear)
+      worker_thread_->Clear(this, MSG_WK_CLOCK);
+    worker_thread_->PostDelayed(_max(timeout, 0L), this, MSG_WK_CLOCK);
+    return;
+  }
+
+  delete tcp_;
+  tcp_ = NULL;
+  ready_to_connect_ = false;
+
+  if (channel_) {
+    // If TCP has failed, no need for channel_ anymore
+    signal_thread_->Post(this, MSG_SI_DESTROYCHANNEL);
+  }
+}
+
+// Self-destruct (via MSG_SI_DESTROY) only once both the stream consumer
+// and the worker thread are finished with this object.
+void PseudoTcpChannel::CheckDestroy() {
+  ASSERT(cs_.CurrentThreadIsOwner());
+  if ((worker_thread_ == NULL) && (stream_ == NULL))
+    signal_thread_->Post(this, MSG_SI_DESTROY);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PseudoTcpChannel::InternalStream
+///////////////////////////////////////////////////////////////////////////////
+
+// Binds the facade to its owning channel; the link lasts until Close().
+PseudoTcpChannel::InternalStream::InternalStream(PseudoTcpChannel* parent)
+  : parent_(parent) {
+}
+
+// Ensure the channel is detached even if the owner deletes us without
+// calling Close() first.
+PseudoTcpChannel::InternalStream::~InternalStream() {
+  Close();
+}
+
+// After Close(), parent_ is NULL and the stream is permanently closed.
+StreamState PseudoTcpChannel::InternalStream::GetState() const {
+  return parent_ ? parent_->GetState() : SS_CLOSED;
+}
+
+// Forward to the channel; a closed stream reports not-connected.
+StreamResult PseudoTcpChannel::InternalStream::Read(
+    void* buffer, size_t buffer_len, size_t* read, int* error) {
+  if (parent_)
+    return parent_->Read(buffer, buffer_len, read, error);
+  if (error)
+    *error = ENOTCONN;
+  return SR_ERROR;
+}
+
+// Forward to the channel; a closed stream reports not-connected.
+StreamResult PseudoTcpChannel::InternalStream::Write(
+    const void* data, size_t data_len, size_t* written, int* error) {
+  if (parent_)
+    return parent_->Write(data, data_len, written, error);
+  if (error)
+    *error = ENOTCONN;
+  return SR_ERROR;
+}
+
+// Detach from the channel exactly once; tells the channel the consumer is
+// done (the channel outlives us until that notification).
+void PseudoTcpChannel::InternalStream::Close() {
+  if (parent_ == NULL)
+    return;
+  parent_->Close();
+  parent_ = NULL;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.h b/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.h
new file mode 100644
index 00000000000..a540699a5c7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/pseudotcpchannel.h
@@ -0,0 +1,140 @@
+/*
+ * libjingle
+ * Copyright 2004--2006, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_TUNNEL_PSEUDOTCPCHANNEL_H_
+#define TALK_SESSION_TUNNEL_PSEUDOTCPCHANNEL_H_
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/messagequeue.h"
+#include "talk/base/stream.h"
+#include "talk/p2p/base/pseudotcp.h"
+#include "talk/p2p/base/session.h"
+
+namespace talk_base {
+class Thread;
+}
+
+namespace cricket {
+
+class Candidate;
+class TransportChannel;
+
+///////////////////////////////////////////////////////////////////////////////
+// PseudoTcpChannel
+// Note: The PseudoTcpChannel must persist until both of:
+// 1) The StreamInterface provided via GetStream has been closed.
+// This is tracked via non-null stream_.
+// 2) The PseudoTcp session has completed.
+// This is tracked via non-null worker_thread_. When PseudoTcp is done,
+// the TransportChannel is signalled to tear-down. Once the channel is
+// torn down, the worker thread is purged.
+// These indicators are checked by CheckDestroy, invoked whenever one of them
+// changes.
+///////////////////////////////////////////////////////////////////////////////
+// PseudoTcpChannel::GetStream
+// Note: The stream pointer returned by GetStream is owned by the caller.
+// They can close & immediately delete the stream while PseudoTcpChannel still
+// has cleanup work to do. They can also close the stream but not delete it
+// until long after PseudoTcpChannel has finished. We must cope with both.
+///////////////////////////////////////////////////////////////////////////////
+
+// Runs a PseudoTcp (reliable-over-UDP) session on top of a cricket
+// TransportChannel and exposes it as a talk_base::StreamInterface.
+// See the lifetime notes above for the self-destruction protocol.
+class PseudoTcpChannel
+    : public IPseudoTcpNotify,
+      public talk_base::MessageHandler,
+      public sigslot::has_slots<> {
+ public:
+  // Signal thread methods
+  PseudoTcpChannel(talk_base::Thread* stream_thread,
+                   Session* session);
+
+  bool Connect(const std::string& content_name,
+               const std::string& channel_name,
+               int component);
+  talk_base::StreamInterface* GetStream();
+
+  // Fired (on the signal thread) when the underlying transport channel
+  // has been destroyed.
+  sigslot::signal1<PseudoTcpChannel*> SignalChannelClosed;
+
+  // Call this when the Session used to create this channel is being torn
+  // down, to ensure that things get cleaned up properly.
+  void OnSessionTerminate(Session* session);
+
+  // See the PseudoTcp class for available options.
+  void GetOption(PseudoTcp::Option opt, int* value);
+  void SetOption(PseudoTcp::Option opt, int value);
+
+ private:
+  class InternalStream;
+  friend class InternalStream;
+
+  // Private: deletion happens only via MSG_SI_DESTROY (see CheckDestroy).
+  virtual ~PseudoTcpChannel();
+
+  // Stream thread methods
+  talk_base::StreamState GetState() const;
+  talk_base::StreamResult Read(void* buffer, size_t buffer_len,
+                               size_t* read, int* error);
+  talk_base::StreamResult Write(const void* data, size_t data_len,
+                                size_t* written, int* error);
+  void Close();
+
+  // Multi-thread methods
+  void OnMessage(talk_base::Message* pmsg);
+  void AdjustClock(bool clear = true);
+  void CheckDestroy();
+
+  // Signal thread methods
+  void OnChannelDestroyed(TransportChannel* channel);
+
+  // Worker thread methods
+  void OnChannelWritableState(TransportChannel* channel);
+  void OnChannelRead(TransportChannel* channel, const char* data, size_t size,
+                     int flags);
+  void OnChannelConnectionChanged(TransportChannel* channel,
+                                  const Candidate& candidate);
+
+  // IPseudoTcpNotify callbacks (worker thread, cs_ held by caller).
+  virtual void OnTcpOpen(PseudoTcp* ptcp);
+  virtual void OnTcpReadable(PseudoTcp* ptcp);
+  virtual void OnTcpWriteable(PseudoTcp* ptcp);
+  virtual void OnTcpClosed(PseudoTcp* ptcp, uint32 nError);
+  virtual IPseudoTcpNotify::WriteResult TcpWritePacket(PseudoTcp* tcp,
+                                                       const char* buffer,
+                                                       size_t len);
+
+  // See the lifetime summary in pseudotcpchannel.cc for when each of these
+  // is created and cleared. cs_ guards all mutable state below.
+  talk_base::Thread* signal_thread_, * worker_thread_, * stream_thread_;
+  Session* session_;
+  TransportChannel* channel_;
+  std::string content_name_;
+  std::string channel_name_;
+  PseudoTcp* tcp_;
+  InternalStream* stream_;
+  bool stream_readable_, pending_read_event_;
+  bool ready_to_connect_;
+  mutable talk_base::CriticalSection cs_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_TUNNEL_PSEUDOTCPCHANNEL_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.cc b/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.cc
new file mode 100644
index 00000000000..9287d22ab5c
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.cc
@@ -0,0 +1,387 @@
+/*
+ * libjingle
+ * Copyright 2004--2008, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// SecureTunnelSessionClient and SecureTunnelSession implementation.
+
+#include "talk/session/tunnel/securetunnelsessionclient.h"
+#include "talk/base/basicdefs.h"
+#include "talk/base/basictypes.h"
+#include "talk/base/common.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/sslidentity.h"
+#include "talk/base/sslstreamadapter.h"
+#include "talk/p2p/base/transportchannel.h"
+#include "talk/xmllite/xmlelement.h"
+#include "talk/session/tunnel/pseudotcpchannel.h"
+
+namespace cricket {
+
+// XML elements and namespaces for XMPP stanzas used in content exchanges.
+
+// Namespace plus QNames for the <description> element and its children that
+// carry the tunnel type string and the PEM-encoded peer certificates.
+const char NS_SECURE_TUNNEL[] = "http://www.google.com/talk/securetunnel";
+const buzz::StaticQName QN_SECURE_TUNNEL_DESCRIPTION =
+    { NS_SECURE_TUNNEL, "description" };
+const buzz::StaticQName QN_SECURE_TUNNEL_TYPE =
+    { NS_SECURE_TUNNEL, "type" };
+const buzz::StaticQName QN_SECURE_TUNNEL_CLIENT_CERT =
+    { NS_SECURE_TUNNEL, "client-cert" };
+const buzz::StaticQName QN_SECURE_TUNNEL_SERVER_CERT =
+    { NS_SECURE_TUNNEL, "server-cert" };
+// Content name used for secure tunnel sessions.
+const char CN_SECURE_TUNNEL[] = "securetunnel";
+
+// SecureTunnelContentDescription
+
+// TunnelContentDescription is extended to hold string forms of the
+// client and server certificate, PEM encoded.
+
+// Content description for a secure tunnel: the tunnel type string plus the
+// PEM-encoded client and server certificates (either may be empty while
+// negotiation is incomplete).
+struct SecureTunnelContentDescription : public ContentDescription {
+  std::string description;
+  std::string client_pem_certificate;
+  std::string server_pem_certificate;
+
+  SecureTunnelContentDescription(const std::string& desc,
+                                 const std::string& client_pem_cert,
+                                 const std::string& server_pem_cert)
+      : description(desc),
+        client_pem_certificate(client_pem_cert),
+        server_pem_certificate(server_pem_cert) {
+  }
+  virtual ContentDescription* Copy() const {
+    return new SecureTunnelContentDescription(*this);
+  }
+};
+
+// SecureTunnelSessionClient
+
+// Registers this client for the secure-tunnel content namespace.
+SecureTunnelSessionClient::SecureTunnelSessionClient(
+    const buzz::Jid& jid, SessionManager* manager)
+    : TunnelSessionClient(jid, manager, NS_SECURE_TUNNEL) {
+}
+
+// Takes ownership of a caller-supplied identity. May be set only once, and
+// is mutually exclusive with GenerateIdentity().
+void SecureTunnelSessionClient::SetIdentity(talk_base::SSLIdentity* identity) {
+  ASSERT(identity_.get() == NULL);
+  identity_.reset(identity);
+}
+
+// Generates a fresh self-signed SSL identity for this client. Returns false
+// if key/certificate generation failed. May be called only once.
+bool SecureTunnelSessionClient::GenerateIdentity() {
+  ASSERT(identity_.get() == NULL);
+  identity_.reset(talk_base::SSLIdentity::Generate(
+      // The name on the certificate does not matter: the peer will
+      // make sure the cert it gets during SSL negotiation matches the
+      // one it got from XMPP. It would be neat to put something
+      // recognizable in there such as the JID, except this will show
+      // in clear during the SSL negotiation and so it could be a
+      // privacy issue. Specifying an empty string here causes
+      // it to use a random string.
+#ifdef _DEBUG
+      jid().Str()
+#else
+      ""
+#endif
+      ));
+  if (identity_.get() == NULL) {
+    LOG(LS_ERROR) << "Failed to generate SSL identity";
+    return false;
+  }
+  return true;
+}
+
+// Returns the identity previously installed by SetIdentity() or
+// GenerateIdentity(); asserts that one exists. Ownership is retained.
+talk_base::SSLIdentity& SecureTunnelSessionClient::GetIdentity() const {
+  ASSERT(identity_.get() != NULL);
+  return *identity_;
+}
+
+// Parses a certificate from a PEM-encoded string. Returns NULL when the
+// string is empty or fails to parse; the caller owns the returned object.
+static talk_base::SSLCertificate* ParseCertificate(
+    const std::string& pem_cert) {
+  return pem_cert.empty()
+      ? NULL
+      : talk_base::SSLCertificate::FromPEMString(pem_cert);
+}
+
+// Factory override: produce the SSL-wrapped session variant.
+TunnelSession* SecureTunnelSessionClient::MakeTunnelSession(
+    Session* session, talk_base::Thread* stream_thread,
+    TunnelSessionRole role) {
+  return new SecureTunnelSession(this, session, stream_thread, role);
+}
+
+// Locates the secure-tunnel content in a session description. On success,
+// fills in the content name and a pointer to its description and returns
+// true; returns false if no such content exists.
+bool FindSecureTunnelContent(const cricket::SessionDescription* sdesc,
+                             std::string* name,
+                             const SecureTunnelContentDescription** content) {
+  const ContentInfo* info = sdesc->FirstContentByType(NS_SECURE_TUNNEL);
+  if (info == NULL)
+    return false;
+  *name = info->name;
+  *content = static_cast<const SecureTunnelContentDescription*>(
+      info->description);
+  return true;
+}
+
+// Handles an incoming tunnel offer: validates the peer's certificate from
+// the remote description, declining the session if the description or
+// certificate is missing/invalid; otherwise surfaces the offer via
+// SignalIncomingTunnel.
+void SecureTunnelSessionClient::OnIncomingTunnel(const buzz::Jid &jid,
+                                                 Session *session) {
+  std::string content_name;
+  const SecureTunnelContentDescription* content = NULL;
+  if (!FindSecureTunnelContent(session->remote_description(),
+                               &content_name, &content)) {
+    // Shouldn't happen for a session routed to us, but ASSERT compiles out
+    // in release builds: bail out instead of dereferencing NULL below.
+    ASSERT(false);
+    DeclineTunnel(session);
+    return;
+  }
+
+  // Validate the certificate
+  talk_base::scoped_ptr<talk_base::SSLCertificate> peer_cert(
+      ParseCertificate(content->client_pem_certificate));
+  if (peer_cert.get() == NULL) {
+    LOG(LS_ERROR)
+        << "Rejecting incoming secure tunnel with invalid certificate";
+    DeclineTunnel(session);
+    return;
+  }
+  // If there were a convenient place we could have cached the
+  // peer_cert so as not to have to parse it a second time when
+  // configuring the tunnel.
+  SignalIncomingTunnel(this, jid, content->description, session);
+}
+
+// The XML representation of a session initiation request (XMPP IQ),
+// containing the initiator's SecureTunnelContentDescription,
+// looks something like this:
+// <iq from="INITIATOR@gmail.com/pcpE101B7F4"
+// to="RECIPIENT@gmail.com/pcp8B87F0A3"
+// type="set" id="3">
+// <session xmlns="http://www.google.com/session"
+// type="initiate" id="2508605813"
+// initiator="INITIATOR@gmail.com/pcpE101B7F4">
+// <description xmlns="http://www.google.com/talk/securetunnel">
+// <type>send:filename</type>
+// <client-cert>
+// -----BEGIN CERTIFICATE-----
+// INITIATOR'S CERTIFICATE IN PEM FORMAT (ASCII GIBBERISH)
+// -----END CERTIFICATE-----
+// </client-cert>
+// </description>
+// <transport xmlns="http://www.google.com/transport/p2p"/>
+// </session>
+// </iq>
+
+// The session accept iq, containing the recipient's certificate and
+// echoing the initiator's certificate, looks something like this:
+// <iq from="RECIPIENT@gmail.com/pcpE101B7F4"
+// to="INITIATOR@gmail.com/pcpE101B7F4"
+// type="set" id="5">
+// <session xmlns="http://www.google.com/session"
+// type="accept" id="2508605813"
+// initiator="INITIATOR@gmail.com/pcpE101B7F4">
+// <description xmlns="http://www.google.com/talk/securetunnel">
+// <type>send:FILENAME</type>
+// <client-cert>
+// -----BEGIN CERTIFICATE-----
+// INITIATOR'S CERTIFICATE IN PEM FORMAT (ASCII GIBBERISH)
+// -----END CERTIFICATE-----
+// </client-cert>
+// <server-cert>
+// -----BEGIN CERTIFICATE-----
+// RECIPIENT'S CERTIFICATE IN PEM FORMAT (ASCII GIBBERISH)
+// -----END CERTIFICATE-----
+// </server-cert>
+// </description>
+// </session>
+// </iq>
+
+
+// Parses a secure-tunnel <description> stanza into a
+// SecureTunnelContentDescription. The <type> child is mandatory; the
+// certificate children are optional at parse time (see comment below).
+// On success, the caller owns *content.
+bool SecureTunnelSessionClient::ParseContent(SignalingProtocol protocol,
+                                             const buzz::XmlElement* elem,
+                                             ContentDescription** content,
+                                             ParseError* error) {
+  const buzz::XmlElement* type_elem = elem->FirstNamed(QN_SECURE_TUNNEL_TYPE);
+
+  if (type_elem == NULL)
+    // Missing mandatory XML element.
+    return false;
+
+  // Here we consider the certificate components to be optional. In
+  // practice the client certificate is always present, and the server
+  // certificate is initially missing from the session description
+  // sent during session initiation. OnAccept() will enforce that we
+  // have a certificate for our peer.
+  const buzz::XmlElement* client_cert_elem =
+      elem->FirstNamed(QN_SECURE_TUNNEL_CLIENT_CERT);
+  const buzz::XmlElement* server_cert_elem =
+      elem->FirstNamed(QN_SECURE_TUNNEL_SERVER_CERT);
+  *content = new SecureTunnelContentDescription(
+      type_elem->BodyText(),
+      client_cert_elem ? client_cert_elem->BodyText() : "",
+      server_cert_elem ? server_cert_elem->BodyText() : "");
+  return true;
+}
+
+// Serializes a SecureTunnelContentDescription into a <description>
+// element: a mandatory <type> child plus <client-cert> and
+// <server-cert> children, each emitted only when its PEM string is
+// non-empty. *elem receives a newly allocated element (caller owns).
+bool SecureTunnelSessionClient::WriteContent(
+    SignalingProtocol protocol, const ContentDescription* untyped_content,
+    buzz::XmlElement** elem, WriteError* error) {
+  const SecureTunnelContentDescription* desc =
+      static_cast<const SecureTunnelContentDescription*>(untyped_content);
+
+  buzz::XmlElement* description =
+      new buzz::XmlElement(QN_SECURE_TUNNEL_DESCRIPTION, true);
+
+  buzz::XmlElement* type_child = new buzz::XmlElement(QN_SECURE_TUNNEL_TYPE);
+  type_child->SetBodyText(desc->description);
+  description->AddElement(type_child);
+
+  if (!desc->client_pem_certificate.empty()) {
+    buzz::XmlElement* cert_child =
+        new buzz::XmlElement(QN_SECURE_TUNNEL_CLIENT_CERT);
+    cert_child->SetBodyText(desc->client_pem_certificate);
+    description->AddElement(cert_child);
+  }
+  if (!desc->server_pem_certificate.empty()) {
+    buzz::XmlElement* cert_child =
+        new buzz::XmlElement(QN_SECURE_TUNNEL_SERVER_CERT);
+    cert_child->SetBodyText(desc->server_pem_certificate);
+    description->AddElement(cert_child);
+  }
+
+  *elem = description;
+  return true;
+}
+
+// Builds a single-content SessionDescription wrapping |content| under
+// the secure-tunnel namespace. Caller owns the returned object.
+SessionDescription* NewSecureTunnelSessionDescription(
+    const std::string& content_name, ContentDescription* content) {
+  SessionDescription* description = new SessionDescription();
+  description->AddContent(content_name, NS_SECURE_TUNNEL, content);
+  return description;
+}
+
+// Builds the initiator-side session description for an outgoing secure
+// tunnel: our certificate goes into the client-cert slot; the
+// server-cert slot is left empty until the responder's accept.
+// Caller owns the returned description.
+SessionDescription* SecureTunnelSessionClient::CreateOffer(
+    const buzz::Jid &jid, const std::string &description) {
+  // We are the initiator so we are the client. Put our cert into the
+  // description.
+  std::string pem_cert = GetIdentity().certificate().ToPEMString();
+  return NewSecureTunnelSessionDescription(
+      CN_SECURE_TUNNEL,
+      new SecureTunnelContentDescription(description, pem_cert, ""));
+}
+
+// Builds the responder-side session description: echoes the offer's
+// tunnel type and client certificate, and fills the server-cert slot
+// with our own certificate. Returns NULL if the offer contains no
+// secure-tunnel content; otherwise caller owns the returned object.
+SessionDescription* SecureTunnelSessionClient::CreateAnswer(
+    const SessionDescription* offer) {
+  std::string content_name;
+  const SecureTunnelContentDescription* offer_tunnel = NULL;
+  if (!FindSecureTunnelContent(offer, &content_name, &offer_tunnel))
+    return NULL;
+
+  // We are accepting a session request. We need to add our cert, the
+  // server cert, into the description. The client cert was validated
+  // in OnIncomingTunnel().
+  ASSERT(!offer_tunnel->client_pem_certificate.empty());
+  return NewSecureTunnelSessionDescription(
+      content_name,
+      new SecureTunnelContentDescription(
+          offer_tunnel->description,
+          offer_tunnel->client_pem_certificate,
+          GetIdentity().certificate().ToPEMString()));
+}
+
+// SecureTunnelSession
+
+// Ties together |client| and |session|; |role| (INITIATOR/RESPONDER)
+// decides our SSL role and which certificate slot is ours.
+// |stream_thread| is forwarded to the base TunnelSession for the
+// PseudoTcpChannel.
+SecureTunnelSession::SecureTunnelSession(
+    SecureTunnelSessionClient* client, Session* session,
+    talk_base::Thread* stream_thread, TunnelSessionRole role)
+    : TunnelSession(client, session, stream_thread),
+      role_(role) {
+}
+
+// Wraps |stream| (the raw tunnel stream) in an SSLStreamAdapter
+// configured with our identity; RESPONDER takes the SSL server role.
+// Keeps a StreamReference handle so the peer certificate can be
+// installed later (OnAccept()); returns a fresh reference for the
+// caller, who owns it.
+talk_base::StreamInterface* SecureTunnelSession::MakeSecureStream(
+    talk_base::StreamInterface* stream) {
+  talk_base::SSLStreamAdapter* ssl_stream =
+      talk_base::SSLStreamAdapter::Create(stream);
+  talk_base::SSLIdentity* identity =
+      static_cast<SecureTunnelSessionClient*>(client_)->
+      GetIdentity().GetReference();
+  ssl_stream->SetIdentity(identity);
+  if (role_ == RESPONDER)
+    ssl_stream->SetServerRole();
+  ssl_stream->StartSSLWithPeer();
+
+  // SSL negotiation will start on the stream as soon as it
+  // opens. However our SSLStreamAdapter still hasn't been told what
+  // certificate to allow for our peer. If we are the initiator, we do
+  // not have the peer's certificate yet: we will obtain it from the
+  // session accept message which we will receive later (see
+  // OnAccept()). We won't Connect() the PseudoTcpChannel until we get
+  // that, so the stream will stay closed until then. Keep a handle
+  // on the stream so we can configure the peer certificate later.
+  ssl_stream_reference_.reset(new talk_base::StreamReference(ssl_stream));
+  return ssl_stream_reference_->NewReference();
+}
+
+// Returns the secured tunnel endpoint stream (the channel stream
+// wrapped by MakeSecureStream). May be called only once; caller takes
+// ownership of the returned stream.
+talk_base::StreamInterface* SecureTunnelSession::GetStream() {
+  ASSERT(channel_ != NULL);
+  ASSERT(ssl_stream_reference_.get() == NULL);
+  talk_base::StreamInterface* raw_stream = channel_->GetStream();
+  return MakeSecureStream(raw_stream);
+}
+
+// Callback on session accept (whether we sent or received it).
+// Extracts the peer's certificate from the remote description -- the
+// server-cert slot when we are INITIATOR, the client-cert slot when we
+// are RESPONDER -- installs it on the SSLStreamAdapter held via
+// ssl_stream_reference_, then connects the PseudoTcpChannel. SSL
+// negotiation runs once the channel connects; the tunnel stream opens
+// when negotiation succeeds. Rejects the session if the content or the
+// certificate is missing/unparsable.
+void SecureTunnelSession::OnAccept() {
+  // We have either sent or received a session accept: it's time to
+  // connect the tunnel. First we must set the peer certificate.
+  ASSERT(channel_ != NULL);
+  ASSERT(session_ != NULL);
+  std::string content_name;
+  const SecureTunnelContentDescription* remote_tunnel = NULL;
+  if (!FindSecureTunnelContent(session_->remote_description(),
+                               &content_name, &remote_tunnel)) {
+    session_->Reject(STR_TERMINATE_INCOMPATIBLE_PARAMETERS);
+    return;
+  }
+
+  // Our peer's certificate lives in the slot opposite to our role.
+  const std::string& cert_pem =
+      role_ == INITIATOR ? remote_tunnel->server_pem_certificate :
+      remote_tunnel->client_pem_certificate;
+  talk_base::SSLCertificate* peer_cert =
+      ParseCertificate(cert_pem);
+  if (peer_cert == NULL) {
+    ASSERT(role_ == INITIATOR); // when RESPONDER we validated it earlier
+    LOG(LS_ERROR)
+        << "Rejecting secure tunnel accept with invalid certificate";
+    session_->Reject(STR_TERMINATE_INCOMPATIBLE_PARAMETERS);
+    return;
+  }
+  ASSERT(ssl_stream_reference_.get() != NULL);
+  talk_base::SSLStreamAdapter* ssl_stream =
+      static_cast<talk_base::SSLStreamAdapter*>(
+          ssl_stream_reference_->GetStream());
+  ssl_stream->SetPeerCertificate(peer_cert); // pass ownership of certificate.
+  // We no longer need our handle to the ssl stream.
+  ssl_stream_reference_.reset();
+  LOG(LS_INFO) << "Connecting tunnel";
+  // This will try to connect the PseudoTcpChannel. If and when that
+  // succeeds, then ssl negotiation will take place, and when that
+  // succeeds, the tunnel stream will finally open.
+  VERIFY(channel_->Connect(
+      content_name, "tcp", ICE_CANDIDATE_COMPONENT_DEFAULT));
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.h b/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.h
new file mode 100644
index 00000000000..5c65b984d96
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/securetunnelsessionclient.h
@@ -0,0 +1,165 @@
+/*
+ * libjingle
+ * Copyright 2004--2008, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// SecureTunnelSessionClient and SecureTunnelSession.
+// SecureTunnelSessionClient extends TunnelSessionClient to exchange
+// certificates as part of the session description.
+// SecureTunnelSession is a TunnelSession that wraps the underlying
+// tunnel stream into an SSLStreamAdapter.
+
+#ifndef TALK_SESSION_TUNNEL_SECURETUNNELSESSIONCLIENT_H_
+#define TALK_SESSION_TUNNEL_SECURETUNNELSESSIONCLIENT_H_
+
+#include <string>
+
+#include "talk/base/sslidentity.h"
+#include "talk/base/sslstreamadapter.h"
+#include "talk/session/tunnel/tunnelsessionclient.h"
+
+namespace cricket {
+
+class SecureTunnelSession; // below
+
+// SecureTunnelSessionClient
+
+// This TunnelSessionClient establishes secure tunnels protected by
+// SSL/TLS. The PseudoTcpChannel stream is wrapped with an
+// SSLStreamAdapter. An SSLIdentity must be set or generated.
+//
+// The TunnelContentDescription is extended to include the client and
+// server certificates. The initiator acts as the client. The session
+// initiate stanza carries a description that contains the client's
+// certificate, and the session accept response's description has the
+// server certificate added to it.
+
+class SecureTunnelSessionClient : public TunnelSessionClient {
+ public:
+  // The jid is used as the name for sessions for outgoing tunnels.
+  // manager is the SessionManager to which we register this client
+  // and its sessions.
+  SecureTunnelSessionClient(const buzz::Jid& jid, SessionManager* manager);
+
+  // Configures this client to use a preexisting SSLIdentity.
+  // The client takes ownership of the identity object.
+  // Use either SetIdentity or GenerateIdentity, and only once.
+  void SetIdentity(talk_base::SSLIdentity* identity);
+
+  // Generates an identity from nothing.
+  // Returns true if generation was successful.
+  // Use either SetIdentity or GenerateIdentity, and only once.
+  bool GenerateIdentity();
+
+  // Returns our identity for SSL purposes, as either set by
+  // SetIdentity() or generated by GenerateIdentity(). Call this
+  // method only after our identity has been successfully established
+  // by one of those methods.
+  talk_base::SSLIdentity& GetIdentity() const;
+
+  // Inherited methods.
+  // ParseContent/WriteContent (de)serialize a
+  // SecureTunnelContentDescription including its client-cert and
+  // server-cert slots. CreateOffer puts our certificate in the client
+  // slot; CreateAnswer echoes the client certificate and adds ours as
+  // the server certificate.
+  virtual void OnIncomingTunnel(const buzz::Jid& jid, Session *session);
+  virtual bool ParseContent(SignalingProtocol protocol,
+                            const buzz::XmlElement* elem,
+                            ContentDescription** content,
+                            ParseError* error);
+  virtual bool WriteContent(SignalingProtocol protocol,
+                            const ContentDescription* content,
+                            buzz::XmlElement** elem,
+                            WriteError* error);
+  virtual SessionDescription* CreateOffer(
+      const buzz::Jid &jid, const std::string &description);
+  virtual SessionDescription* CreateAnswer(
+      const SessionDescription* offer);
+
+ protected:
+  // Factory override: produces SecureTunnelSession instances.
+  virtual TunnelSession* MakeTunnelSession(
+      Session* session, talk_base::Thread* stream_thread,
+      TunnelSessionRole role);
+
+ private:
+  // Our identity (key and certificate) for SSL purposes. The
+  // certificate part will be communicated within the session
+  // description. The identity will be passed to the SSLStreamAdapter
+  // and used for SSL authentication.
+  talk_base::scoped_ptr<talk_base::SSLIdentity> identity_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(SecureTunnelSessionClient);
+};
+
+// SecureTunnelSession:
+// A TunnelSession represents one session for one client. It
+// provides the actual tunnel stream and handles state changes.
+// A SecureTunnelSession is a TunnelSession that wraps the underlying
+// tunnel stream into an SSLStreamAdapter.
+
+class SecureTunnelSession : public TunnelSession {
+ public:
+  // This TunnelSession will tie together the given client and session.
+  // stream_thread is passed to the PseudoTCPChannel: it's the thread
+  // designated to interact with the tunnel stream.
+  // role is either INITIATOR or RESPONDER, depending on who is
+  // initiating the session.
+  SecureTunnelSession(SecureTunnelSessionClient* client, Session* session,
+                      talk_base::Thread* stream_thread,
+                      TunnelSessionRole role);
+
+  // Returns the stream that implements the actual P2P tunnel.
+  // This may be called only once. Caller is responsible for freeing
+  // the returned object.
+  virtual talk_base::StreamInterface* GetStream();
+
+ protected:
+  // Inherited method: callback on accepting a session. Installs the
+  // peer certificate and connects the channel.
+  virtual void OnAccept();
+
+  // Helper method for GetStream() that instantiates the
+  // SSLStreamAdapter to wrap the PseudoTcpChannel's stream, and
+  // configures it with our identity and role.
+  talk_base::StreamInterface* MakeSecureStream(
+      talk_base::StreamInterface* stream);
+
+  // Our role in requesting the tunnel: INITIATOR or
+  // RESPONDER. Translates to our role in SSL negotiation:
+  // respectively client or server. Also indicates which slot of the
+  // SecureTunnelContentDescription our cert goes into: client-cert or
+  // server-cert respectively.
+  TunnelSessionRole role_;
+
+  // This is the stream representing the usable tunnel endpoint. It's
+  // a StreamReference wrapping the SSLStreamAdapter instance, which
+  // further wraps a PseudoTcpChannel::InternalStream. The
+  // StreamReference is because in the case of CreateTunnel(), the
+  // stream endpoint is returned early, but we need to keep a handle
+  // on it so we can setup the peer certificate when we receive it
+  // later.
+  talk_base::scoped_ptr<talk_base::StreamReference> ssl_stream_reference_;
+
+  DISALLOW_EVIL_CONSTRUCTORS(SecureTunnelSession);
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_TUNNEL_SECURETUNNELSESSIONCLIENT_H_
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.cc b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.cc
new file mode 100644
index 00000000000..71d0ce11985
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.cc
@@ -0,0 +1,432 @@
+/*
+ * libjingle
+ * Copyright 2004--2008, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/basicdefs.h"
+#include "talk/base/basictypes.h"
+#include "talk/base/common.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringutils.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/transportchannel.h"
+#include "talk/xmllite/xmlelement.h"
+#include "pseudotcpchannel.h"
+#include "tunnelsessionclient.h"
+
+namespace cricket {
+
+// Wire constants for plain tunnel content: the XML namespace, the
+// qualified names of the <description> and <type> elements, and the
+// default content name used in offers.
+const char NS_TUNNEL[] = "http://www.google.com/talk/tunnel";
+const buzz::StaticQName QN_TUNNEL_DESCRIPTION = { NS_TUNNEL, "description" };
+const buzz::StaticQName QN_TUNNEL_TYPE = { NS_TUNNEL, "type" };
+const char CN_TUNNEL[] = "tunnel";
+
+// Thread message IDs. Only MSG_CREATE_TUNNEL is handled in this file
+// (TunnelSessionClientBase::OnMessage); the others are presumably used
+// by related code -- confirm before removing.
+enum {
+  MSG_CLOCK = 1,
+  MSG_DESTROY,
+  MSG_TERMINATE,
+  MSG_EVENT,
+  MSG_CREATE_TUNNEL,
+};
+
+// Message payload carrying an event code and an optional error code.
+struct EventData : public talk_base::MessageData {
+  int event, error;
+  EventData(int ev, int err = 0) : event(ev), error(err) { }
+};
+
+// Payload for MSG_CREATE_TUNNEL: carries the request (target jid,
+// description, stream thread) in, and the resulting tunnel stream out
+// (NULL when no offer could be created).
+struct CreateTunnelData : public talk_base::MessageData {
+  buzz::Jid jid;
+  std::string description;
+  talk_base::Thread* thread;
+  talk_base::StreamInterface* stream;
+};
+
+// Human-readable labels for BaseSession states, used for logging in
+// TunnelSession::OnSessionState().
+extern const talk_base::ConstantLabel SESSION_STATES[];
+
+const talk_base::ConstantLabel SESSION_STATES[] = {
+  KLABEL(Session::STATE_INIT),
+  KLABEL(Session::STATE_SENTINITIATE),
+  KLABEL(Session::STATE_RECEIVEDINITIATE),
+  KLABEL(Session::STATE_SENTACCEPT),
+  KLABEL(Session::STATE_RECEIVEDACCEPT),
+  KLABEL(Session::STATE_SENTMODIFY),
+  KLABEL(Session::STATE_RECEIVEDMODIFY),
+  KLABEL(Session::STATE_SENTREJECT),
+  KLABEL(Session::STATE_RECEIVEDREJECT),
+  KLABEL(Session::STATE_SENTREDIRECT),
+  KLABEL(Session::STATE_SENTTERMINATE),
+  KLABEL(Session::STATE_RECEIVEDTERMINATE),
+  KLABEL(Session::STATE_INPROGRESS),
+  KLABEL(Session::STATE_DEINIT),
+  LASTLABEL
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// TunnelContentDescription
+///////////////////////////////////////////////////////////////////////////////
+
+// Content description for a plain (unencrypted) tunnel: just the
+// free-form description string supplied by the initiating client.
+struct TunnelContentDescription : public ContentDescription {
+  std::string description;
+
+  TunnelContentDescription(const std::string& desc) : description(desc) { }
+  virtual ContentDescription* Copy() const {
+    return new TunnelContentDescription(*this);
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// TunnelSessionClientBase
+///////////////////////////////////////////////////////////////////////////////
+
+// Registers this client with |manager| under content namespace |ns|;
+// sessions in that namespace will be routed to this client.
+TunnelSessionClientBase::TunnelSessionClientBase(const buzz::Jid& jid,
+    SessionManager* manager, const std::string &ns)
+  : jid_(jid), session_manager_(manager), namespace_(ns), shutdown_(false) {
+  session_manager_->AddClient(namespace_, this);
+}
+
+// Tears down every live tunnel: ReleaseSession(true) detaches and
+// deletes each TunnelSession, and the underlying Session is destroyed
+// via the manager. shutdown_ suppresses OnSessionDestroy() while
+// sessions_ is being drained, then we unregister from the manager.
+TunnelSessionClientBase::~TunnelSessionClientBase() {
+  shutdown_ = true;
+  for (std::vector<TunnelSession*>::iterator it = sessions_.begin();
+       it != sessions_.end();
+       ++it) {
+    Session* session = (*it)->ReleaseSession(true);
+    session_manager_->DestroySession(session);
+  }
+  session_manager_->RemoveClient(namespace_);
+}
+
+// SessionClient callback: a session was created. Incoming (received)
+// sessions are immediately wrapped in a RESPONDER-role TunnelSession;
+// outgoing sessions are wrapped in OnMessage() instead.
+void TunnelSessionClientBase::OnSessionCreate(Session* session, bool received) {
+  LOG(LS_INFO) << "TunnelSessionClientBase::OnSessionCreate: received="
+               << received;
+  ASSERT(session_manager_->signaling_thread()->IsCurrent());
+  if (received)
+    sessions_.push_back(
+        MakeTunnelSession(session, talk_base::Thread::Current(), RESPONDER));
+}
+
+// SessionClient callback: a session is being destroyed. Finds and
+// unlinks the matching TunnelSession (ReleaseSession(false) also
+// deletes it). No-op during our own destructor (shutdown_), which
+// drains sessions_ itself.
+void TunnelSessionClientBase::OnSessionDestroy(Session* session) {
+  LOG(LS_INFO) << "TunnelSessionClientBase::OnSessionDestroy";
+  ASSERT(session_manager_->signaling_thread()->IsCurrent());
+  if (shutdown_)
+    return;
+  for (std::vector<TunnelSession*>::iterator it = sessions_.begin();
+       it != sessions_.end();
+       ++it) {
+    if ((*it)->HasSession(session)) {
+      VERIFY((*it)->ReleaseSession(false) == session);
+      sessions_.erase(it);
+      return;
+    }
+  }
+}
+
+// Initiates an outgoing tunnel to |to|. Callable from any thread: the
+// work is marshalled synchronously to the signaling thread via
+// MSG_CREATE_TUNNEL. Returns the local tunnel endpoint stream (caller
+// takes ownership), or NULL if no offer could be created.
+talk_base::StreamInterface* TunnelSessionClientBase::CreateTunnel(
+    const buzz::Jid& to, const std::string& description) {
+  // Valid from any thread
+  CreateTunnelData data;
+  data.jid = to;
+  data.description = description;
+  data.thread = talk_base::Thread::Current();
+  data.stream = NULL;
+  session_manager_->signaling_thread()->Send(this, MSG_CREATE_TUNNEL, &data);
+  return data.stream;
+}
+
+// Accepts a previously signalled incoming session: builds the answer
+// from the remote description, accepts the session, and returns the
+// local tunnel endpoint stream (caller takes ownership). Returns NULL
+// if no answer could be built. Must run on the signaling thread;
+// |session| must belong to one of our TunnelSessions (ASSERT only --
+// a stray session would dereference NULL in release builds).
+talk_base::StreamInterface* TunnelSessionClientBase::AcceptTunnel(
+    Session* session) {
+  ASSERT(session_manager_->signaling_thread()->IsCurrent());
+  TunnelSession* tunnel = NULL;
+  for (std::vector<TunnelSession*>::iterator it = sessions_.begin();
+       it != sessions_.end();
+       ++it) {
+    if ((*it)->HasSession(session)) {
+      tunnel = *it;
+      break;
+    }
+  }
+  ASSERT(tunnel != NULL);
+
+  SessionDescription* answer = CreateAnswer(session->remote_description());
+  if (answer == NULL)
+    return NULL;
+
+  session->Accept(answer);
+  return tunnel->GetStream();
+}
+
+// Rejects an incoming session with a "decline" reason. Must run on
+// the signaling thread.
+void TunnelSessionClientBase::DeclineTunnel(Session* session) {
+  ASSERT(session_manager_->signaling_thread()->IsCurrent());
+  session->Reject(STR_TERMINATE_DECLINE);
+}
+
+// Thread message handler. MSG_CREATE_TUNNEL (posted synchronously by
+// CreateTunnel()) builds the offer, creates the Session and its
+// INITIATOR-role TunnelSession, starts session initiation, and returns
+// the tunnel stream through data->stream. data->stream stays NULL if
+// the offer cannot be created.
+void TunnelSessionClientBase::OnMessage(talk_base::Message* pmsg) {
+  if (pmsg->message_id == MSG_CREATE_TUNNEL) {
+    ASSERT(session_manager_->signaling_thread()->IsCurrent());
+    CreateTunnelData* data = static_cast<CreateTunnelData*>(pmsg->pdata);
+    SessionDescription* offer = CreateOffer(data->jid, data->description);
+    if (offer == NULL) {
+      return;
+    }
+
+    Session* session = session_manager_->CreateSession(jid_.Str(), namespace_);
+    TunnelSession* tunnel = MakeTunnelSession(session, data->thread,
+                                              INITIATOR);
+    sessions_.push_back(tunnel);
+    session->Initiate(data->jid.Str(), offer);
+    data->stream = tunnel->GetStream();
+  }
+}
+
+// Factory for the per-session wrapper. The base implementation
+// ignores |role| and produces a plain (unencrypted) TunnelSession;
+// SecureTunnelSessionClient overrides this.
+TunnelSession* TunnelSessionClientBase::MakeTunnelSession(
+    Session* session, talk_base::Thread* stream_thread,
+    TunnelSessionRole /*role*/) {
+  TunnelSession* tunnel = new TunnelSession(this, session, stream_thread);
+  return tunnel;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// TunnelSessionClient
+///////////////////////////////////////////////////////////////////////////////
+
+// Constructs a client for a custom content namespace |ns|.
+TunnelSessionClient::TunnelSessionClient(const buzz::Jid& jid,
+                                         SessionManager* manager,
+                                         const std::string &ns)
+    : TunnelSessionClientBase(jid, manager, ns) {
+}
+
+// Constructs a client for the default tunnel namespace NS_TUNNEL.
+TunnelSessionClient::TunnelSessionClient(const buzz::Jid& jid,
+                                         SessionManager* manager)
+    : TunnelSessionClientBase(jid, manager, NS_TUNNEL) {
+}
+
+TunnelSessionClient::~TunnelSessionClient() {
+}
+
+
+// Deserializes a tunnel <description> element into a newly allocated
+// TunnelContentDescription (caller owns *content). Returns false when
+// the mandatory <type> child is absent.
+bool TunnelSessionClient::ParseContent(SignalingProtocol protocol,
+                                       const buzz::XmlElement* elem,
+                                       ContentDescription** content,
+                                       ParseError* error) {
+  const buzz::XmlElement* type_elem = elem->FirstNamed(QN_TUNNEL_TYPE);
+  if (type_elem == NULL) {
+    // The <type> child is mandatory.
+    return false;
+  }
+  *content = new TunnelContentDescription(type_elem->BodyText());
+  return true;
+}
+
+// Serializes a TunnelContentDescription as
+// <description><type>...</type></description> in the tunnel namespace.
+// *elem receives a newly allocated element (caller owns).
+bool TunnelSessionClient::WriteContent(
+    SignalingProtocol protocol,
+    const ContentDescription* untyped_content,
+    buzz::XmlElement** elem, WriteError* error) {
+  const TunnelContentDescription* desc =
+      static_cast<const TunnelContentDescription*>(untyped_content);
+
+  buzz::XmlElement* description =
+      new buzz::XmlElement(QN_TUNNEL_DESCRIPTION, true);
+  buzz::XmlElement* type_child = new buzz::XmlElement(QN_TUNNEL_TYPE);
+  type_child->SetBodyText(desc->description);
+  description->AddElement(type_child);
+  *elem = description;
+  return true;
+}
+
+// Builds a single-content SessionDescription wrapping |content| under
+// the tunnel namespace. Caller owns the returned object.
+SessionDescription* NewTunnelSessionDescription(
+    const std::string& content_name, ContentDescription* content) {
+  SessionDescription* description = new SessionDescription();
+  description->AddContent(content_name, NS_TUNNEL, content);
+  return description;
+}
+
+// Locates the first tunnel content in |sdesc|. On success fills in
+// the content's name and a pointer to its (still owned by |sdesc|)
+// TunnelContentDescription; returns false when none is present.
+bool FindTunnelContent(const cricket::SessionDescription* sdesc,
+                       std::string* name,
+                       const TunnelContentDescription** content) {
+  const ContentInfo* info = sdesc->FirstContentByType(NS_TUNNEL);
+  if (info == NULL)
+    return false;
+
+  *name = info->name;
+  *content =
+      static_cast<const TunnelContentDescription*>(info->description);
+  return true;
+}
+
+// Handles an incoming tunnel request: validates that the remote
+// description carries tunnel content (rejecting the session
+// otherwise), then notifies the application via SignalIncomingTunnel,
+// which is expected to call AcceptTunnel() or DeclineTunnel().
+void TunnelSessionClient::OnIncomingTunnel(const buzz::Jid &jid,
+                                           Session *session) {
+  std::string content_name;
+  const TunnelContentDescription* content = NULL;
+  if (!FindTunnelContent(session->remote_description(),
+                         &content_name, &content)) {
+    session->Reject(STR_TERMINATE_INCOMPATIBLE_PARAMETERS);
+    return;
+  }
+
+  SignalIncomingTunnel(this, jid, content->description, session);
+}
+
+// Builds the initiate-side session description: tunnel content plus a
+// transport description from the transport description factory.
+// Returns NULL (freeing the partial offer) if the transport offer
+// cannot be created; otherwise caller owns the returned object.
+SessionDescription* TunnelSessionClient::CreateOffer(
+    const buzz::Jid &jid, const std::string &description) {
+  SessionDescription* offer = NewTunnelSessionDescription(
+      CN_TUNNEL, new TunnelContentDescription(description));
+  talk_base::scoped_ptr<TransportDescription> tdesc(
+      session_manager_->transport_desc_factory()->CreateOffer(
+          TransportOptions(), NULL));
+  if (tdesc.get()) {
+    offer->AddTransportInfo(TransportInfo(CN_TUNNEL, *tdesc));
+  } else {
+    delete offer;
+    offer = NULL;
+  }
+  return offer;
+}
+
+// Builds the accept-side session description: echoes the offer's
+// tunnel content and, when the offer carries a transport info, adds a
+// matching transport answer. Returns NULL when the offer has no
+// tunnel content or the transport answer cannot be created; otherwise
+// caller owns the returned object.
+SessionDescription* TunnelSessionClient::CreateAnswer(
+    const SessionDescription* offer) {
+  std::string content_name;
+  const TunnelContentDescription* offer_tunnel = NULL;
+  if (!FindTunnelContent(offer, &content_name, &offer_tunnel))
+    return NULL;
+
+  SessionDescription* answer = NewTunnelSessionDescription(
+      content_name, new TunnelContentDescription(offer_tunnel->description));
+  const TransportInfo* tinfo = offer->GetTransportInfoByName(content_name);
+  if (tinfo) {
+    const TransportDescription* offer_tdesc = &tinfo->description;
+    ASSERT(offer_tdesc != NULL);
+    talk_base::scoped_ptr<TransportDescription> tdesc(
+        session_manager_->transport_desc_factory()->CreateAnswer(
+            offer_tdesc, TransportOptions(), NULL));
+    if (tdesc.get()) {
+      answer->AddTransportInfo(TransportInfo(content_name, *tdesc));
+    } else {
+      delete answer;
+      answer = NULL;
+    }
+  }
+  return answer;
+}
+///////////////////////////////////////////////////////////////////////////////
+// TunnelSession
+///////////////////////////////////////////////////////////////////////////////
+
+//
+// Signalling thread methods
+//
+
+// Binds the client/session pair and creates the PseudoTcpChannel on
+// |stream_thread|. Subscribes to session state changes and to channel
+// closure so the two objects' lifetimes stay linked.
+TunnelSession::TunnelSession(TunnelSessionClientBase* client, Session* session,
+                             talk_base::Thread* stream_thread)
+    : client_(client), session_(session), channel_(NULL) {
+  ASSERT(client_ != NULL);
+  ASSERT(session_ != NULL);
+  session_->SignalState.connect(this, &TunnelSession::OnSessionState);
+  channel_ = new PseudoTcpChannel(stream_thread, session_);
+  channel_->SignalChannelClosed.connect(this, &TunnelSession::OnChannelClosed);
+}
+
+// ReleaseSession() must run first: it clears session_ and channel_
+// and is also the only code path that deletes this object.
+TunnelSession::~TunnelSession() {
+  ASSERT(client_ != NULL);
+  ASSERT(session_ == NULL);
+  ASSERT(channel_ == NULL);
+}
+
+// Returns the raw tunnel endpoint stream from the PseudoTcpChannel.
+talk_base::StreamInterface* TunnelSession::GetStream() {
+  ASSERT(channel_ != NULL);
+  talk_base::StreamInterface* endpoint = channel_->GetStream();
+  return endpoint;
+}
+
+// True when this wrapper owns |session|.
+bool TunnelSession::HasSession(Session* session) {
+  ASSERT(NULL != session_);
+  return session == session_;
+}
+
+// Detaches and returns the owned Session, then self-destructs.
+// |channel_exists| says whether the PseudoTcpChannel is still alive
+// and needs its closed-signal disconnected before we drop the pointer.
+// NOTE: ends with 'delete this' -- the TunnelSession must not be
+// touched after this call.
+Session* TunnelSession::ReleaseSession(bool channel_exists) {
+  ASSERT(NULL != session_);
+  ASSERT(NULL != channel_);
+  Session* session = session_;
+  session_->SignalState.disconnect(this);
+  session_ = NULL;
+  if (channel_exists)
+    channel_->SignalChannelClosed.disconnect(this);
+  channel_ = NULL;
+  delete this;
+  return session;
+}
+
+// Session state-machine callback: dispatches initiate/accept/terminate
+// transitions to the corresponding handlers. STATE_DEINIT is asserted
+// unreachable because ReleaseSession() disconnects this slot first.
+void TunnelSession::OnSessionState(BaseSession* session,
+                                   BaseSession::State state) {
+  LOG(LS_INFO) << "TunnelSession::OnSessionState("
+               << talk_base::nonnull(
+                    talk_base::FindLabel(state, SESSION_STATES), "Unknown")
+               << ")";
+  ASSERT(session == session_);
+
+  switch (state) {
+    case Session::STATE_RECEIVEDINITIATE:
+      OnInitiate();
+      break;
+    case Session::STATE_SENTACCEPT:
+    case Session::STATE_RECEIVEDACCEPT:
+      OnAccept();
+      break;
+    case Session::STATE_SENTTERMINATE:
+    case Session::STATE_RECEIVEDTERMINATE:
+      OnTerminate();
+      break;
+    case Session::STATE_DEINIT:
+      // ReleaseSession should have been called before this.
+      ASSERT(false);
+      break;
+    default:
+      break;
+  }
+}
+
+// An initiate was received: forward the incoming tunnel (with the
+// remote peer's jid) to the owning client for accept/decline.
+void TunnelSession::OnInitiate() {
+  ASSERT(client_ != NULL);
+  ASSERT(session_ != NULL);
+  const buzz::Jid remote_jid(session_->remote_name());
+  client_->OnIncomingTunnel(remote_jid, session_);
+}
+
+// The session was accepted (by either side): connect the
+// PseudoTcpChannel on the negotiated tunnel content.
+void TunnelSession::OnAccept() {
+  ASSERT(channel_ != NULL);
+  const ContentInfo* content =
+      session_->remote_description()->FirstContentByType(NS_TUNNEL);
+  ASSERT(content != NULL);
+  VERIFY(channel_->Connect(
+      content->name, "tcp", ICE_CANDIDATE_COMPONENT_DEFAULT));
+}
+
+// The session terminated (sent or received): let the channel react.
+void TunnelSession::OnTerminate() {
+  ASSERT(channel_ != NULL);
+  channel_->OnSessionTerminate(session_);
+}
+
+// The PseudoTcpChannel closed: terminate the owning session, which in
+// turn drives OnTerminate()/ReleaseSession() via the state machine.
+void TunnelSession::OnChannelClosed(PseudoTcpChannel* channel) {
+  ASSERT(channel_ == channel);
+  ASSERT(session_ != NULL);
+  session_->Terminate();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.h b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.h
new file mode 100644
index 00000000000..55ce14a6d4a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient.h
@@ -0,0 +1,182 @@
+/*
+ * libjingle
+ * Copyright 2004--2008, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TUNNELSESSIONCLIENT_H__
+#define __TUNNELSESSIONCLIENT_H__
+
+#include <vector>
+
+#include "talk/base/criticalsection.h"
+#include "talk/base/stream.h"
+#include "talk/p2p/base/constants.h"
+#include "talk/p2p/base/pseudotcp.h"
+#include "talk/p2p/base/session.h"
+#include "talk/p2p/base/sessiondescription.h"
+#include "talk/p2p/base/sessionmanager.h"
+#include "talk/p2p/base/sessionclient.h"
+#include "talk/xmllite/qname.h"
+#include "talk/xmpp/constants.h"
+
+namespace cricket {
+
+class TunnelSession;
+class TunnelStream;
+
+enum TunnelSessionRole { INITIATOR, RESPONDER };
+
+///////////////////////////////////////////////////////////////////////////////
+// TunnelSessionClient
+///////////////////////////////////////////////////////////////////////////////
+
+// Base class is still abstract
+class TunnelSessionClientBase
+ : public SessionClient, public talk_base::MessageHandler {
+public:
+ TunnelSessionClientBase(const buzz::Jid& jid, SessionManager* manager,
+ const std::string &ns);
+ virtual ~TunnelSessionClientBase();
+
+ const buzz::Jid& jid() const { return jid_; }
+ SessionManager* session_manager() const { return session_manager_; }
+
+ void OnSessionCreate(Session* session, bool received);
+ void OnSessionDestroy(Session* session);
+
+ // This can be called on any thread. The stream interface is
+ // thread-safe, but notifications must be registered on the creating
+ // thread.
+ talk_base::StreamInterface* CreateTunnel(const buzz::Jid& to,
+ const std::string& description);
+
+ talk_base::StreamInterface* AcceptTunnel(Session* session);
+ void DeclineTunnel(Session* session);
+
+ // Invoked on an incoming tunnel
+ virtual void OnIncomingTunnel(const buzz::Jid &jid, Session *session) = 0;
+
+ // Invoked on an outgoing session request
+ virtual SessionDescription* CreateOffer(
+ const buzz::Jid &jid, const std::string &description) = 0;
+ // Invoked on a session request accept to create
+ // the local-side session description
+ virtual SessionDescription* CreateAnswer(
+ const SessionDescription* offer) = 0;
+
+protected:
+
+ void OnMessage(talk_base::Message* pmsg);
+
+ // helper method to instantiate TunnelSession. By overriding this,
+ // subclasses of TunnelSessionClient are able to instantiate
+ // subclasses of TunnelSession instead.
+ virtual TunnelSession* MakeTunnelSession(Session* session,
+ talk_base::Thread* stream_thread,
+ TunnelSessionRole role);
+
+ buzz::Jid jid_;
+ SessionManager* session_manager_;
+ std::vector<TunnelSession*> sessions_;
+ std::string namespace_;
+ bool shutdown_;
+};
+
+class TunnelSessionClient
+ : public TunnelSessionClientBase, public sigslot::has_slots<> {
+public:
+ TunnelSessionClient(const buzz::Jid& jid, SessionManager* manager);
+ TunnelSessionClient(const buzz::Jid& jid, SessionManager* manager,
+ const std::string &ns);
+ virtual ~TunnelSessionClient();
+
+ virtual bool ParseContent(SignalingProtocol protocol,
+ const buzz::XmlElement* elem,
+ ContentDescription** content,
+ ParseError* error);
+ virtual bool WriteContent(SignalingProtocol protocol,
+ const ContentDescription* content,
+ buzz::XmlElement** elem,
+ WriteError* error);
+
+ // Signal arguments are this, initiator, description, session
+ sigslot::signal4<TunnelSessionClient*, buzz::Jid, std::string, Session*>
+ SignalIncomingTunnel;
+
+ virtual void OnIncomingTunnel(const buzz::Jid &jid,
+ Session *session);
+ virtual SessionDescription* CreateOffer(
+ const buzz::Jid &jid, const std::string &description);
+ virtual SessionDescription* CreateAnswer(
+ const SessionDescription* offer);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// TunnelSession
+// Note: The lifetime of TunnelSession is complicated. It needs to survive
+// until the following three conditions are true:
+// 1) TunnelStream has called Close (tracked via non-null stream_)
+// 2) PseudoTcp has completed (tracked via non-null tcp_)
+// 3) Session has been destroyed (tracked via non-null session_)
+// This is accomplished by calling CheckDestroy after these indicators change.
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+// TunnelStream
+// Note: Because TunnelStream provides a stream interface, its lifetime is
+// controlled by the owner of the stream pointer. As a result, we must support
+// both the TunnelSession disappearing before TunnelStream, and vice versa.
+///////////////////////////////////////////////////////////////////////////////
+
+class PseudoTcpChannel;
+
+class TunnelSession : public sigslot::has_slots<> {
+ public:
+ // Signalling thread methods
+ TunnelSession(TunnelSessionClientBase* client, Session* session,
+ talk_base::Thread* stream_thread);
+
+ virtual talk_base::StreamInterface* GetStream();
+ bool HasSession(Session* session);
+ Session* ReleaseSession(bool channel_exists);
+
+ protected:
+ virtual ~TunnelSession();
+
+ virtual void OnSessionState(BaseSession* session, BaseSession::State state);
+ virtual void OnInitiate();
+ virtual void OnAccept();
+ virtual void OnTerminate();
+ virtual void OnChannelClosed(PseudoTcpChannel* channel);
+
+ TunnelSessionClientBase* client_;
+ Session* session_;
+ PseudoTcpChannel* channel_;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace cricket
+
+#endif // __TUNNELSESSIONCLIENT_H__
diff --git a/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient_unittest.cc b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient_unittest.cc
new file mode 100644
index 00000000000..7370351e60d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/session/tunnel/tunnelsessionclient_unittest.cc
@@ -0,0 +1,226 @@
+/*
+ * libjingle
+ * Copyright 2010, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include "talk/base/gunit.h"
+#include "talk/base/messagehandler.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/base/thread.h"
+#include "talk/base/timeutils.h"
+#include "talk/p2p/base/sessionmanager.h"
+#include "talk/p2p/base/transport.h"
+#include "talk/p2p/client/fakeportallocator.h"
+#include "talk/session/tunnel/tunnelsessionclient.h"
+
+static const int kTimeoutMs = 10000;
+static const int kBlockSize = 4096;
+static const buzz::Jid kLocalJid("local@localhost");
+static const buzz::Jid kRemoteJid("remote@localhost");
+
+// This test fixture creates the necessary plumbing to create and run
+// two TunnelSessionClients that talk to each other.
+class TunnelSessionClientTest : public testing::Test,
+ public talk_base::MessageHandler,
+ public sigslot::has_slots<> {
+ public:
+ TunnelSessionClientTest()
+ : local_pa_(talk_base::Thread::Current(), NULL),
+ remote_pa_(talk_base::Thread::Current(), NULL),
+ local_sm_(&local_pa_, talk_base::Thread::Current()),
+ remote_sm_(&remote_pa_, talk_base::Thread::Current()),
+ local_client_(kLocalJid, &local_sm_),
+ remote_client_(kRemoteJid, &remote_sm_),
+ done_(false) {
+ local_sm_.SignalRequestSignaling.connect(this,
+ &TunnelSessionClientTest::OnLocalRequestSignaling);
+ local_sm_.SignalOutgoingMessage.connect(this,
+ &TunnelSessionClientTest::OnOutgoingMessage);
+ remote_sm_.SignalRequestSignaling.connect(this,
+ &TunnelSessionClientTest::OnRemoteRequestSignaling);
+ remote_sm_.SignalOutgoingMessage.connect(this,
+ &TunnelSessionClientTest::OnOutgoingMessage);
+ remote_client_.SignalIncomingTunnel.connect(this,
+ &TunnelSessionClientTest::OnIncomingTunnel);
+ }
+
+ // Transfer the desired amount of data from the local to the remote client.
+ void TestTransfer(int size) {
+ // Create some dummy data to send.
+ send_stream_.ReserveSize(size);
+ for (int i = 0; i < size; ++i) {
+ char ch = static_cast<char>(i);
+ send_stream_.Write(&ch, 1, NULL, NULL);
+ }
+ send_stream_.Rewind();
+ // Prepare the receive stream.
+ recv_stream_.ReserveSize(size);
+ // Create the tunnel and set things in motion.
+ local_tunnel_.reset(local_client_.CreateTunnel(kRemoteJid, "test"));
+ local_tunnel_->SignalEvent.connect(this,
+ &TunnelSessionClientTest::OnStreamEvent);
+ EXPECT_TRUE_WAIT(done_, kTimeoutMs);
+ // Make sure we received the right data.
+ EXPECT_EQ(0, memcmp(send_stream_.GetBuffer(),
+ recv_stream_.GetBuffer(), size));
+ }
+
+ private:
+ enum { MSG_LSIGNAL, MSG_RSIGNAL };
+
+ // There's no SessionManager* argument in this callback, so we need 2 of them.
+ void OnLocalRequestSignaling() {
+ local_sm_.OnSignalingReady();
+ }
+ void OnRemoteRequestSignaling() {
+ remote_sm_.OnSignalingReady();
+ }
+
+ // Post a message, to avoid problems with directly connecting the callbacks.
+ void OnOutgoingMessage(cricket::SessionManager* manager,
+ const buzz::XmlElement* stanza) {
+ if (manager == &local_sm_) {
+ talk_base::Thread::Current()->Post(this, MSG_LSIGNAL,
+ talk_base::WrapMessageData(*stanza));
+ } else if (manager == &remote_sm_) {
+ talk_base::Thread::Current()->Post(this, MSG_RSIGNAL,
+ talk_base::WrapMessageData(*stanza));
+ }
+ }
+
+ // Need to add a "from=" attribute (normally added by the server)
+ // Then route the incoming signaling message to the "other" session manager.
+ virtual void OnMessage(talk_base::Message* message) {
+ talk_base::TypedMessageData<buzz::XmlElement>* data =
+ static_cast<talk_base::TypedMessageData<buzz::XmlElement>*>(
+ message->pdata);
+ bool response = data->data().Attr(buzz::QN_TYPE) == buzz::STR_RESULT;
+ if (message->message_id == MSG_RSIGNAL) {
+ data->data().AddAttr(buzz::QN_FROM, remote_client_.jid().Str());
+ if (!response) {
+ local_sm_.OnIncomingMessage(&data->data());
+ } else {
+ local_sm_.OnIncomingResponse(NULL, &data->data());
+ }
+ } else if (message->message_id == MSG_LSIGNAL) {
+ data->data().AddAttr(buzz::QN_FROM, local_client_.jid().Str());
+ if (!response) {
+ remote_sm_.OnIncomingMessage(&data->data());
+ } else {
+ remote_sm_.OnIncomingResponse(NULL, &data->data());
+ }
+ }
+ delete data;
+ }
+
+ // Accept the tunnel when it arrives and wire up the stream.
+ void OnIncomingTunnel(cricket::TunnelSessionClient* client,
+ buzz::Jid jid, std::string description,
+ cricket::Session* session) {
+ remote_tunnel_.reset(remote_client_.AcceptTunnel(session));
+ remote_tunnel_->SignalEvent.connect(this,
+ &TunnelSessionClientTest::OnStreamEvent);
+ }
+
+ // Send from send_stream_ as long as we're not flow-controlled.
+ // Read bytes out into recv_stream_ as they arrive.
+ // End the test when we are notified that the local side has closed the
+ // tunnel. All data has been read out at this point.
+ void OnStreamEvent(talk_base::StreamInterface* stream, int events,
+ int error) {
+ if (events & talk_base::SE_READ) {
+ if (stream == remote_tunnel_.get()) {
+ ReadData();
+ }
+ }
+ if (events & talk_base::SE_WRITE) {
+ if (stream == local_tunnel_.get()) {
+ bool done = false;
+ WriteData(&done);
+ if (done) {
+ local_tunnel_->Close();
+ }
+ }
+ }
+ if (events & talk_base::SE_CLOSE) {
+ if (stream == remote_tunnel_.get()) {
+ remote_tunnel_->Close();
+ done_ = true;
+ }
+ }
+ }
+
+ // Spool from the tunnel into recv_stream.
+ // Flow() doesn't work here because it won't write if the read blocks.
+ void ReadData() {
+ char block[kBlockSize];
+ size_t read, position;
+ talk_base::StreamResult res;
+ while ((res = remote_tunnel_->Read(block, sizeof(block), &read, NULL)) ==
+ talk_base::SR_SUCCESS) {
+ recv_stream_.Write(block, read, NULL, NULL);
+ }
+ ASSERT(res != talk_base::SR_EOS);
+ recv_stream_.GetPosition(&position);
+ LOG(LS_VERBOSE) << "Recv position: " << position;
+ }
+ // Spool from send_stream into the tunnel. Back up if we get flow controlled.
+ void WriteData(bool* done) {
+ char block[kBlockSize];
+ size_t leftover = 0, position;
+ talk_base::StreamResult res = talk_base::Flow(&send_stream_,
+ block, sizeof(block), local_tunnel_.get(), &leftover);
+ if (res == talk_base::SR_BLOCK) {
+ send_stream_.GetPosition(&position);
+ send_stream_.SetPosition(position - leftover);
+ LOG(LS_VERBOSE) << "Send position: " << position - leftover;
+ *done = false;
+ } else if (res == talk_base::SR_SUCCESS) {
+ *done = true;
+ } else {
+ ASSERT(false); // shouldn't happen
+ }
+ }
+
+ private:
+ cricket::FakePortAllocator local_pa_;
+ cricket::FakePortAllocator remote_pa_;
+ cricket::SessionManager local_sm_;
+ cricket::SessionManager remote_sm_;
+ cricket::TunnelSessionClient local_client_;
+ cricket::TunnelSessionClient remote_client_;
+ talk_base::scoped_ptr<talk_base::StreamInterface> local_tunnel_;
+ talk_base::scoped_ptr<talk_base::StreamInterface> remote_tunnel_;
+ talk_base::MemoryStream send_stream_;
+ talk_base::MemoryStream recv_stream_;
+ bool done_;
+};
+
+// Test the normal case of sending data from one side to the other.
+TEST_F(TunnelSessionClientTest, TestTransfer) {
+ TestTransfer(1000000);
+}