summaryrefslogtreecommitdiff
path: root/chromium/third_party/libjingle/source/talk/media/webrtc
diff options
context:
space:
mode:
authorZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
committerZeno Albisser <zeno.albisser@digia.com>2013-08-15 21:46:11 +0200
commit679147eead574d186ebf3069647b4c23e8ccace6 (patch)
treefc247a0ac8ff119f7c8550879ebb6d3dd8d1ff69 /chromium/third_party/libjingle/source/talk/media/webrtc
downloadqtwebengine-chromium-679147eead574d186ebf3069647b4c23e8ccace6.tar.gz
Initial import.
Diffstat (limited to 'chromium/third_party/libjingle/source/talk/media/webrtc')
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtccommon.h66
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcdeviceinfo.h123
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvcmfactory.h63
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideocapturemodule.h159
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideoengine.h1104
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvoiceengine.h1020
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtccommon.h76
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcexport.h79
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcmediaengine.h194
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.cc176
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.h211
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender_unittest.cc147
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.cc183
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.h120
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe_unittest.cc84
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.cc366
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.h103
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer_unittest.cc145
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h53
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h89
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.cc3487
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.h441
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine_unittest.cc1826
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.cc358
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h150
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe_unittest.cc313
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvie.h151
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoe.h179
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.cc3079
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.h428
-rw-r--r--chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine_unittest.cc2844
31 files changed, 17817 insertions, 0 deletions
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtccommon.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtccommon.h
new file mode 100644
index 00000000000..026ad10b497
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtccommon.h
@@ -0,0 +1,66 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_FAKEWEBRTCCOMMON_H_
+#define TALK_SESSION_PHONE_FAKEWEBRTCCOMMON_H_
+
+#include "talk/base/common.h"
+
+namespace cricket {
+
+// Helper macros for writing fake WebRTC interface implementations.
+// The WEBRTC_*_STUB macros expand to a trivial override returning a canned
+// success value; the WEBRTC_*_FUNC macros expand to just the override
+// signature, for methods whose body the fake supplies itself.
+
+// Stub returning 0 (the conventional int success code in these interfaces).
+#define WEBRTC_STUB(method, args) \
+  virtual int method args OVERRIDE { return 0; }
+
+// As WEBRTC_STUB, for const methods.
+#define WEBRTC_STUB_CONST(method, args) \
+  virtual int method args const OVERRIDE { return 0; }
+
+// Stub returning true.
+#define WEBRTC_BOOL_STUB(method, args) \
+  virtual bool method args OVERRIDE { return true; }
+
+// Stub with an empty body.
+#define WEBRTC_VOID_STUB(method, args) \
+  virtual void method args OVERRIDE {}
+
+// Signature-only variants: the fake provides the implementation body.
+#define WEBRTC_FUNC(method, args) \
+  virtual int method args OVERRIDE
+
+#define WEBRTC_FUNC_CONST(method, args) \
+  virtual int method args const OVERRIDE
+
+#define WEBRTC_BOOL_FUNC(method, args) \
+  virtual bool method args OVERRIDE
+
+#define WEBRTC_VOID_FUNC(method, args) \
+  virtual void method args OVERRIDE
+
+// Returns -1 when |channel| is not a known channel id. Assumes the
+// enclosing fake defines a channels_ map keyed by channel id.
+#define WEBRTC_CHECK_CHANNEL(channel) \
+  if (channels_.find(channel) == channels_.end()) return -1;
+
+// Asserts that |channel| is a known channel id.
+#define WEBRTC_ASSERT_CHANNEL(channel) \
+  ASSERT(channels_.find(channel) != channels_.end());
+}  // namespace cricket
+
+#endif  // TALK_SESSION_PHONE_FAKEWEBRTCCOMMON_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcdeviceinfo.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcdeviceinfo.h
new file mode 100644
index 00000000000..585f31e9ee4
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcdeviceinfo.h
@@ -0,0 +1,123 @@
+// libjingle
+// Copyright 2004 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef TALK_SESSION_PHONE_FAKEWEBRTCDEVICEINFO_H_
+#define TALK_SESSION_PHONE_FAKEWEBRTCDEVICEINFO_H_
+
+#include <vector>
+
+#include "talk/base/stringutils.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+// Fake class for mocking out webrtc::VideoCaptureModule::DeviceInfo.
+// Tests register devices (and per-device capabilities) up front; the
+// DeviceInfo accessors then serve them back to the code under test.
+class FakeWebRtcDeviceInfo : public webrtc::VideoCaptureModule::DeviceInfo {
+ public:
+  // One registered capture device plus its advertised capabilities.
+  struct Device {
+    Device(const std::string& n, const std::string& i) : name(n), id(i) {}
+    std::string name;
+    std::string id;
+    std::string product;
+    std::vector<webrtc::VideoCaptureCapability> caps;
+  };
+  FakeWebRtcDeviceInfo() {}
+  void AddDevice(const std::string& device_name, const std::string& device_id) {
+    devices_.push_back(Device(device_name, device_id));
+  }
+  // Registers |cap| for the device with |device_id|; no-op if the device
+  // has not been added.
+  void AddCapability(const std::string& device_id,
+                     const webrtc::VideoCaptureCapability& cap) {
+    // The id is already a narrow string; no reinterpret_cast is needed.
+    Device* dev = GetDeviceById(device_id.c_str());
+    if (!dev) return;
+    dev->caps.push_back(cap);
+  }
+  virtual uint32_t NumberOfDevices() {
+    // Cast to the interface's unsigned return type (was static_cast<int>).
+    return static_cast<uint32_t>(devices_.size());
+  }
+  // Copies the device's name/id/product into the caller-supplied buffers,
+  // truncating to the given lengths. Returns -1 for an invalid index.
+  virtual int32_t GetDeviceName(uint32_t device_num,
+                                char* device_name,
+                                uint32_t device_name_len,
+                                char* device_id,
+                                uint32_t device_id_len,
+                                char* product_id,
+                                uint32_t product_id_len) {
+    Device* dev = GetDeviceByIndex(device_num);
+    if (!dev) return -1;
+    // The destination buffers are already char*; the reinterpret_casts in
+    // the original were redundant.
+    talk_base::strcpyn(device_name, device_name_len, dev->name.c_str());
+    talk_base::strcpyn(device_id, device_id_len, dev->id.c_str());
+    if (product_id) {
+      talk_base::strcpyn(product_id, product_id_len, dev->product.c_str());
+    }
+    return 0;
+  }
+  virtual int32_t NumberOfCapabilities(const char* device_id) {
+    Device* dev = GetDeviceById(device_id);
+    if (!dev) return -1;
+    return static_cast<int32_t>(dev->caps.size());
+  }
+  // Copies capability |device_cap_num| of |device_id| into |cap|.
+  virtual int32_t GetCapability(const char* device_id,
+                                const uint32_t device_cap_num,
+                                webrtc::VideoCaptureCapability& cap) {
+    Device* dev = GetDeviceById(device_id);
+    if (!dev) return -1;
+    if (device_cap_num >= dev->caps.size()) return -1;
+    cap = dev->caps[device_cap_num];
+    return 0;
+  }
+  virtual int32_t GetOrientation(const char* device_id,
+                                 webrtc::VideoCaptureRotation& rotation) {
+    return -1;  // not implemented
+  }
+  virtual int32_t GetBestMatchedCapability(
+      const char* device_id,
+      const webrtc::VideoCaptureCapability& requested,
+      webrtc::VideoCaptureCapability& resulting) {
+    return -1;  // not implemented
+  }
+  virtual int32_t DisplayCaptureSettingsDialogBox(
+      const char* device_id, const char* dialog_title,
+      void* parent, uint32_t x, uint32_t y) {
+    return -1;  // not implemented
+  }
+
+  // Returns the device at index |num|, or NULL when out of range.
+  Device* GetDeviceByIndex(size_t num) {
+    return (num < devices_.size()) ? &devices_[num] : NULL;
+  }
+  // Returns the device whose id equals |device_id|, or NULL if unknown.
+  Device* GetDeviceById(const char* device_id) {
+    for (size_t i = 0; i < devices_.size(); ++i) {
+      if (devices_[i].id == device_id) {
+        return &devices_[i];
+      }
+    }
+    return NULL;
+  }
+
+ private:
+  std::vector<Device> devices_;
+};
+
+#endif // TALK_SESSION_PHONE_FAKEWEBRTCDEVICEINFO_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvcmfactory.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvcmfactory.h
new file mode 100644
index 00000000000..38643f9c2a7
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvcmfactory.h
@@ -0,0 +1,63 @@
+// libjingle
+// Copyright 2004 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef TALK_SESSION_PHONE_FAKEWEBRTCVCMFACTORY_H_
+#define TALK_SESSION_PHONE_FAKEWEBRTCVCMFACTORY_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+// Factory class to allow the fakes above to be injected into
+// WebRtcVideoCapturer.
+class FakeWebRtcVcmFactory : public cricket::WebRtcVcmFactoryInterface {
+ public:
+  // Returns a new fake capture module, or NULL when |device_id| has not
+  // been registered with |device_info|. The module reports its own
+  // destruction back to this factory via OnDestroyed().
+  virtual webrtc::VideoCaptureModule* Create(int module_id,
+                                             const char* device_id) {
+    if (!device_info.GetDeviceById(device_id)) return NULL;
+    FakeWebRtcVideoCaptureModule* module =
+        new FakeWebRtcVideoCaptureModule(this, module_id);
+    modules.push_back(module);
+    return module;
+  }
+  virtual webrtc::VideoCaptureModule::DeviceInfo* CreateDeviceInfo(int id) {
+    return &device_info;
+  }
+  // |device_info| is owned by the factory, so there is nothing to free.
+  virtual void DestroyDeviceInfo(webrtc::VideoCaptureModule::DeviceInfo* info) {
+  }
+  // Called from ~FakeWebRtcVideoCaptureModule so the factory does not
+  // keep a dangling pointer to a destroyed module.
+  void OnDestroyed(webrtc::VideoCaptureModule* module) {
+    // Bug fix: std::remove() alone only shifts elements and leaves the
+    // vector's size unchanged; erase() is required to actually drop the
+    // stale pointer (erase-remove idiom).
+    modules.erase(std::remove(modules.begin(), modules.end(), module),
+                  modules.end());
+  }
+  FakeWebRtcDeviceInfo device_info;
+  std::vector<FakeWebRtcVideoCaptureModule*> modules;
+};
+
+// Defined out-of-line so it can see the full FakeWebRtcVcmFactory
+// definition; tells the factory this module is going away.
+FakeWebRtcVideoCaptureModule::~FakeWebRtcVideoCaptureModule() {
+  if (factory_)
+    factory_->OnDestroyed(this);
+}
+
+#endif // TALK_SESSION_PHONE_FAKEWEBRTCVCMFACTORY_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideocapturemodule.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideocapturemodule.h
new file mode 100644
index 00000000000..b823bc18fe6
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideocapturemodule.h
@@ -0,0 +1,159 @@
+// libjingle
+// Copyright 2004 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef TALK_SESSION_PHONE_FAKEWEBRTCVIDEOCAPTUREMODULE_H_
+#define TALK_SESSION_PHONE_FAKEWEBRTCVIDEOCAPTUREMODULE_H_
+
+#include <vector>
+
+#include "talk/media/base/testutils.h"
+#include "talk/media/webrtc/fakewebrtcdeviceinfo.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+class FakeWebRtcVcmFactory;
+
+// Fake class for mocking out webrtc::VideoCaptureModule.
+// Records the capture configuration it is given and lets tests push
+// frames to the registered data callback via SendFrame().
+class FakeWebRtcVideoCaptureModule : public webrtc::VideoCaptureModule {
+ public:
+  FakeWebRtcVideoCaptureModule(FakeWebRtcVcmFactory* factory, int32_t id)
+      : factory_(factory),
+        id_(id),
+        callback_(NULL),
+        running_(false),
+        delay_(0) {
+  }
+  virtual int32_t Version(char* version,
+                          uint32_t& remaining_buffer_in_bytes,
+                          uint32_t& position) const {
+    return 0;
+  }
+  virtual int32_t TimeUntilNextProcess() {
+    return 0;
+  }
+  virtual int32_t Process() {
+    return 0;
+  }
+  virtual int32_t ChangeUniqueId(const int32_t id) {
+    id_ = id;
+    return 0;
+  }
+  // Frames pushed via SendFrame() are delivered to |callback|.
+  virtual int32_t RegisterCaptureDataCallback(
+      webrtc::VideoCaptureDataCallback& callback) {
+    callback_ = &callback;
+    return 0;
+  }
+  virtual int32_t DeRegisterCaptureDataCallback() {
+    callback_ = NULL;
+    return 0;
+  }
+  virtual int32_t RegisterCaptureCallback(
+      webrtc::VideoCaptureFeedBack& callback) {
+    return -1;  // not implemented
+  }
+  virtual int32_t DeRegisterCaptureCallback() {
+    return 0;
+  }
+  // Fails if capture is already running; otherwise records |cap| as the
+  // current capability and marks the module running.
+  virtual int32_t StartCapture(
+      const webrtc::VideoCaptureCapability& cap) {
+    if (running_) return -1;
+    cap_ = cap;
+    running_ = true;
+    return 0;
+  }
+  virtual int32_t StopCapture() {
+    running_ = false;
+    return 0;
+  }
+  virtual const char* CurrentDeviceName() const {
+    return NULL;  // not implemented
+  }
+  virtual bool CaptureStarted() {
+    return running_;
+  }
+  // Returns the capability passed to StartCapture(); -1 when not running.
+  virtual int32_t CaptureSettings(
+      webrtc::VideoCaptureCapability& settings) {
+    if (!running_) return -1;
+    settings = cap_;
+    return 0;
+  }
+  virtual int32_t SetCaptureDelay(int32_t delay) {
+    delay_ = delay;
+    return 0;
+  }
+  virtual int32_t CaptureDelay() {
+    return delay_;
+  }
+  virtual int32_t SetCaptureRotation(
+      webrtc::VideoCaptureRotation rotation) {
+    return -1;  // not implemented
+  }
+  virtual VideoCaptureEncodeInterface* GetEncodeInterface(
+      const webrtc::VideoCodec& codec) {
+    return NULL;  // not implemented
+  }
+  virtual int32_t EnableFrameRateCallback(const bool enable) {
+    return -1;  // not implemented
+  }
+  virtual int32_t EnableNoPictureAlarm(const bool enable) {
+    return -1;  // not implemented
+  }
+  // NOTE(review): AddRef()/Release() keep no real reference count;
+  // Release() deletes unconditionally, so each module supports exactly
+  // one owner. Sufficient for the tests this fake serves.
+  virtual int32_t AddRef() {
+    return 0;
+  }
+  virtual int32_t Release() {
+    delete this;
+    return 0;
+  }
+
+  // Pushes an empty I420 frame of the given size to the registered data
+  // callback. Returns false when capture is not running or the frame
+  // cannot be allocated.
+  bool SendFrame(int w, int h) {
+    if (!running_) return false;
+    webrtc::I420VideoFrame sample;
+    // Setting stride based on width.
+    if (sample.CreateEmptyFrame(w, h, w, (w + 1) / 2, (w + 1) / 2) < 0) {
+      return false;
+    }
+    if (callback_) {
+      callback_->OnIncomingCapturedFrame(id_, sample);
+    }
+    return true;
+  }
+
+  // Capability recorded by the last successful StartCapture().
+  const webrtc::VideoCaptureCapability& cap() const {
+    return cap_;
+  }
+
+ private:
+  // Ref-counted, use Release() instead.
+  ~FakeWebRtcVideoCaptureModule();
+
+  FakeWebRtcVcmFactory* factory_;
+  int id_;
+  webrtc::VideoCaptureDataCallback* callback_;
+  bool running_;
+  webrtc::VideoCaptureCapability cap_;
+  int delay_;
+};
+
+#endif // TALK_SESSION_PHONE_FAKEWEBRTCVIDEOCAPTUREMODULE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideoengine.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideoengine.h
new file mode 100644
index 00000000000..df539048105
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvideoengine.h
@@ -0,0 +1,1104 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTC_FAKEWEBRTCVIDEOENGINE_H_
+#define TALK_MEDIA_WEBRTC_FAKEWEBRTCVIDEOENGINE_H_
+
+#include <map>
+#include <set>
+#include <vector>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/webrtc/fakewebrtccommon.h"
+#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
+#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
+#include "talk/media/webrtc/webrtcvie.h"
+
+namespace webrtc {
+
+// Bitwise equality for VideoCodec, used by the fakes below to compare
+// configured codecs. NOTE(review): memcmp also compares struct padding
+// bytes, so two logically-equal codecs built by different code paths
+// could compare unequal — assumes codecs are fully zero-initialized
+// (as the fakes do via memset) before fields are set; confirm.
+bool operator==(const webrtc::VideoCodec& c1, const webrtc::VideoCodec& c2) {
+  return memcmp(&c1, &c2, sizeof(c1)) == 0;
+}
+
+}
+
+namespace cricket {
+
+// Capturer-id analogues of WEBRTC_CHECK_CHANNEL/WEBRTC_ASSERT_CHANNEL;
+// assume the enclosing fake defines a capturers_ map keyed by capture id.
+#define WEBRTC_CHECK_CAPTURER(capturer) \
+  if (capturers_.find(capturer) == capturers_.end()) return -1;
+
+#define WEBRTC_ASSERT_CAPTURER(capturer) \
+  ASSERT(capturers_.find(capturer) != capturers_.end());
+
+// Canned bitrate values used by the fake engine.
+static const int kMinVideoBitrate = 100;
+static const int kStartVideoBitrate = 300;
+static const int kMaxVideoBitrate = 1000;
+
+// WebRtc channel id and capture id share the same number space.
+// This is how AddRenderer(renderId, ...) is able to tell if it is adding a
+// renderer for a channel or it is adding a renderer for a capturer.
+static const int kViEChannelIdBase = 0;
+static const int kViEChannelIdMax = 1000;
+static const int kViECaptureIdBase = 10000;  // Make sure there is a gap.
+static const int kViECaptureIdMax = 11000;
+
+// Fake class for mocking out webrtc::VideoDecoder.
+// Counts the frames passed to Decode() so tests can verify that the
+// external decoder was actually exercised.
+class FakeWebRtcVideoDecoder : public webrtc::VideoDecoder {
+ public:
+  FakeWebRtcVideoDecoder()
+      : num_frames_received_(0) {
+  }
+
+  virtual int32 InitDecode(const webrtc::VideoCodec*, int32) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  // Pretends to decode successfully and bumps the received-frame count.
+  virtual int32 Decode(
+      const webrtc::EncodedImage&, bool, const webrtc::RTPFragmentationHeader*,
+      const webrtc::CodecSpecificInfo*, int64) {
+    num_frames_received_++;
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 RegisterDecodeCompleteCallback(
+      webrtc::DecodedImageCallback*) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 Release() {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 Reset() {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  // Number of frames passed to Decode() since construction.
+  int GetNumFramesReceived() const {
+    return num_frames_received_;
+  }
+
+ private:
+  int num_frames_received_;
+};
+
+// Fake class for mocking out WebRtcVideoDecoderFactory.
+// Hands out FakeWebRtcVideoDecoders for codec types registered via
+// AddSupportedVideoCodecType() and tracks every decoder it creates.
+class FakeWebRtcVideoDecoderFactory : public WebRtcVideoDecoderFactory {
+ public:
+  FakeWebRtcVideoDecoderFactory()
+      : num_created_decoders_(0) {
+  }
+
+  // Returns NULL for unsupported codec types; otherwise returns a new
+  // fake decoder and remembers it in decoders().
+  virtual webrtc::VideoDecoder* CreateVideoDecoder(
+      webrtc::VideoCodecType type) {
+    if (supported_codec_types_.count(type) == 0) {
+      return NULL;
+    }
+    FakeWebRtcVideoDecoder* decoder = new FakeWebRtcVideoDecoder();
+    decoders_.push_back(decoder);
+    num_created_decoders_++;
+    return decoder;
+  }
+
+  // Forgets |decoder| (erase-remove) and frees it.
+  virtual void DestroyVideoDecoder(webrtc::VideoDecoder* decoder) {
+    decoders_.erase(
+        std::remove(decoders_.begin(), decoders_.end(), decoder),
+        decoders_.end());
+    delete decoder;
+  }
+
+  void AddSupportedVideoCodecType(webrtc::VideoCodecType type) {
+    supported_codec_types_.insert(type);
+  }
+
+  // Total decoders ever created; not decremented on destroy.
+  int GetNumCreatedDecoders() {
+    return num_created_decoders_;
+  }
+
+  // Decoders currently alive (created and not yet destroyed).
+  const std::vector<FakeWebRtcVideoDecoder*>& decoders() {
+    return decoders_;
+  }
+
+ private:
+  std::set<webrtc::VideoCodecType> supported_codec_types_;
+  std::vector<FakeWebRtcVideoDecoder*> decoders_;
+  int num_created_decoders_;
+};
+
+// Fake class for mocking out webrtc::VideoEncoder.
+// Every operation simply reports success; no actual encoding happens.
+class FakeWebRtcVideoEncoder : public webrtc::VideoEncoder {
+ public:
+  FakeWebRtcVideoEncoder() {}
+
+  virtual int32 InitEncode(const webrtc::VideoCodec* codecSettings,
+                           int32 numberOfCores,
+                           uint32 maxPayloadSize) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 Encode(
+      const webrtc::I420VideoFrame& inputImage,
+      const webrtc::CodecSpecificInfo* codecSpecificInfo,
+      const std::vector<webrtc::VideoFrameType>* frame_types) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 RegisterEncodeCompleteCallback(
+      webrtc::EncodedImageCallback* callback) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 Release() {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 SetChannelParameters(uint32 packetLoss,
+                                     int rtt) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  virtual int32 SetRates(uint32 newBitRate,
+                         uint32 frameRate) {
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+};
+
+// Fake class for mocking out WebRtcVideoEncoderFactory.
+// Mirrors the decoder factory above: creates fake encoders for registered
+// codec types, tracks them, and lets tests fire OnCodecsAvailable() at
+// registered observers.
+class FakeWebRtcVideoEncoderFactory : public WebRtcVideoEncoderFactory {
+ public:
+  FakeWebRtcVideoEncoderFactory()
+      : num_created_encoders_(0) {
+  }
+
+  // Returns NULL for unsupported codec types; otherwise returns a new
+  // fake encoder and remembers it in encoders().
+  virtual webrtc::VideoEncoder* CreateVideoEncoder(
+      webrtc::VideoCodecType type) {
+    if (supported_codec_types_.count(type) == 0) {
+      return NULL;
+    }
+    FakeWebRtcVideoEncoder* encoder = new FakeWebRtcVideoEncoder();
+    encoders_.push_back(encoder);
+    num_created_encoders_++;
+    return encoder;
+  }
+
+  // Forgets |encoder| (erase-remove) and frees it.
+  virtual void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) {
+    encoders_.erase(
+        std::remove(encoders_.begin(), encoders_.end(), encoder),
+        encoders_.end());
+    delete encoder;
+  }
+
+  // Observer registration is expected to be unique; a double add or a
+  // remove of an unknown observer fails the enclosing test via EXPECT_*.
+  virtual void AddObserver(WebRtcVideoEncoderFactory::Observer* observer) {
+    bool inserted = observers_.insert(observer).second;
+    EXPECT_TRUE(inserted);
+  }
+
+  virtual void RemoveObserver(WebRtcVideoEncoderFactory::Observer* observer) {
+    size_t erased = observers_.erase(observer);
+    EXPECT_EQ(erased, 1UL);
+  }
+
+  virtual const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs()
+      const {
+    return codecs_;
+  }
+
+  // Advertises |type| under |name| with a canned 1280x720@30 capability.
+  void AddSupportedVideoCodecType(webrtc::VideoCodecType type,
+                                  const std::string& name) {
+    supported_codec_types_.insert(type);
+    codecs_.push_back(
+        WebRtcVideoEncoderFactory::VideoCodec(type, name, 1280, 720, 30));
+  }
+
+  // Fires OnCodecsAvailable() at every registered observer.
+  void NotifyCodecsAvailable() {
+    std::set<WebRtcVideoEncoderFactory::Observer*>::iterator it;
+    for (it = observers_.begin(); it != observers_.end(); ++it)
+      (*it)->OnCodecsAvailable();
+  }
+
+  // Total encoders ever created; not decremented on destroy.
+  int GetNumCreatedEncoders() {
+    return num_created_encoders_;
+  }
+
+  // Encoders currently alive (created and not yet destroyed).
+  const std::vector<FakeWebRtcVideoEncoder*>& encoders() {
+    return encoders_;
+  }
+
+ private:
+  std::set<webrtc::VideoCodecType> supported_codec_types_;
+  std::vector<WebRtcVideoEncoderFactory::VideoCodec> codecs_;
+  std::vector<FakeWebRtcVideoEncoder*> encoders_;
+  std::set<WebRtcVideoEncoderFactory::Observer*> observers_;
+  int num_created_encoders_;
+};
+
+class FakeWebRtcVideoEngine
+ : public webrtc::ViEBase,
+ public webrtc::ViECodec,
+ public webrtc::ViECapture,
+ public webrtc::ViENetwork,
+ public webrtc::ViERender,
+ public webrtc::ViERTP_RTCP,
+ public webrtc::ViEImageProcess,
+ public webrtc::ViEExternalCodec {
+ public:
+ struct Channel {
+ Channel()
+ : capture_id_(-1),
+ original_channel_id_(-1),
+ has_renderer_(false),
+ render_started_(false),
+ send(false),
+ receive_(false),
+ can_transmit_(true),
+ rtcp_status_(webrtc::kRtcpNone),
+ key_frame_request_method_(webrtc::kViEKeyFrameRequestNone),
+ tmmbr_(false),
+ remb_contribute_(false),
+ remb_bw_partition_(false),
+ rtp_offset_send_id_(0),
+ rtp_offset_receive_id_(0),
+ rtp_absolute_send_time_send_id_(0),
+ rtp_absolute_send_time_receive_id_(0),
+ sender_target_delay_(0),
+ receiver_target_delay_(0),
+ transmission_smoothing_(false),
+ nack_(false),
+ hybrid_nack_fec_(false),
+ send_video_bitrate_(0),
+ send_fec_bitrate_(0),
+ send_nack_bitrate_(0),
+ send_bandwidth_(0),
+ receive_bandwidth_(0) {
+ ssrcs_[0] = 0; // default ssrc.
+ memset(&send_codec, 0, sizeof(send_codec));
+ }
+ int capture_id_;
+ int original_channel_id_;
+ bool has_renderer_;
+ bool render_started_;
+ bool send;
+ bool receive_;
+ bool can_transmit_;
+ std::map<int, int> ssrcs_;
+ std::string cname_;
+ webrtc::ViERTCPMode rtcp_status_;
+ webrtc::ViEKeyFrameRequestMethod key_frame_request_method_;
+ bool tmmbr_;
+ bool remb_contribute_; // This channel contributes to the remb report.
+ bool remb_bw_partition_; // This channel is allocated part of total bw.
+ int rtp_offset_send_id_;
+ int rtp_offset_receive_id_;
+ int rtp_absolute_send_time_send_id_;
+ int rtp_absolute_send_time_receive_id_;
+ int sender_target_delay_;
+ int receiver_target_delay_;
+ bool transmission_smoothing_;
+ bool nack_;
+ bool hybrid_nack_fec_;
+ std::vector<webrtc::VideoCodec> recv_codecs;
+ std::set<unsigned int> ext_decoder_pl_types_;
+ std::set<unsigned int> ext_encoder_pl_types_;
+ webrtc::VideoCodec send_codec;
+ unsigned int send_video_bitrate_;
+ unsigned int send_fec_bitrate_;
+ unsigned int send_nack_bitrate_;
+ unsigned int send_bandwidth_;
+ unsigned int receive_bandwidth_;
+ };
+ class Capturer : public webrtc::ViEExternalCapture {
+ public:
+ Capturer() : channel_id_(-1), denoising_(false), last_capture_time_(0) { }
+ int channel_id() const { return channel_id_; }
+ void set_channel_id(int channel_id) { channel_id_ = channel_id; }
+ bool denoising() const { return denoising_; }
+ void set_denoising(bool denoising) { denoising_ = denoising; }
+ int64 last_capture_time() { return last_capture_time_; }
+
+ // From ViEExternalCapture
+ virtual int IncomingFrame(unsigned char* videoFrame,
+ unsigned int videoFrameLength,
+ unsigned short width,
+ unsigned short height,
+ webrtc::RawVideoType videoType,
+ unsigned long long captureTime) {
+ return 0;
+ }
+ virtual int IncomingFrameI420(
+ const webrtc::ViEVideoFrameI420& video_frame,
+ unsigned long long captureTime) {
+ last_capture_time_ = captureTime;
+ return 0;
+ }
+
+ private:
+ int channel_id_;
+ bool denoising_;
+ int64 last_capture_time_;
+ };
+
+ FakeWebRtcVideoEngine(const cricket::VideoCodec* const* codecs,
+ int num_codecs)
+ : inited_(false),
+ last_channel_(kViEChannelIdBase - 1),
+ fail_create_channel_(false),
+ last_capturer_(kViECaptureIdBase - 1),
+ fail_alloc_capturer_(false),
+ codecs_(codecs),
+ num_codecs_(num_codecs),
+ num_set_send_codecs_(0) {
+ }
+
+ ~FakeWebRtcVideoEngine() {
+ ASSERT(0 == channels_.size());
+ ASSERT(0 == capturers_.size());
+ }
+ bool IsInited() const { return inited_; }
+
+ int GetLastChannel() const { return last_channel_; }
+ int GetChannelFromLocalSsrc(int local_ssrc) const {
+ // ssrcs_[0] is the default local ssrc.
+ for (std::map<int, Channel*>::const_iterator iter = channels_.begin();
+ iter != channels_.end(); ++iter) {
+ if (local_ssrc == iter->second->ssrcs_[0]) {
+ return iter->first;
+ }
+ }
+ return -1;
+ }
+
+ int GetNumChannels() const { return static_cast<int>(channels_.size()); }
+ bool IsChannel(int channel) const {
+ return (channels_.find(channel) != channels_.end());
+ }
+ void set_fail_create_channel(bool fail_create_channel) {
+ fail_create_channel_ = fail_create_channel;
+ }
+
+ int GetLastCapturer() const { return last_capturer_; }
+ int GetNumCapturers() const { return static_cast<int>(capturers_.size()); }
+ void set_fail_alloc_capturer(bool fail_alloc_capturer) {
+ fail_alloc_capturer_ = fail_alloc_capturer;
+ }
+ int num_set_send_codecs() const { return num_set_send_codecs_; }
+
+ int GetCaptureId(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->capture_id_;
+ }
+ int GetOriginalChannelId(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->original_channel_id_;
+ }
+ bool GetHasRenderer(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->has_renderer_;
+ }
+ bool GetRenderStarted(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->render_started_;
+ }
+ bool GetSend(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->send;
+ }
+ int GetCaptureChannelId(int capture_id) const {
+ WEBRTC_ASSERT_CAPTURER(capture_id);
+ return capturers_.find(capture_id)->second->channel_id();
+ }
+ bool GetCaptureDenoising(int capture_id) const {
+ WEBRTC_ASSERT_CAPTURER(capture_id);
+ return capturers_.find(capture_id)->second->denoising();
+ }
+ int64 GetCaptureLastTimestamp(int capture_id) const {
+ WEBRTC_ASSERT_CAPTURER(capture_id);
+ return capturers_.find(capture_id)->second->last_capture_time();
+ }
+ webrtc::ViERTCPMode GetRtcpStatus(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->rtcp_status_;
+ }
+ webrtc::ViEKeyFrameRequestMethod GetKeyFrameRequestMethod(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->key_frame_request_method_;
+ }
+ bool GetTmmbrStatus(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->tmmbr_;
+ }
+ bool GetRembStatusBwPartition(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->remb_bw_partition_;
+ }
+ bool GetRembStatusContribute(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->remb_contribute_;
+ }
+ int GetSendRtpTimestampOffsetExtensionId(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->rtp_offset_send_id_;
+ }
+ int GetReceiveRtpTimestampOffsetExtensionId(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->rtp_offset_receive_id_;
+ }
+ int GetSendAbsoluteSendTimeExtensionId(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->rtp_absolute_send_time_send_id_;
+ }
+ int GetReceiveAbsoluteSendTimeExtensionId(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->rtp_absolute_send_time_receive_id_;
+ }
+ bool GetTransmissionSmoothingStatus(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->transmission_smoothing_;
+ }
+ int GetSenderTargetDelay(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->sender_target_delay_;
+ }
+ int GetReceiverTargetDelay(int channel) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->receiver_target_delay_;
+ }
+ bool GetNackStatus(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->nack_;
+ }
+ bool GetHybridNackFecStatus(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->hybrid_nack_fec_;
+ }
+ int GetNumSsrcs(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return static_cast<int>(
+ channels_.find(channel)->second->ssrcs_.size());
+ }
+ bool GetIsTransmitting(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->can_transmit_;
+ }
+ bool ReceiveCodecRegistered(int channel,
+ const webrtc::VideoCodec& codec) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ const std::vector<webrtc::VideoCodec>& codecs =
+ channels_.find(channel)->second->recv_codecs;
+ return std::find(codecs.begin(), codecs.end(), codec) != codecs.end();
+ };
+ bool ExternalDecoderRegistered(int channel,
+ unsigned int pl_type) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->
+ ext_decoder_pl_types_.count(pl_type) != 0;
+ };
+ int GetNumExternalDecoderRegistered(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return static_cast<int>(
+ channels_.find(channel)->second->ext_decoder_pl_types_.size());
+ };
+ bool ExternalEncoderRegistered(int channel,
+ unsigned int pl_type) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return channels_.find(channel)->second->
+ ext_encoder_pl_types_.count(pl_type) != 0;
+ };
+ int GetNumExternalEncoderRegistered(int channel) const {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ return static_cast<int>(
+ channels_.find(channel)->second->ext_encoder_pl_types_.size());
+ };
+ int GetTotalNumExternalEncoderRegistered() const {
+ std::map<int, Channel*>::const_iterator it;
+ int total_num_registered = 0;
+ for (it = channels_.begin(); it != channels_.end(); ++it)
+ total_num_registered +=
+ static_cast<int>(it->second->ext_encoder_pl_types_.size());
+ return total_num_registered;
+ }
+ void SetSendBitrates(int channel, unsigned int video_bitrate,
+ unsigned int fec_bitrate, unsigned int nack_bitrate) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ channels_[channel]->send_video_bitrate_ = video_bitrate;
+ channels_[channel]->send_fec_bitrate_ = fec_bitrate;
+ channels_[channel]->send_nack_bitrate_ = nack_bitrate;
+ }
+ void SetSendBandwidthEstimate(int channel, unsigned int send_bandwidth) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ channels_[channel]->send_bandwidth_ = send_bandwidth;
+ }
+ void SetReceiveBandwidthEstimate(int channel,
+ unsigned int receive_bandwidth) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ channels_[channel]->receive_bandwidth_ = receive_bandwidth;
+ };
+
+ WEBRTC_STUB(Release, ());
+
+ // webrtc::ViEBase
+ WEBRTC_FUNC(Init, ()) {
+ inited_ = true;
+ return 0;
+ };
+ WEBRTC_STUB(SetVoiceEngine, (webrtc::VoiceEngine*));
+ WEBRTC_FUNC(CreateChannel, (int& channel)) { // NOLINT
+ if (fail_create_channel_) {
+ return -1;
+ }
+ if (kViEChannelIdMax == last_channel_) {
+ return -1;
+ }
+ Channel* ch = new Channel();
+ channels_[++last_channel_] = ch;
+ channel = last_channel_;
+ return 0;
+ };
+ WEBRTC_FUNC(CreateChannel, (int& channel, int original_channel)) {
+ WEBRTC_CHECK_CHANNEL(original_channel);
+ if (CreateChannel(channel) != 0) {
+ return -1;
+ }
+ channels_[channel]->original_channel_id_ = original_channel;
+ return 0;
+ }
+ WEBRTC_FUNC(CreateReceiveChannel, (int& channel, int original_channel)) {
+ return CreateChannel(channel, original_channel);
+ }
+ WEBRTC_FUNC(DeleteChannel, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ // Make sure we deregister all the decoders before deleting a channel.
+ EXPECT_EQ(0, GetNumExternalDecoderRegistered(channel));
+ delete channels_[channel];
+ channels_.erase(channel);
+ return 0;
+ }
+ WEBRTC_STUB(RegisterCpuOveruseObserver,
+ (int channel, webrtc::CpuOveruseObserver* observer));
+ WEBRTC_STUB(ConnectAudioChannel, (const int, const int));
+ WEBRTC_STUB(DisconnectAudioChannel, (const int));
+ WEBRTC_FUNC(StartSend, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StopSend, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send = false;
+ return 0;
+ }
+ WEBRTC_FUNC(StartReceive, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->receive_ = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StopReceive, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->receive_ = false;
+ return 0;
+ }
+ WEBRTC_STUB(GetVersion, (char version[1024]));
+ WEBRTC_STUB(LastError, ());
+
+ // webrtc::ViECodec
+ WEBRTC_FUNC_CONST(NumberOfCodecs, ()) {
+ return num_codecs_;
+ };
+ WEBRTC_FUNC_CONST(GetCodec, (const unsigned char list_number,
+ webrtc::VideoCodec& out_codec)) {
+ if (list_number >= NumberOfCodecs()) {
+ return -1;
+ }
+ memset(&out_codec, 0, sizeof(out_codec));
+ const cricket::VideoCodec& c(*codecs_[list_number]);
+ if ("I420" == c.name) {
+ out_codec.codecType = webrtc::kVideoCodecI420;
+ } else if ("VP8" == c.name) {
+ out_codec.codecType = webrtc::kVideoCodecVP8;
+ } else if ("red" == c.name) {
+ out_codec.codecType = webrtc::kVideoCodecRED;
+ } else if ("ulpfec" == c.name) {
+ out_codec.codecType = webrtc::kVideoCodecULPFEC;
+ } else {
+ out_codec.codecType = webrtc::kVideoCodecUnknown;
+ }
+ talk_base::strcpyn(out_codec.plName, sizeof(out_codec.plName),
+ c.name.c_str());
+ out_codec.plType = c.id;
+ out_codec.width = c.width;
+ out_codec.height = c.height;
+ out_codec.startBitrate = kStartVideoBitrate;
+ out_codec.maxBitrate = kMaxVideoBitrate;
+ out_codec.minBitrate = kMinVideoBitrate;
+ out_codec.maxFramerate = c.framerate;
+ return 0;
+ };
+ WEBRTC_FUNC(SetSendCodec, (const int channel,
+ const webrtc::VideoCodec& codec)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send_codec = codec;
+ ++num_set_send_codecs_;
+ return 0;
+ };
+ WEBRTC_FUNC_CONST(GetSendCodec, (const int channel,
+ webrtc::VideoCodec& codec)) { // NOLINT
+ WEBRTC_CHECK_CHANNEL(channel);
+ codec = channels_.find(channel)->second->send_codec;
+ return 0;
+ };
+ WEBRTC_FUNC(SetReceiveCodec, (const int channel,
+ const webrtc::VideoCodec& codec)) { // NOLINT
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->recv_codecs.push_back(codec);
+ return 0;
+ };
+ WEBRTC_STUB_CONST(GetReceiveCodec, (const int, webrtc::VideoCodec&));
+ WEBRTC_STUB_CONST(GetCodecConfigParameters, (const int,
+ unsigned char*, unsigned char&));
+ WEBRTC_STUB(SetImageScaleStatus, (const int, const bool));
+ WEBRTC_STUB_CONST(GetSendCodecStastistics, (const int,
+ unsigned int&, unsigned int&));
+ WEBRTC_STUB_CONST(GetReceiveCodecStastistics, (const int,
+ unsigned int&, unsigned int&));
+ WEBRTC_STUB_CONST(GetReceiveSideDelay, (const int video_channel,
+ int* delay_ms));
+ WEBRTC_FUNC_CONST(GetCodecTargetBitrate, (const int channel,
+ unsigned int* codec_target_bitrate)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+
+ std::map<int, Channel*>::const_iterator it = channels_.find(channel);
+ if (it->second->send) {
+ // Assume the encoder produces the expected rate.
+ *codec_target_bitrate = it->second->send_video_bitrate_;
+ } else {
+ *codec_target_bitrate = 0;
+ }
+ return 0;
+ }
+ virtual unsigned int GetDiscardedPackets(const int channel) const {
+ return 0;
+ }
+
+ WEBRTC_STUB(SetKeyFrameRequestCallbackStatus, (const int, const bool));
+ WEBRTC_STUB(SetSignalKeyPacketLossStatus, (const int, const bool,
+ const bool));
+ WEBRTC_STUB(RegisterEncoderObserver, (const int,
+ webrtc::ViEEncoderObserver&));
+ WEBRTC_STUB(DeregisterEncoderObserver, (const int));
+ WEBRTC_STUB(RegisterDecoderObserver, (const int,
+ webrtc::ViEDecoderObserver&));
+ WEBRTC_STUB(DeregisterDecoderObserver, (const int));
+ WEBRTC_STUB(SendKeyFrame, (const int));
+ WEBRTC_STUB(WaitForFirstKeyFrame, (const int, const bool));
+ WEBRTC_STUB(StartDebugRecording, (int, const char*));
+ WEBRTC_STUB(StopDebugRecording, (int));
+
+ // webrtc::ViECapture
+ WEBRTC_STUB(NumberOfCaptureDevices, ());
+ WEBRTC_STUB(GetCaptureDevice, (unsigned int, char*,
+ const unsigned int, char*, const unsigned int));
+ WEBRTC_STUB(AllocateCaptureDevice, (const char*, const unsigned int, int&));
+ WEBRTC_FUNC(AllocateExternalCaptureDevice,
+ (int& capture_id, webrtc::ViEExternalCapture*& capture)) {
+ if (fail_alloc_capturer_) {
+ return -1;
+ }
+ if (kViECaptureIdMax == last_capturer_) {
+ return -1;
+ }
+ Capturer* cap = new Capturer();
+ capturers_[++last_capturer_] = cap;
+ capture_id = last_capturer_;
+ capture = cap;
+ return 0;
+ }
+ WEBRTC_STUB(AllocateCaptureDevice, (webrtc::VideoCaptureModule&, int&));
+ WEBRTC_FUNC(ReleaseCaptureDevice, (const int capture_id)) {
+ WEBRTC_CHECK_CAPTURER(capture_id);
+ delete capturers_[capture_id];
+ capturers_.erase(capture_id);
+ return 0;
+ }
+ WEBRTC_FUNC(ConnectCaptureDevice, (const int capture_id,
+ const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ WEBRTC_CHECK_CAPTURER(capture_id);
+ channels_[channel]->capture_id_ = capture_id;
+ capturers_[capture_id]->set_channel_id(channel);
+ return 0;
+ }
+ WEBRTC_FUNC(DisconnectCaptureDevice, (const int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ int capture_id = channels_[channel]->capture_id_;
+ WEBRTC_CHECK_CAPTURER(capture_id);
+ channels_[channel]->capture_id_ = -1;
+ capturers_[capture_id]->set_channel_id(-1);
+ return 0;
+ }
+ WEBRTC_STUB(StartCapture, (const int, const webrtc::CaptureCapability&));
+ WEBRTC_STUB(StopCapture, (const int));
+ WEBRTC_STUB(SetRotateCapturedFrames, (const int,
+ const webrtc::RotateCapturedFrame));
+ WEBRTC_STUB(SetCaptureDelay, (const int, const unsigned int));
+ WEBRTC_STUB(NumberOfCapabilities, (const char*, const unsigned int));
+ WEBRTC_STUB(GetCaptureCapability, (const char*, const unsigned int,
+ const unsigned int, webrtc::CaptureCapability&));
+ WEBRTC_STUB(ShowCaptureSettingsDialogBox, (const char*, const unsigned int,
+ const char*, void*, const unsigned int, const unsigned int));
+ WEBRTC_STUB(GetOrientation, (const char*, webrtc::RotateCapturedFrame&));
+ WEBRTC_STUB(EnableBrightnessAlarm, (const int, const bool));
+ WEBRTC_STUB(RegisterObserver, (const int, webrtc::ViECaptureObserver&));
+ WEBRTC_STUB(DeregisterObserver, (const int));
+
+ // webrtc::ViENetwork
+ WEBRTC_VOID_FUNC(SetNetworkTransmissionState, (const int channel,
+ const bool is_transmitting)) {
+ WEBRTC_ASSERT_CHANNEL(channel);
+ channels_[channel]->can_transmit_ = is_transmitting;
+ }
+ WEBRTC_STUB(RegisterSendTransport, (const int, webrtc::Transport&));
+ WEBRTC_STUB(DeregisterSendTransport, (const int));
+ WEBRTC_STUB(ReceivedRTPPacket, (const int, const void*, const int));
+ WEBRTC_STUB(ReceivedRTCPPacket, (const int, const void*, const int));
+ // Not using WEBRTC_STUB due to bool return value
+ virtual bool IsIPv6Enabled(int channel) { return true; }
+ WEBRTC_STUB(SetMTU, (int, unsigned int));
+ WEBRTC_STUB(SetPacketTimeoutNotification, (const int, bool, int));
+ WEBRTC_STUB(RegisterObserver, (const int, webrtc::ViENetworkObserver&));
+ WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (const int, const bool,
+ const unsigned int));
+
+ // webrtc::ViERender
+ WEBRTC_STUB(RegisterVideoRenderModule, (webrtc::VideoRender&));
+ WEBRTC_STUB(DeRegisterVideoRenderModule, (webrtc::VideoRender&));
+ WEBRTC_STUB(AddRenderer, (const int, void*, const unsigned int, const float,
+ const float, const float, const float));
+ WEBRTC_FUNC(RemoveRenderer, (const int render_id)) {
+ if (IsCapturerId(render_id)) {
+ WEBRTC_CHECK_CAPTURER(render_id);
+ return 0;
+ } else if (IsChannelId(render_id)) {
+ WEBRTC_CHECK_CHANNEL(render_id);
+ channels_[render_id]->has_renderer_ = false;
+ return 0;
+ }
+ return -1;
+ }
+ WEBRTC_FUNC(StartRender, (const int render_id)) {
+ if (IsCapturerId(render_id)) {
+ WEBRTC_CHECK_CAPTURER(render_id);
+ return 0;
+ } else if (IsChannelId(render_id)) {
+ WEBRTC_CHECK_CHANNEL(render_id);
+ channels_[render_id]->render_started_ = true;
+ return 0;
+ }
+ return -1;
+ }
+ WEBRTC_FUNC(StopRender, (const int render_id)) {
+ if (IsCapturerId(render_id)) {
+ WEBRTC_CHECK_CAPTURER(render_id);
+ return 0;
+ } else if (IsChannelId(render_id)) {
+ WEBRTC_CHECK_CHANNEL(render_id);
+ channels_[render_id]->render_started_ = false;
+ return 0;
+ }
+ return -1;
+ }
+ WEBRTC_STUB(SetExpectedRenderDelay, (int render_id, int render_delay));
+ WEBRTC_STUB(ConfigureRender, (int, const unsigned int, const float,
+ const float, const float, const float));
+ WEBRTC_STUB(MirrorRenderStream, (const int, const bool, const bool,
+ const bool));
+ WEBRTC_FUNC(AddRenderer, (const int render_id,
+ webrtc::RawVideoType video_type,
+ webrtc::ExternalRenderer* renderer)) {
+ if (IsCapturerId(render_id)) {
+ WEBRTC_CHECK_CAPTURER(render_id);
+ return 0;
+ } else if (IsChannelId(render_id)) {
+ WEBRTC_CHECK_CHANNEL(render_id);
+ channels_[render_id]->has_renderer_ = true;
+ return 0;
+ }
+ return -1;
+ }
+
+ // webrtc::ViERTP_RTCP
+ WEBRTC_FUNC(SetLocalSSRC, (const int channel,
+ const unsigned int ssrc,
+ const webrtc::StreamType usage,
+ const unsigned char idx)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->ssrcs_[idx] = ssrc;
+ return 0;
+ }
+ WEBRTC_STUB_CONST(SetRemoteSSRCType, (const int,
+ const webrtc::StreamType, const unsigned int));
+
+ WEBRTC_FUNC_CONST(GetLocalSSRC, (const int channel,
+ unsigned int& ssrc)) {
+ // ssrcs_[0] is the default local ssrc.
+ WEBRTC_CHECK_CHANNEL(channel);
+ ssrc = channels_.find(channel)->second->ssrcs_[0];
+ return 0;
+ }
+ WEBRTC_STUB_CONST(GetRemoteSSRC, (const int, unsigned int&));
+ WEBRTC_STUB_CONST(GetRemoteCSRCs, (const int, unsigned int*));
+
+ WEBRTC_STUB(SetRtxSendPayloadType, (const int, const uint8));
+ WEBRTC_STUB(SetRtxReceivePayloadType, (const int, const uint8));
+
+ WEBRTC_STUB(SetStartSequenceNumber, (const int, unsigned short));
+ WEBRTC_FUNC(SetRTCPStatus,
+ (const int channel, const webrtc::ViERTCPMode mode)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->rtcp_status_ = mode;
+ return 0;
+ }
+ WEBRTC_STUB_CONST(GetRTCPStatus, (const int, webrtc::ViERTCPMode&));
+ WEBRTC_FUNC(SetRTCPCName, (const int channel,
+ const char rtcp_cname[KMaxRTCPCNameLength])) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->cname_.assign(rtcp_cname);
+ return 0;
+ }
+ WEBRTC_FUNC_CONST(GetRTCPCName, (const int channel,
+ char rtcp_cname[KMaxRTCPCNameLength])) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ talk_base::strcpyn(rtcp_cname, KMaxRTCPCNameLength,
+ channels_.find(channel)->second->cname_.c_str());
+ return 0;
+ }
+ WEBRTC_STUB_CONST(GetRemoteRTCPCName, (const int, char*));
+ WEBRTC_STUB(SendApplicationDefinedRTCPPacket, (const int, const unsigned char,
+ unsigned int, const char*, unsigned short));
+ WEBRTC_FUNC(SetNACKStatus, (const int channel, const bool enable)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->nack_ = enable;
+ channels_[channel]->hybrid_nack_fec_ = false;
+ return 0;
+ }
+ WEBRTC_STUB(SetFECStatus, (const int, const bool, const unsigned char,
+ const unsigned char));
+ WEBRTC_FUNC(SetHybridNACKFECStatus, (const int channel, const bool enable,
+ const unsigned char red_type, const unsigned char fec_type)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (red_type == fec_type ||
+ red_type == channels_[channel]->send_codec.plType ||
+ fec_type == channels_[channel]->send_codec.plType) {
+ return -1;
+ }
+ channels_[channel]->nack_ = false;
+ channels_[channel]->hybrid_nack_fec_ = enable;
+ return 0;
+ }
+ WEBRTC_FUNC(SetKeyFrameRequestMethod,
+ (const int channel,
+ const webrtc::ViEKeyFrameRequestMethod method)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->key_frame_request_method_ = method;
+ return 0;
+ }
+ WEBRTC_FUNC(SetSenderBufferingMode, (int channel, int target_delay)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->sender_target_delay_ = target_delay;
+ return 0;
+ }
+ WEBRTC_FUNC(SetReceiverBufferingMode, (int channel, int target_delay)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->receiver_target_delay_ = target_delay;
+ return 0;
+ }
+ // |Send| and |receive| are stored locally in variables that more clearly
+ // explain what they mean.
+ WEBRTC_FUNC(SetRembStatus, (int channel, bool send, bool receive)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->remb_contribute_ = receive;
+ channels_[channel]->remb_bw_partition_ = send;
+ return 0;
+ }
+ WEBRTC_FUNC(SetTMMBRStatus, (const int channel, const bool enable)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->tmmbr_ = enable;
+ return 0;
+ }
+ WEBRTC_FUNC(SetSendTimestampOffsetStatus, (int channel, bool enable,
+ int id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->rtp_offset_send_id_ = (enable) ? id : 0;
+ return 0;
+ }
+ WEBRTC_FUNC(SetReceiveTimestampOffsetStatus, (int channel, bool enable,
+ int id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->rtp_offset_receive_id_ = (enable) ? id : 0;
+ return 0;
+ }
+ WEBRTC_FUNC(SetSendAbsoluteSendTimeStatus, (int channel, bool enable,
+ int id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->rtp_absolute_send_time_send_id_ = (enable) ? id : 0;
+ return 0;
+ }
+ WEBRTC_FUNC(SetReceiveAbsoluteSendTimeStatus, (int channel, bool enable,
+ int id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->rtp_absolute_send_time_receive_id_ = (enable) ? id : 0;
+ return 0;
+ }
+ WEBRTC_FUNC(SetTransmissionSmoothingStatus, (int channel, bool enable)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->transmission_smoothing_ = enable;
+ return 0;
+ }
+ WEBRTC_STUB_CONST(GetReceivedRTCPStatistics, (const int, unsigned short&,
+ unsigned int&, unsigned int&, unsigned int&, int&));
+ WEBRTC_STUB_CONST(GetSentRTCPStatistics, (const int, unsigned short&,
+ unsigned int&, unsigned int&, unsigned int&, int&));
+ WEBRTC_STUB_CONST(GetRTPStatistics, (const int, unsigned int&, unsigned int&,
+ unsigned int&, unsigned int&));
+ WEBRTC_FUNC_CONST(GetBandwidthUsage, (const int channel,
+ unsigned int& total_bitrate, unsigned int& video_bitrate,
+ unsigned int& fec_bitrate, unsigned int& nack_bitrate)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ std::map<int, Channel*>::const_iterator it = channels_.find(channel);
+ if (it->second->send) {
+ video_bitrate = it->second->send_video_bitrate_;
+ fec_bitrate = it->second->send_fec_bitrate_;
+ nack_bitrate = it->second->send_nack_bitrate_;
+ total_bitrate = video_bitrate + fec_bitrate + nack_bitrate;
+ } else {
+ total_bitrate = 0;
+ video_bitrate = 0;
+ fec_bitrate = 0;
+ nack_bitrate = 0;
+ }
+ return 0;
+ }
+ WEBRTC_FUNC_CONST(GetEstimatedSendBandwidth, (const int channel,
+ unsigned int* send_bandwidth_estimate)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ std::map<int, Channel*>::const_iterator it = channels_.find(channel);
+ // Assume the current video, fec and nack bitrate sums up to our estimate.
+ if (it->second->send) {
+ *send_bandwidth_estimate = it->second->send_bandwidth_;
+ } else {
+ *send_bandwidth_estimate = 0;
+ }
+ return 0;
+ }
+ WEBRTC_FUNC_CONST(GetEstimatedReceiveBandwidth, (const int channel,
+ unsigned int* receive_bandwidth_estimate)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ std::map<int, Channel*>::const_iterator it = channels_.find(channel);
+ if (it->second->receive_) {
+ // For simplicity, assume all channels receive half of max send rate.
+ *receive_bandwidth_estimate = it->second->receive_bandwidth_;
+ } else {
+ *receive_bandwidth_estimate = 0;
+ }
+ return 0;
+ }
+
+ WEBRTC_STUB(StartRTPDump, (const int, const char*, webrtc::RTPDirections));
+ WEBRTC_STUB(StopRTPDump, (const int, webrtc::RTPDirections));
+ WEBRTC_STUB(RegisterRTPObserver, (const int, webrtc::ViERTPObserver&));
+ WEBRTC_STUB(DeregisterRTPObserver, (const int));
+ WEBRTC_STUB(RegisterRTCPObserver, (const int, webrtc::ViERTCPObserver&));
+ WEBRTC_STUB(DeregisterRTCPObserver, (const int));
+
+ // webrtc::ViEImageProcess
+ WEBRTC_STUB(RegisterCaptureEffectFilter, (const int,
+ webrtc::ViEEffectFilter&));
+ WEBRTC_STUB(DeregisterCaptureEffectFilter, (const int));
+ WEBRTC_STUB(RegisterSendEffectFilter, (const int,
+ webrtc::ViEEffectFilter&));
+ WEBRTC_STUB(DeregisterSendEffectFilter, (const int));
+ WEBRTC_STUB(RegisterRenderEffectFilter, (const int,
+ webrtc::ViEEffectFilter&));
+ WEBRTC_STUB(DeregisterRenderEffectFilter, (const int));
+ WEBRTC_STUB(EnableDeflickering, (const int, const bool));
+ WEBRTC_FUNC(EnableDenoising, (const int capture_id, const bool denoising)) {
+ WEBRTC_CHECK_CAPTURER(capture_id);
+ capturers_[capture_id]->set_denoising(denoising);
+ return 0;
+ }
+ WEBRTC_STUB(EnableColorEnhancement, (const int, const bool));
+
+ // webrtc::ViEExternalCodec
+ WEBRTC_FUNC(RegisterExternalSendCodec,
+ (const int channel, const unsigned char pl_type, webrtc::VideoEncoder*,
+ bool)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->ext_encoder_pl_types_.insert(pl_type);
+ return 0;
+ }
+ WEBRTC_FUNC(DeRegisterExternalSendCodec,
+ (const int channel, const unsigned char pl_type)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->ext_encoder_pl_types_.erase(pl_type);
+ return 0;
+ }
+ WEBRTC_FUNC(RegisterExternalReceiveCodec,
+ (const int channel, const unsigned int pl_type, webrtc::VideoDecoder*,
+ bool, int)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->ext_decoder_pl_types_.insert(pl_type);
+ return 0;
+ }
+ WEBRTC_FUNC(DeRegisterExternalReceiveCodec,
+ (const int channel, const unsigned char pl_type)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->ext_decoder_pl_types_.erase(pl_type);
+ return 0;
+ }
+
+ private:
+ bool IsChannelId(int id) const {
+ return (id >= kViEChannelIdBase && id <= kViEChannelIdMax);
+ }
+ bool IsCapturerId(int id) const {
+ return (id >= kViECaptureIdBase && id <= kViECaptureIdMax);
+ }
+
+ bool inited_;
+ int last_channel_;
+ std::map<int, Channel*> channels_;
+ bool fail_create_channel_;
+ int last_capturer_;
+ std::map<int, Capturer*> capturers_;
+ bool fail_alloc_capturer_;
+ const cricket::VideoCodec* const* codecs_;
+ int num_codecs_;
+ int num_set_send_codecs_; // how many times we call SetSendCodec().
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTC_FAKEWEBRTCVIDEOENGINE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvoiceengine.h b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvoiceengine.h
new file mode 100644
index 00000000000..4ed38d1d3f6
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/fakewebrtcvoiceengine.h
@@ -0,0 +1,1020 @@
+/*
+ * libjingle
+ * Copyright 2010 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_SESSION_PHONE_FAKEWEBRTCVOICEENGINE_H_
+#define TALK_SESSION_PHONE_FAKEWEBRTCVOICEENGINE_H_
+
+#include <list>
+#include <map>
+#include <vector>
+
+
+#include "talk/base/basictypes.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/base/voiceprocessor.h"
+#include "talk/media/webrtc/fakewebrtccommon.h"
+#include "talk/media/webrtc/webrtcvoe.h"
+
+namespace cricket {
+
+// Function returning stats will return these values
+// for all values based on type.
+const int kIntStatValue = 123;
+const float kFractionLostStatValue = 0.5;
+
+static const char kFakeDefaultDeviceName[] = "Fake Default";
+static const int kFakeDefaultDeviceId = -1;
+static const char kFakeDeviceName[] = "Fake Device";
+#ifdef WIN32
+static const int kFakeDeviceId = 0;
+#else
+static const int kFakeDeviceId = 1;
+#endif
+
+
+class FakeWebRtcVoiceEngine
+ : public webrtc::VoEAudioProcessing,
+ public webrtc::VoEBase, public webrtc::VoECodec, public webrtc::VoEDtmf,
+ public webrtc::VoEFile, public webrtc::VoEHardware,
+ public webrtc::VoEExternalMedia, public webrtc::VoENetEqStats,
+ public webrtc::VoENetwork, public webrtc::VoERTP_RTCP,
+ public webrtc::VoEVideoSync, public webrtc::VoEVolumeControl {
+ public:
+ struct DtmfInfo {
+ DtmfInfo()
+ : dtmf_event_code(-1),
+ dtmf_out_of_band(false),
+ dtmf_length_ms(-1) {}
+ int dtmf_event_code;
+ bool dtmf_out_of_band;
+ int dtmf_length_ms;
+ };
+ struct Channel {
+ Channel()
+ : external_transport(false),
+ send(false),
+ playout(false),
+ volume_scale(1.0),
+ volume_pan_left(1.0),
+ volume_pan_right(1.0),
+ file(false),
+ vad(false),
+ fec(false),
+ nack(false),
+ media_processor_registered(false),
+ cn8_type(13),
+ cn16_type(105),
+ dtmf_type(106),
+ fec_type(117),
+ nack_max_packets(0),
+ send_ssrc(0),
+ level_header_ext_(-1) {
+ memset(&send_codec, 0, sizeof(send_codec));
+ }
+ bool external_transport;
+ bool send;
+ bool playout;
+ float volume_scale;
+ float volume_pan_left;
+ float volume_pan_right;
+ bool file;
+ bool vad;
+ bool fec;
+ bool nack;
+ bool media_processor_registered;
+ int cn8_type;
+ int cn16_type;
+ int dtmf_type;
+ int fec_type;
+ int nack_max_packets;
+ uint32 send_ssrc;
+ int level_header_ext_;
+ DtmfInfo dtmf_info;
+ std::vector<webrtc::CodecInst> recv_codecs;
+ webrtc::CodecInst send_codec;
+ std::list<std::string> packets;
+ };
+
+ FakeWebRtcVoiceEngine(const cricket::AudioCodec* const* codecs,
+ int num_codecs)
+ : inited_(false),
+ last_channel_(-1),
+ fail_create_channel_(false),
+ codecs_(codecs),
+ num_codecs_(num_codecs),
+ ec_enabled_(false),
+ ec_metrics_enabled_(false),
+ cng_enabled_(false),
+ ns_enabled_(false),
+ agc_enabled_(false),
+ highpass_filter_enabled_(false),
+ stereo_swapping_enabled_(false),
+ typing_detection_enabled_(false),
+ ec_mode_(webrtc::kEcDefault),
+ aecm_mode_(webrtc::kAecmSpeakerphone),
+ ns_mode_(webrtc::kNsDefault),
+ agc_mode_(webrtc::kAgcDefault),
+ observer_(NULL),
+ playout_fail_channel_(-1),
+ send_fail_channel_(-1),
+ fail_start_recording_microphone_(false),
+ recording_microphone_(false),
+ media_processor_(NULL) {
+ memset(&agc_config_, 0, sizeof(agc_config_));
+ }
+ ~FakeWebRtcVoiceEngine() {
+ // Ought to have all been deleted by the WebRtcVoiceMediaChannel
+ // destructors, but just in case ...
+ for (std::map<int, Channel*>::const_iterator i = channels_.begin();
+ i != channels_.end(); ++i) {
+ delete i->second;
+ }
+ }
+
+ bool IsExternalMediaProcessorRegistered() const {
+ return media_processor_ != NULL;
+ }
+ bool IsInited() const { return inited_; }
+ int GetLastChannel() const { return last_channel_; }
+ int GetChannelFromLocalSsrc(uint32 local_ssrc) const {
+ for (std::map<int, Channel*>::const_iterator iter = channels_.begin();
+ iter != channels_.end(); ++iter) {
+ if (local_ssrc == iter->second->send_ssrc)
+ return iter->first;
+ }
+ return -1;
+ }
+ int GetNumChannels() const { return channels_.size(); }
+ bool GetPlayout(int channel) {
+ return channels_[channel]->playout;
+ }
+ bool GetSend(int channel) {
+ return channels_[channel]->send;
+ }
+ bool GetRecordingMicrophone() {
+ return recording_microphone_;
+ }
+ bool GetVAD(int channel) {
+ return channels_[channel]->vad;
+ }
+ bool GetFEC(int channel) {
+ return channels_[channel]->fec;
+ }
+ bool GetNACK(int channel) {
+ return channels_[channel]->nack;
+ }
+ int GetNACKMaxPackets(int channel) {
+ return channels_[channel]->nack_max_packets;
+ }
+ int GetSendCNPayloadType(int channel, bool wideband) {
+ return (wideband) ?
+ channels_[channel]->cn16_type :
+ channels_[channel]->cn8_type;
+ }
+ int GetSendTelephoneEventPayloadType(int channel) {
+ return channels_[channel]->dtmf_type;
+ }
+ int GetSendFECPayloadType(int channel) {
+ return channels_[channel]->fec_type;
+ }
+ bool CheckPacket(int channel, const void* data, size_t len) {
+ bool result = !CheckNoPacket(channel);
+ if (result) {
+ std::string packet = channels_[channel]->packets.front();
+ result = (packet == std::string(static_cast<const char*>(data), len));
+ channels_[channel]->packets.pop_front();
+ }
+ return result;
+ }
+ bool CheckNoPacket(int channel) {
+ return channels_[channel]->packets.empty();
+ }
+ void TriggerCallbackOnError(int channel_num, int err_code) {
+ ASSERT(observer_ != NULL);
+ observer_->CallbackOnError(channel_num, err_code);
+ }
+ void set_playout_fail_channel(int channel) {
+ playout_fail_channel_ = channel;
+ }
+ void set_send_fail_channel(int channel) {
+ send_fail_channel_ = channel;
+ }
+ void set_fail_start_recording_microphone(
+ bool fail_start_recording_microphone) {
+ fail_start_recording_microphone_ = fail_start_recording_microphone;
+ }
+ void set_fail_create_channel(bool fail_create_channel) {
+ fail_create_channel_ = fail_create_channel;
+ }
+ void TriggerProcessPacket(MediaProcessorDirection direction) {
+ webrtc::ProcessingTypes pt =
+ (direction == cricket::MPD_TX) ?
+ webrtc::kRecordingPerChannel : webrtc::kPlaybackAllChannelsMixed;
+ if (media_processor_ != NULL) {
+ media_processor_->Process(0,
+ pt,
+ NULL,
+ 0,
+ 0,
+ true);
+ }
+ }
+
+ WEBRTC_STUB(Release, ());
+
+ // webrtc::VoEBase
+ WEBRTC_FUNC(RegisterVoiceEngineObserver, (
+ webrtc::VoiceEngineObserver& observer)) {
+ observer_ = &observer;
+ return 0;
+ }
+ WEBRTC_STUB(DeRegisterVoiceEngineObserver, ());
+ WEBRTC_FUNC(Init, (webrtc::AudioDeviceModule* adm,
+ webrtc::AudioProcessing* audioproc)) {
+ inited_ = true;
+ return 0;
+ }
+ WEBRTC_FUNC(Terminate, ()) {
+ inited_ = false;
+ return 0;
+ }
+ virtual webrtc::AudioProcessing* audio_processing() OVERRIDE {
+ return NULL;
+ }
+#ifndef USE_WEBRTC_DEV_BRANCH
+ WEBRTC_STUB(MaxNumOfChannels, ());
+#endif
+ WEBRTC_FUNC(CreateChannel, ()) {
+ if (fail_create_channel_) {
+ return -1;
+ }
+ Channel* ch = new Channel();
+ for (int i = 0; i < NumOfCodecs(); ++i) {
+ webrtc::CodecInst codec;
+ GetCodec(i, codec);
+ ch->recv_codecs.push_back(codec);
+ }
+ channels_[++last_channel_] = ch;
+ return last_channel_;
+ }
+ WEBRTC_FUNC(DeleteChannel, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ delete channels_[channel];
+ channels_.erase(channel);
+ return 0;
+ }
+ WEBRTC_STUB(StartReceive, (int channel));
+ WEBRTC_FUNC(StartPlayout, (int channel)) {
+ if (playout_fail_channel_ != channel) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->playout = true;
+ return 0;
+ } else {
+ // When playout_fail_channel_ == channel, fail the StartPlayout on this
+ // channel.
+ return -1;
+ }
+ }
+ WEBRTC_FUNC(StartSend, (int channel)) {
+ if (send_fail_channel_ != channel) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send = true;
+ return 0;
+ } else {
+ // When send_fail_channel_ == channel, fail the StartSend on this
+ // channel.
+ return -1;
+ }
+ }
+ WEBRTC_STUB(StopReceive, (int channel));
+ WEBRTC_FUNC(StopPlayout, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->playout = false;
+ return 0;
+ }
+ WEBRTC_FUNC(StopSend, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send = false;
+ return 0;
+ }
+ WEBRTC_STUB(GetVersion, (char version[1024]));
+ WEBRTC_STUB(LastError, ());
+ WEBRTC_STUB(SetOnHoldStatus, (int, bool, webrtc::OnHoldModes));
+ WEBRTC_STUB(GetOnHoldStatus, (int, bool&, webrtc::OnHoldModes&));
+ WEBRTC_STUB(SetNetEQPlayoutMode, (int, webrtc::NetEqModes));
+ WEBRTC_STUB(GetNetEQPlayoutMode, (int, webrtc::NetEqModes&));
+
+ // webrtc::VoECodec
+ WEBRTC_FUNC(NumOfCodecs, ()) {
+ return num_codecs_;
+ }
+ WEBRTC_FUNC(GetCodec, (int index, webrtc::CodecInst& codec)) {
+ if (index < 0 || index >= NumOfCodecs()) {
+ return -1;
+ }
+ const cricket::AudioCodec& c(*codecs_[index]);
+ codec.pltype = c.id;
+ talk_base::strcpyn(codec.plname, sizeof(codec.plname), c.name.c_str());
+ codec.plfreq = c.clockrate;
+ codec.pacsize = 0;
+ codec.channels = c.channels;
+ codec.rate = c.bitrate;
+ return 0;
+ }
+ WEBRTC_FUNC(SetSendCodec, (int channel, const webrtc::CodecInst& codec)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send_codec = codec;
+ return 0;
+ }
+ WEBRTC_FUNC(GetSendCodec, (int channel, webrtc::CodecInst& codec)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ codec = channels_[channel]->send_codec;
+ return 0;
+ }
+ WEBRTC_STUB(SetSecondarySendCodec, (int channel,
+ const webrtc::CodecInst& codec,
+ int red_payload_type));
+ WEBRTC_STUB(RemoveSecondarySendCodec, (int channel));
+ WEBRTC_STUB(GetSecondarySendCodec, (int channel,
+ webrtc::CodecInst& codec));
+ WEBRTC_STUB(GetRecCodec, (int channel, webrtc::CodecInst& codec));
+ WEBRTC_STUB(SetAMREncFormat, (int channel, webrtc::AmrMode mode));
+ WEBRTC_STUB(SetAMRDecFormat, (int channel, webrtc::AmrMode mode));
+ WEBRTC_STUB(SetAMRWbEncFormat, (int channel, webrtc::AmrMode mode));
+ WEBRTC_STUB(SetAMRWbDecFormat, (int channel, webrtc::AmrMode mode));
+ WEBRTC_STUB(SetISACInitTargetRate, (int channel, int rateBps,
+ bool useFixedFrameSize));
+ WEBRTC_STUB(SetISACMaxRate, (int channel, int rateBps));
+ WEBRTC_STUB(SetISACMaxPayloadSize, (int channel, int sizeBytes));
+ WEBRTC_FUNC(SetRecPayloadType, (int channel,
+ const webrtc::CodecInst& codec)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ Channel* ch = channels_[channel];
+ if (ch->playout)
+ return -1; // Channel is in use.
+ // Check if something else already has this slot.
+ if (codec.pltype != -1) {
+ for (std::vector<webrtc::CodecInst>::iterator it =
+ ch->recv_codecs.begin(); it != ch->recv_codecs.end(); ++it) {
+ if (it->pltype == codec.pltype &&
+ _stricmp(it->plname, codec.plname) != 0) {
+ return -1;
+ }
+ }
+ }
+ // Otherwise try to find this codec and update its payload type.
+ for (std::vector<webrtc::CodecInst>::iterator it = ch->recv_codecs.begin();
+ it != ch->recv_codecs.end(); ++it) {
+ if (strcmp(it->plname, codec.plname) == 0 &&
+ it->plfreq == codec.plfreq) {
+ it->pltype = codec.pltype;
+ it->channels = codec.channels;
+ return 0;
+ }
+ }
+ return -1; // not found
+ }
+ WEBRTC_FUNC(SetSendCNPayloadType, (int channel, int type,
+ webrtc::PayloadFrequencies frequency)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (frequency == webrtc::kFreq8000Hz) {
+ channels_[channel]->cn8_type = type;
+ } else if (frequency == webrtc::kFreq16000Hz) {
+ channels_[channel]->cn16_type = type;
+ }
+ return 0;
+ }
+ WEBRTC_FUNC(GetRecPayloadType, (int channel, webrtc::CodecInst& codec)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ Channel* ch = channels_[channel];
+ for (std::vector<webrtc::CodecInst>::iterator it = ch->recv_codecs.begin();
+ it != ch->recv_codecs.end(); ++it) {
+ if (strcmp(it->plname, codec.plname) == 0 &&
+ it->plfreq == codec.plfreq &&
+ it->channels == codec.channels &&
+ it->pltype != -1) {
+ codec.pltype = it->pltype;
+ return 0;
+ }
+ }
+ return -1; // not found
+ }
+ WEBRTC_FUNC(SetVADStatus, (int channel, bool enable, webrtc::VadModes mode,
+ bool disableDTX)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (channels_[channel]->send_codec.channels == 2) {
+ // Replicating VoE behavior; VAD cannot be enabled for stereo.
+ return -1;
+ }
+ channels_[channel]->vad = enable;
+ return 0;
+ }
+ WEBRTC_STUB(GetVADStatus, (int channel, bool& enabled,
+ webrtc::VadModes& mode, bool& disabledDTX));
+
+ // webrtc::VoEDtmf
+ WEBRTC_FUNC(SendTelephoneEvent, (int channel, int event_code,
+ bool out_of_band = true, int length_ms = 160, int attenuation_db = 10)) {
+ channels_[channel]->dtmf_info.dtmf_event_code = event_code;
+ channels_[channel]->dtmf_info.dtmf_out_of_band = out_of_band;
+ channels_[channel]->dtmf_info.dtmf_length_ms = length_ms;
+ return 0;
+ }
+
+ WEBRTC_FUNC(SetSendTelephoneEventPayloadType,
+ (int channel, unsigned char type)) {
+ channels_[channel]->dtmf_type = type;
+ return 0;
+ };
+ WEBRTC_STUB(GetSendTelephoneEventPayloadType,
+ (int channel, unsigned char& type));
+
+ WEBRTC_STUB(SetDtmfFeedbackStatus, (bool enable, bool directFeedback));
+ WEBRTC_STUB(GetDtmfFeedbackStatus, (bool& enabled, bool& directFeedback));
+ WEBRTC_STUB(SetDtmfPlayoutStatus, (int channel, bool enable));
+ WEBRTC_STUB(GetDtmfPlayoutStatus, (int channel, bool& enabled));
+
+
+ WEBRTC_FUNC(PlayDtmfTone,
+ (int event_code, int length_ms = 200, int attenuation_db = 10)) {
+ dtmf_info_.dtmf_event_code = event_code;
+ dtmf_info_.dtmf_length_ms = length_ms;
+ return 0;
+ }
+ WEBRTC_STUB(StartPlayingDtmfTone,
+ (int eventCode, int attenuationDb = 10));
+ WEBRTC_STUB(StopPlayingDtmfTone, ());
+
+ // webrtc::VoEFile
+ WEBRTC_FUNC(StartPlayingFileLocally, (int channel, const char* fileNameUTF8,
+ bool loop, webrtc::FileFormats format,
+ float volumeScaling, int startPointMs,
+ int stopPointMs)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->file = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StartPlayingFileLocally, (int channel, webrtc::InStream* stream,
+ webrtc::FileFormats format,
+ float volumeScaling, int startPointMs,
+ int stopPointMs)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->file = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StopPlayingFileLocally, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->file = false;
+ return 0;
+ }
+ WEBRTC_FUNC(IsPlayingFileLocally, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ return (channels_[channel]->file) ? 1 : 0;
+ }
+ WEBRTC_STUB(ScaleLocalFilePlayout, (int channel, float scale));
+ WEBRTC_STUB(StartPlayingFileAsMicrophone, (int channel,
+ const char* fileNameUTF8,
+ bool loop,
+ bool mixWithMicrophone,
+ webrtc::FileFormats format,
+ float volumeScaling));
+ WEBRTC_STUB(StartPlayingFileAsMicrophone, (int channel,
+ webrtc::InStream* stream,
+ bool mixWithMicrophone,
+ webrtc::FileFormats format,
+ float volumeScaling));
+ WEBRTC_STUB(StopPlayingFileAsMicrophone, (int channel));
+ WEBRTC_STUB(IsPlayingFileAsMicrophone, (int channel));
+ WEBRTC_STUB(ScaleFileAsMicrophonePlayout, (int channel, float scale));
+ WEBRTC_STUB(StartRecordingPlayout, (int channel, const char* fileNameUTF8,
+ webrtc::CodecInst* compression,
+ int maxSizeBytes));
+ WEBRTC_STUB(StartRecordingPlayout, (int channel, webrtc::OutStream* stream,
+ webrtc::CodecInst* compression));
+ WEBRTC_STUB(StopRecordingPlayout, (int channel));
+ WEBRTC_FUNC(StartRecordingMicrophone, (const char* fileNameUTF8,
+ webrtc::CodecInst* compression,
+ int maxSizeBytes)) {
+ if (fail_start_recording_microphone_) {
+ return -1;
+ }
+ recording_microphone_ = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StartRecordingMicrophone, (webrtc::OutStream* stream,
+ webrtc::CodecInst* compression)) {
+ if (fail_start_recording_microphone_) {
+ return -1;
+ }
+ recording_microphone_ = true;
+ return 0;
+ }
+ WEBRTC_FUNC(StopRecordingMicrophone, ()) {
+ if (!recording_microphone_) {
+ return -1;
+ }
+ recording_microphone_ = false;
+ return 0;
+ }
+ WEBRTC_STUB(ConvertPCMToWAV, (const char* fileNameInUTF8,
+ const char* fileNameOutUTF8));
+ WEBRTC_STUB(ConvertPCMToWAV, (webrtc::InStream* streamIn,
+ webrtc::OutStream* streamOut));
+ WEBRTC_STUB(ConvertWAVToPCM, (const char* fileNameInUTF8,
+ const char* fileNameOutUTF8));
+ WEBRTC_STUB(ConvertWAVToPCM, (webrtc::InStream* streamIn,
+ webrtc::OutStream* streamOut));
+ WEBRTC_STUB(ConvertPCMToCompressed, (const char* fileNameInUTF8,
+ const char* fileNameOutUTF8,
+ webrtc::CodecInst* compression));
+ WEBRTC_STUB(ConvertPCMToCompressed, (webrtc::InStream* streamIn,
+ webrtc::OutStream* streamOut,
+ webrtc::CodecInst* compression));
+ WEBRTC_STUB(ConvertCompressedToPCM, (const char* fileNameInUTF8,
+ const char* fileNameOutUTF8));
+ WEBRTC_STUB(ConvertCompressedToPCM, (webrtc::InStream* streamIn,
+ webrtc::OutStream* streamOut));
+ WEBRTC_STUB(GetFileDuration, (const char* fileNameUTF8, int& durationMs,
+ webrtc::FileFormats format));
+ WEBRTC_STUB(GetPlaybackPosition, (int channel, int& positionMs));
+
+ // webrtc::VoEHardware
+ WEBRTC_STUB(GetCPULoad, (int&));
+ WEBRTC_FUNC(GetNumOfRecordingDevices, (int& num)) {
+ return GetNumDevices(num);
+ }
+ WEBRTC_FUNC(GetNumOfPlayoutDevices, (int& num)) {
+ return GetNumDevices(num);
+ }
+ WEBRTC_FUNC(GetRecordingDeviceName, (int i, char* name, char* guid)) {
+ return GetDeviceName(i, name, guid);
+ }
+ WEBRTC_FUNC(GetPlayoutDeviceName, (int i, char* name, char* guid)) {
+ return GetDeviceName(i, name, guid);
+ }
+ WEBRTC_STUB(SetRecordingDevice, (int, webrtc::StereoChannel));
+ WEBRTC_STUB(SetPlayoutDevice, (int));
+ WEBRTC_STUB(SetAudioDeviceLayer, (webrtc::AudioLayers));
+ WEBRTC_STUB(GetAudioDeviceLayer, (webrtc::AudioLayers&));
+ WEBRTC_STUB(GetPlayoutDeviceStatus, (bool&));
+ WEBRTC_STUB(GetRecordingDeviceStatus, (bool&));
+ WEBRTC_STUB(ResetAudioDevice, ());
+ WEBRTC_STUB(AudioDeviceControl, (unsigned int, unsigned int, unsigned int));
+ WEBRTC_STUB(SetLoudspeakerStatus, (bool enable));
+ WEBRTC_STUB(GetLoudspeakerStatus, (bool& enabled));
+ WEBRTC_STUB(SetRecordingSampleRate, (unsigned int samples_per_sec));
+ WEBRTC_STUB_CONST(RecordingSampleRate, (unsigned int* samples_per_sec));
+ WEBRTC_STUB(SetPlayoutSampleRate, (unsigned int samples_per_sec));
+ WEBRTC_STUB_CONST(PlayoutSampleRate, (unsigned int* samples_per_sec));
+ WEBRTC_STUB(EnableBuiltInAEC, (bool enable));
+ virtual bool BuiltInAECIsEnabled() const { return true; }
+
+ // webrtc::VoENetEqStats
+ WEBRTC_STUB(GetNetworkStatistics, (int, webrtc::NetworkStatistics&));
+
+ // webrtc::VoENetwork
+ WEBRTC_FUNC(RegisterExternalTransport, (int channel,
+ webrtc::Transport& transport)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->external_transport = true;
+ return 0;
+ }
+ WEBRTC_FUNC(DeRegisterExternalTransport, (int channel)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->external_transport = false;
+ return 0;
+ }
+ WEBRTC_FUNC(ReceivedRTPPacket, (int channel, const void* data,
+ unsigned int length)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (!channels_[channel]->external_transport) return -1;
+ channels_[channel]->packets.push_back(
+ std::string(static_cast<const char*>(data), length));
+ return 0;
+ }
+ WEBRTC_STUB(ReceivedRTCPPacket, (int channel, const void* data,
+ unsigned int length));
+ // Not using WEBRTC_STUB due to bool return value
+ WEBRTC_STUB(SetPacketTimeoutNotification, (int channel, bool enable,
+ int timeoutSeconds));
+ WEBRTC_STUB(GetPacketTimeoutNotification, (int channel, bool& enable,
+ int& timeoutSeconds));
+ WEBRTC_STUB(RegisterDeadOrAliveObserver, (int channel,
+ webrtc::VoEConnectionObserver& observer));
+ WEBRTC_STUB(DeRegisterDeadOrAliveObserver, (int channel));
+ WEBRTC_STUB(GetPeriodicDeadOrAliveStatus, (int channel, bool& enabled,
+ int& sampleTimeSeconds));
+ WEBRTC_STUB(SetPeriodicDeadOrAliveStatus, (int channel, bool enable,
+ int sampleTimeSeconds));
+
+ // webrtc::VoERTP_RTCP
+ WEBRTC_STUB(RegisterRTPObserver, (int channel,
+ webrtc::VoERTPObserver& observer));
+ WEBRTC_STUB(DeRegisterRTPObserver, (int channel));
+ WEBRTC_STUB(RegisterRTCPObserver, (int channel,
+ webrtc::VoERTCPObserver& observer));
+ WEBRTC_STUB(DeRegisterRTCPObserver, (int channel));
+ WEBRTC_FUNC(SetLocalSSRC, (int channel, unsigned int ssrc)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->send_ssrc = ssrc;
+ return 0;
+ }
+ WEBRTC_FUNC(GetLocalSSRC, (int channel, unsigned int& ssrc)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ ssrc = channels_[channel]->send_ssrc;
+ return 0;
+ }
+ WEBRTC_STUB(GetRemoteSSRC, (int channel, unsigned int& ssrc));
+ WEBRTC_FUNC(SetRTPAudioLevelIndicationStatus, (int channel, bool enable,
+ unsigned char id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (enable && (id < 1 || id > 14)) {
+ // [RFC5285] The 4-bit ID is the local identifier of this element in
+ // the range 1-14 inclusive.
+ return -1;
+ }
+ channels_[channel]->level_header_ext_ = (enable) ? id : -1;
+ return 0;
+ }
+ WEBRTC_FUNC(GetRTPAudioLevelIndicationStatus, (int channel, bool& enabled,
+ unsigned char& id)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ enabled = (channels_[channel]->level_header_ext_ != -1);
+ id = channels_[channel]->level_header_ext_;
+ return 0;
+ }
+ WEBRTC_STUB(GetRemoteCSRCs, (int channel, unsigned int arrCSRC[15]));
+ WEBRTC_STUB(SetRTCPStatus, (int channel, bool enable));
+ WEBRTC_STUB(GetRTCPStatus, (int channel, bool& enabled));
+ WEBRTC_STUB(SetRTCP_CNAME, (int channel, const char cname[256]));
+ WEBRTC_STUB(GetRTCP_CNAME, (int channel, char cname[256]));
+ WEBRTC_STUB(GetRemoteRTCP_CNAME, (int channel, char* cname));
+ WEBRTC_STUB(GetRemoteRTCPData, (int channel, unsigned int& NTPHigh,
+ unsigned int& NTPLow,
+ unsigned int& timestamp,
+ unsigned int& playoutTimestamp,
+ unsigned int* jitter,
+ unsigned short* fractionLost));
+ WEBRTC_STUB(GetRemoteRTCPSenderInfo, (int channel,
+ webrtc::SenderInfo* sender_info));
+ WEBRTC_FUNC(GetRemoteRTCPReportBlocks,
+ (int channel, std::vector<webrtc::ReportBlock>* receive_blocks)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ webrtc::ReportBlock block;
+ block.source_SSRC = channels_[channel]->send_ssrc;
+ webrtc::CodecInst send_codec = channels_[channel]->send_codec;
+ if (send_codec.pltype >= 0) {
+ block.fraction_lost = (unsigned char)(kFractionLostStatValue * 256);
+ if (send_codec.plfreq / 1000 > 0) {
+ block.interarrival_jitter = kIntStatValue * (send_codec.plfreq / 1000);
+ }
+ block.cumulative_num_packets_lost = kIntStatValue;
+ block.extended_highest_sequence_number = kIntStatValue;
+ receive_blocks->push_back(block);
+ }
+ return 0;
+ }
+ WEBRTC_STUB(SendApplicationDefinedRTCPPacket, (int channel,
+ unsigned char subType,
+ unsigned int name,
+ const char* data,
+ unsigned short dataLength));
+ WEBRTC_STUB(GetRTPStatistics, (int channel, unsigned int& averageJitterMs,
+ unsigned int& maxJitterMs,
+ unsigned int& discardedPackets));
+ WEBRTC_FUNC(GetRTCPStatistics, (int channel, webrtc::CallStatistics& stats)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ stats.fractionLost = static_cast<int16>(kIntStatValue);
+ stats.cumulativeLost = kIntStatValue;
+ stats.extendedMax = kIntStatValue;
+ stats.jitterSamples = kIntStatValue;
+ stats.rttMs = kIntStatValue;
+ stats.bytesSent = kIntStatValue;
+ stats.packetsSent = kIntStatValue;
+ stats.bytesReceived = kIntStatValue;
+ stats.packetsReceived = kIntStatValue;
+ return 0;
+ }
+ WEBRTC_FUNC(SetFECStatus, (int channel, bool enable, int redPayloadtype)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->fec = enable;
+ channels_[channel]->fec_type = redPayloadtype;
+ return 0;
+ }
+ WEBRTC_FUNC(GetFECStatus, (int channel, bool& enable, int& redPayloadtype)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ enable = channels_[channel]->fec;
+ redPayloadtype = channels_[channel]->fec_type;
+ return 0;
+ }
+ WEBRTC_FUNC(SetNACKStatus, (int channel, bool enable, int maxNoPackets)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->nack = enable;
+ channels_[channel]->nack_max_packets = maxNoPackets;
+ return 0;
+ }
+ WEBRTC_STUB(StartRTPDump, (int channel, const char* fileNameUTF8,
+ webrtc::RTPDirections direction));
+ WEBRTC_STUB(StopRTPDump, (int channel, webrtc::RTPDirections direction));
+ WEBRTC_STUB(RTPDumpIsActive, (int channel, webrtc::RTPDirections direction));
+ WEBRTC_STUB(InsertExtraRTPPacket, (int channel, unsigned char payloadType,
+ bool markerBit, const char* payloadData,
+ unsigned short payloadSize));
+ WEBRTC_STUB(GetLastRemoteTimeStamp, (int channel,
+ uint32_t* lastRemoteTimeStamp));
+
+ // webrtc::VoEVideoSync
+ WEBRTC_STUB(GetPlayoutBufferSize, (int& bufferMs));
+ WEBRTC_STUB(GetPlayoutTimestamp, (int channel, unsigned int& timestamp));
+ WEBRTC_STUB(GetRtpRtcp, (int, webrtc::RtpRtcp*&));
+ WEBRTC_STUB(SetInitTimestamp, (int channel, unsigned int timestamp));
+ WEBRTC_STUB(SetInitSequenceNumber, (int channel, short sequenceNumber));
+ WEBRTC_STUB(SetMinimumPlayoutDelay, (int channel, int delayMs));
+ WEBRTC_STUB(SetInitialPlayoutDelay, (int channel, int delay_ms));
+ WEBRTC_STUB(GetDelayEstimate, (int channel, int* jitter_buffer_delay_ms,
+ int* playout_buffer_delay_ms));
+ WEBRTC_STUB_CONST(GetLeastRequiredDelayMs, (int channel));
+
+ // webrtc::VoEVolumeControl
+ WEBRTC_STUB(SetSpeakerVolume, (unsigned int));
+ WEBRTC_STUB(GetSpeakerVolume, (unsigned int&));
+ WEBRTC_STUB(SetSystemOutputMute, (bool));
+ WEBRTC_STUB(GetSystemOutputMute, (bool&));
+ WEBRTC_STUB(SetMicVolume, (unsigned int));
+ WEBRTC_STUB(GetMicVolume, (unsigned int&));
+ WEBRTC_STUB(SetInputMute, (int, bool));
+ WEBRTC_STUB(GetInputMute, (int, bool&));
+ WEBRTC_STUB(SetSystemInputMute, (bool));
+ WEBRTC_STUB(GetSystemInputMute, (bool&));
+ WEBRTC_STUB(GetSpeechInputLevel, (unsigned int&));
+ WEBRTC_STUB(GetSpeechOutputLevel, (int, unsigned int&));
+ WEBRTC_STUB(GetSpeechInputLevelFullRange, (unsigned int&));
+ WEBRTC_STUB(GetSpeechOutputLevelFullRange, (int, unsigned int&));
+ WEBRTC_FUNC(SetChannelOutputVolumeScaling, (int channel, float scale)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->volume_scale= scale;
+ return 0;
+ }
+ WEBRTC_FUNC(GetChannelOutputVolumeScaling, (int channel, float& scale)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ scale = channels_[channel]->volume_scale;
+ return 0;
+ }
+ WEBRTC_FUNC(SetOutputVolumePan, (int channel, float left, float right)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ channels_[channel]->volume_pan_left = left;
+ channels_[channel]->volume_pan_right = right;
+ return 0;
+ }
+ WEBRTC_FUNC(GetOutputVolumePan, (int channel, float& left, float& right)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ left = channels_[channel]->volume_pan_left;
+ right = channels_[channel]->volume_pan_right;
+ return 0;
+ }
+
+ // webrtc::VoEAudioProcessing
+ WEBRTC_FUNC(SetNsStatus, (bool enable, webrtc::NsModes mode)) {
+ ns_enabled_ = enable;
+ ns_mode_ = mode;
+ return 0;
+ }
+ WEBRTC_FUNC(GetNsStatus, (bool& enabled, webrtc::NsModes& mode)) {
+ enabled = ns_enabled_;
+ mode = ns_mode_;
+ return 0;
+ }
+
+ WEBRTC_FUNC(SetAgcStatus, (bool enable, webrtc::AgcModes mode)) {
+ agc_enabled_ = enable;
+ agc_mode_ = mode;
+ return 0;
+ }
+ WEBRTC_FUNC(GetAgcStatus, (bool& enabled, webrtc::AgcModes& mode)) {
+ enabled = agc_enabled_;
+ mode = agc_mode_;
+ return 0;
+ }
+
+ WEBRTC_FUNC(SetAgcConfig, (webrtc::AgcConfig config)) {
+ agc_config_ = config;
+ return 0;
+ }
+ WEBRTC_FUNC(GetAgcConfig, (webrtc::AgcConfig& config)) {
+ config = agc_config_;
+ return 0;
+ }
+ WEBRTC_FUNC(SetEcStatus, (bool enable, webrtc::EcModes mode)) {
+ ec_enabled_ = enable;
+ ec_mode_ = mode;
+ return 0;
+ }
+ WEBRTC_FUNC(GetEcStatus, (bool& enabled, webrtc::EcModes& mode)) {
+ enabled = ec_enabled_;
+ mode = ec_mode_;
+ return 0;
+ }
+ WEBRTC_STUB(EnableDriftCompensation, (bool enable))
+ WEBRTC_BOOL_STUB(DriftCompensationEnabled, ())
+ WEBRTC_VOID_STUB(SetDelayOffsetMs, (int offset))
+ WEBRTC_STUB(DelayOffsetMs, ());
+ WEBRTC_FUNC(SetAecmMode, (webrtc::AecmModes mode, bool enableCNG)) {
+ aecm_mode_ = mode;
+ cng_enabled_ = enableCNG;
+ return 0;
+ }
+ WEBRTC_FUNC(GetAecmMode, (webrtc::AecmModes& mode, bool& enabledCNG)) {
+ mode = aecm_mode_;
+ enabledCNG = cng_enabled_;
+ return 0;
+ }
+ WEBRTC_STUB(SetRxNsStatus, (int channel, bool enable, webrtc::NsModes mode));
+ WEBRTC_STUB(GetRxNsStatus, (int channel, bool& enabled,
+ webrtc::NsModes& mode));
+ WEBRTC_STUB(SetRxAgcStatus, (int channel, bool enable,
+ webrtc::AgcModes mode));
+ WEBRTC_STUB(GetRxAgcStatus, (int channel, bool& enabled,
+ webrtc::AgcModes& mode));
+ WEBRTC_STUB(SetRxAgcConfig, (int channel, webrtc::AgcConfig config));
+ WEBRTC_STUB(GetRxAgcConfig, (int channel, webrtc::AgcConfig& config));
+
+ WEBRTC_STUB(RegisterRxVadObserver, (int, webrtc::VoERxVadCallback&));
+ WEBRTC_STUB(DeRegisterRxVadObserver, (int channel));
+ WEBRTC_STUB(VoiceActivityIndicator, (int channel));
+ WEBRTC_FUNC(SetEcMetricsStatus, (bool enable)) {
+ ec_metrics_enabled_ = enable;
+ return 0;
+ }
+ WEBRTC_FUNC(GetEcMetricsStatus, (bool& enabled)) {
+ enabled = ec_metrics_enabled_;
+ return 0;
+ }
+ WEBRTC_STUB(GetEchoMetrics, (int& ERL, int& ERLE, int& RERL, int& A_NLP));
+ WEBRTC_STUB(GetEcDelayMetrics, (int& delay_median, int& delay_std));
+
+ WEBRTC_STUB(StartDebugRecording, (const char* fileNameUTF8));
+ WEBRTC_STUB(StopDebugRecording, ());
+
+ WEBRTC_FUNC(SetTypingDetectionStatus, (bool enable)) {
+ typing_detection_enabled_ = enable;
+ return 0;
+ }
+ WEBRTC_FUNC(GetTypingDetectionStatus, (bool& enabled)) {
+ enabled = typing_detection_enabled_;
+ return 0;
+ }
+
+ WEBRTC_STUB(TimeSinceLastTyping, (int& seconds));
+ WEBRTC_STUB(SetTypingDetectionParameters, (int timeWindow,
+ int costPerTyping,
+ int reportingThreshold,
+ int penaltyDecay,
+ int typeEventDelay));
+ int EnableHighPassFilter(bool enable) {
+ highpass_filter_enabled_ = enable;
+ return 0;
+ }
+ bool IsHighPassFilterEnabled() {
+ return highpass_filter_enabled_;
+ }
+ bool IsStereoChannelSwappingEnabled() {
+ return stereo_swapping_enabled_;
+ }
+ void EnableStereoChannelSwapping(bool enable) {
+ stereo_swapping_enabled_ = enable;
+ }
+ bool WasSendTelephoneEventCalled(int channel, int event_code, int length_ms) {
+ return (channels_[channel]->dtmf_info.dtmf_event_code == event_code &&
+ channels_[channel]->dtmf_info.dtmf_out_of_band == true &&
+ channels_[channel]->dtmf_info.dtmf_length_ms == length_ms);
+ }
+ bool WasPlayDtmfToneCalled(int event_code, int length_ms) {
+ return (dtmf_info_.dtmf_event_code == event_code &&
+ dtmf_info_.dtmf_length_ms == length_ms);
+ }
+ // webrtc::VoEExternalMedia
+ WEBRTC_FUNC(RegisterExternalMediaProcessing,
+ (int channel, webrtc::ProcessingTypes type,
+ webrtc::VoEMediaProcess& processObject)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (channels_[channel]->media_processor_registered) {
+ return -1;
+ }
+ channels_[channel]->media_processor_registered = true;
+ media_processor_ = &processObject;
+ return 0;
+ }
+ WEBRTC_FUNC(DeRegisterExternalMediaProcessing,
+ (int channel, webrtc::ProcessingTypes type)) {
+ WEBRTC_CHECK_CHANNEL(channel);
+ if (!channels_[channel]->media_processor_registered) {
+ return -1;
+ }
+ channels_[channel]->media_processor_registered = false;
+ media_processor_ = NULL;
+ return 0;
+ }
+ WEBRTC_STUB(SetExternalRecordingStatus, (bool enable));
+ WEBRTC_STUB(SetExternalPlayoutStatus, (bool enable));
+ WEBRTC_STUB(ExternalRecordingInsertData,
+ (const int16_t speechData10ms[], int lengthSamples,
+ int samplingFreqHz, int current_delay_ms));
+ WEBRTC_STUB(ExternalPlayoutGetData,
+ (int16_t speechData10ms[], int samplingFreqHz,
+ int current_delay_ms, int& lengthSamples));
+ WEBRTC_STUB(GetAudioFrame, (int channel, int desired_sample_rate_hz,
+ webrtc::AudioFrame* frame));
+ WEBRTC_STUB(SetExternalMixing, (int channel, bool enable));
+
+ private:
+ int GetNumDevices(int& num) {
+#ifdef WIN32
+ num = 1;
+#else
+ // On non-Windows platforms VE adds a special entry for the default device,
+ // so if there is one physical device then there are two entries in the
+ // list.
+ num = 2;
+#endif
+ return 0;
+ }
+
+ int GetDeviceName(int i, char* name, char* guid) {
+ const char *s;
+#ifdef WIN32
+ if (0 == i) {
+ s = kFakeDeviceName;
+ } else {
+ return -1;
+ }
+#else
+ // See comment above.
+ if (0 == i) {
+ s = kFakeDefaultDeviceName;
+ } else if (1 == i) {
+ s = kFakeDeviceName;
+ } else {
+ return -1;
+ }
+#endif
+ strcpy(name, s);
+ guid[0] = '\0';
+ return 0;
+ }
+
+ bool inited_;
+ int last_channel_;
+ std::map<int, Channel*> channels_;
+ bool fail_create_channel_;
+ const cricket::AudioCodec* const* codecs_;
+ int num_codecs_;
+ bool ec_enabled_;
+ bool ec_metrics_enabled_;
+ bool cng_enabled_;
+ bool ns_enabled_;
+ bool agc_enabled_;
+ bool highpass_filter_enabled_;
+ bool stereo_swapping_enabled_;
+ bool typing_detection_enabled_;
+ webrtc::EcModes ec_mode_;
+ webrtc::AecmModes aecm_mode_;
+ webrtc::NsModes ns_mode_;
+ webrtc::AgcModes agc_mode_;
+ webrtc::AgcConfig agc_config_;
+ webrtc::VoiceEngineObserver* observer_;
+ int playout_fail_channel_;
+ int send_fail_channel_;
+ bool fail_start_recording_microphone_;
+ bool recording_microphone_;
+ DtmfInfo dtmf_info_;
+ webrtc::VoEMediaProcess* media_processor_;
+};
+
+} // namespace cricket
+
+#endif // TALK_SESSION_PHONE_FAKEWEBRTCVOICEENGINE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtccommon.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtccommon.h
new file mode 100644
index 00000000000..3a557f11d11
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtccommon.h
@@ -0,0 +1,76 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_MEDIA_WEBRTCCOMMON_H_
+#define TALK_MEDIA_WEBRTCCOMMON_H_
+
+#include "webrtc/common_types.h"
+
+namespace cricket {
+
+// Tracing helpers, for easy logging when WebRTC calls fail.
+// Example: "LOG_RTCERR1(StartSend, channel);" produces the trace
+// "StartSend(1) failed, err=XXXX"
+// The method GetLastEngineError must be defined in the calling scope.
+#define LOG_RTCERR0(func) \
+ LOG_RTCERR0_EX(func, GetLastEngineError())
+#define LOG_RTCERR1(func, a1) \
+ LOG_RTCERR1_EX(func, a1, GetLastEngineError())
+#define LOG_RTCERR2(func, a1, a2) \
+ LOG_RTCERR2_EX(func, a1, a2, GetLastEngineError())
+#define LOG_RTCERR3(func, a1, a2, a3) \
+ LOG_RTCERR3_EX(func, a1, a2, a3, GetLastEngineError())
+#define LOG_RTCERR4(func, a1, a2, a3, a4) \
+ LOG_RTCERR4_EX(func, a1, a2, a3, a4, GetLastEngineError())
+#define LOG_RTCERR5(func, a1, a2, a3, a4, a5) \
+ LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, GetLastEngineError())
+#define LOG_RTCERR6(func, a1, a2, a3, a4, a5, a6) \
+ LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, GetLastEngineError())
+#define LOG_RTCERR0_EX(func, err) LOG(LS_WARNING) \
+ << "" << #func << "() failed, err=" << err
+#define LOG_RTCERR1_EX(func, a1, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ") failed, err=" << err
+#define LOG_RTCERR2_EX(func, a1, a2, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ", " << a2 << ") failed, err=" \
+ << err
+#define LOG_RTCERR3_EX(func, a1, a2, a3, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+ << ") failed, err=" << err
+#define LOG_RTCERR4_EX(func, a1, a2, a3, a4, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+ << ", " << a4 << ") failed, err=" << err
+#define LOG_RTCERR5_EX(func, a1, a2, a3, a4, a5, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+ << ", " << a4 << ", " << a5 << ") failed, err=" << err
+#define LOG_RTCERR6_EX(func, a1, a2, a3, a4, a5, a6, err) LOG(LS_WARNING) \
+ << "" << #func << "(" << a1 << ", " << a2 << ", " << a3 \
+ << ", " << a4 << ", " << a5 << ", " << a6 << ") failed, err=" << err
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCCOMMON_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcexport.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcexport.h
new file mode 100644
index 00000000000..71ebe4e8eac
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcexport.h
@@ -0,0 +1,79 @@
+/*
+ * libjingle
+ * Copyright 2004--2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef TALK_MEDIA_WEBRTC_WEBRTCEXPORT_H_
+#define TALK_MEDIA_WEBRTC_WEBRTCEXPORT_H_
+
+#if !defined(GOOGLE_CHROME_BUILD) && !defined(CHROMIUM_BUILD)
+#define LIBPEERCONNECTION_LIB 1
+#endif
+
+#ifndef NON_EXPORTED_BASE
+#ifdef WIN32
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) MSVC_SUPPRESS_WARNING(4275) \
+ code
+
+#else // Not WIN32
+#define NON_EXPORTED_BASE(code) code
+#endif // WIN32
+#endif // NON_EXPORTED_BASE
+
+#if defined (LIBPEERCONNECTION_LIB)
+ #define WRME_EXPORT
+#else
+ #if defined(WIN32)
+ #if defined(LIBPEERCONNECTION_IMPLEMENTATION)
+ #define WRME_EXPORT __declspec(dllexport)
+ #else
+ #define WRME_EXPORT __declspec(dllimport)
+ #endif
+ #else // defined(WIN32)
+ #if defined(LIBPEERCONNECTION_IMPLEMENTATION)
+ #define WRME_EXPORT __attribute__((visibility("default")))
+ #else
+ #define WRME_EXPORT
+ #endif
+ #endif
+#endif // LIBPEERCONNECTION_LIB
+
+#endif // TALK_MEDIA_WEBRTC_WEBRTCEXPORT_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcmediaengine.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcmediaengine.h
new file mode 100644
index 00000000000..a2ee6587610
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcmediaengine.h
@@ -0,0 +1,194 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTCMEDIAENGINE_H_
+#define TALK_MEDIA_WEBRTCMEDIAENGINE_H_
+
+#include "talk/media/base/mediaengine.h"
+#include "talk/media/webrtc/webrtcexport.h"
+
+namespace webrtc {
+class AudioDeviceModule;
+class VideoCaptureModule;
+}
+namespace cricket {
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+}
+
+
+#if !defined(LIBPEERCONNECTION_LIB) && \
+ !defined(LIBPEERCONNECTION_IMPLEMENTATION)
+
+WRME_EXPORT
+cricket::MediaEngineInterface* CreateWebRtcMediaEngine(
+ webrtc::AudioDeviceModule* adm, webrtc::AudioDeviceModule* adm_sc,
+ cricket::WebRtcVideoEncoderFactory* encoder_factory,
+ cricket::WebRtcVideoDecoderFactory* decoder_factory);
+
+WRME_EXPORT
+void DestroyWebRtcMediaEngine(cricket::MediaEngineInterface* media_engine);
+
+namespace cricket {
+
+class WebRtcMediaEngine : public cricket::MediaEngineInterface {
+ public:
+ WebRtcMediaEngine(
+ webrtc::AudioDeviceModule* adm,
+ webrtc::AudioDeviceModule* adm_sc,
+ cricket::WebRtcVideoEncoderFactory* encoder_factory,
+ cricket::WebRtcVideoDecoderFactory* decoder_factory)
+ : delegate_(CreateWebRtcMediaEngine(
+ adm, adm_sc, encoder_factory, decoder_factory)) {
+ }
+ virtual ~WebRtcMediaEngine() {
+ DestroyWebRtcMediaEngine(delegate_);
+ }
+ virtual bool Init(talk_base::Thread* worker_thread) OVERRIDE {
+ return delegate_->Init(worker_thread);
+ }
+ virtual void Terminate() OVERRIDE {
+ delegate_->Terminate();
+ }
+ virtual int GetCapabilities() OVERRIDE {
+ return delegate_->GetCapabilities();
+ }
+ virtual VoiceMediaChannel* CreateChannel() OVERRIDE {
+ return delegate_->CreateChannel();
+ }
+ virtual VideoMediaChannel* CreateVideoChannel(
+ VoiceMediaChannel* voice_media_channel) OVERRIDE {
+ return delegate_->CreateVideoChannel(voice_media_channel);
+ }
+ virtual SoundclipMedia* CreateSoundclip() OVERRIDE {
+ return delegate_->CreateSoundclip();
+ }
+ virtual bool SetAudioOptions(int options) OVERRIDE {
+ return delegate_->SetAudioOptions(options);
+ }
+ virtual bool SetVideoOptions(int options) OVERRIDE {
+ return delegate_->SetVideoOptions(options);
+ }
+ virtual bool SetAudioDelayOffset(int offset) OVERRIDE {
+ return delegate_->SetAudioDelayOffset(offset);
+ }
+ virtual bool SetDefaultVideoEncoderConfig(
+ const VideoEncoderConfig& config) OVERRIDE {
+ return delegate_->SetDefaultVideoEncoderConfig(config);
+ }
+ virtual bool SetSoundDevices(
+ const Device* in_device, const Device* out_device) OVERRIDE {
+ return delegate_->SetSoundDevices(in_device, out_device);
+ }
+ virtual bool GetOutputVolume(int* level) OVERRIDE {
+ return delegate_->GetOutputVolume(level);
+ }
+ virtual bool SetOutputVolume(int level) OVERRIDE {
+ return delegate_->SetOutputVolume(level);
+ }
+ virtual int GetInputLevel() OVERRIDE {
+ return delegate_->GetInputLevel();
+ }
+ virtual bool SetLocalMonitor(bool enable) OVERRIDE {
+ return delegate_->SetLocalMonitor(enable);
+ }
+ virtual bool SetLocalRenderer(VideoRenderer* renderer) OVERRIDE {
+ return delegate_->SetLocalRenderer(renderer);
+ }
+ virtual const std::vector<AudioCodec>& audio_codecs() OVERRIDE {
+ return delegate_->audio_codecs();
+ }
+ virtual const std::vector<RtpHeaderExtension>&
+ audio_rtp_header_extensions() OVERRIDE {
+ return delegate_->audio_rtp_header_extensions();
+ }
+ virtual const std::vector<VideoCodec>& video_codecs() OVERRIDE {
+ return delegate_->video_codecs();
+ }
+ virtual const std::vector<RtpHeaderExtension>&
+ video_rtp_header_extensions() OVERRIDE {
+ return delegate_->video_rtp_header_extensions();
+ }
+ virtual void SetVoiceLogging(int min_sev, const char* filter) OVERRIDE {
+ delegate_->SetVoiceLogging(min_sev, filter);
+ }
+ virtual void SetVideoLogging(int min_sev, const char* filter) OVERRIDE {
+ delegate_->SetVideoLogging(min_sev, filter);
+ }
+ virtual bool RegisterVoiceProcessor(
+ uint32 ssrc, VoiceProcessor* video_processor,
+ MediaProcessorDirection direction) OVERRIDE {
+ return delegate_->RegisterVoiceProcessor(ssrc, video_processor, direction);
+ }
+ virtual bool UnregisterVoiceProcessor(
+ uint32 ssrc, VoiceProcessor* video_processor,
+ MediaProcessorDirection direction) OVERRIDE {
+ return delegate_->UnregisterVoiceProcessor(ssrc, video_processor,
+ direction);
+ }
+ virtual VideoFormat GetStartCaptureFormat() const OVERRIDE {
+ return delegate_->GetStartCaptureFormat();
+ }
+ virtual sigslot::repeater2<VideoCapturer*, CaptureState>&
+ SignalVideoCaptureStateChange() {
+ return delegate_->SignalVideoCaptureStateChange();
+ }
+
+ private:
+ cricket::MediaEngineInterface* delegate_;
+};
+
+} // namespace cricket
+#else
+
+#include "talk/media/webrtc/webrtcvideoengine.h"
+#include "talk/media/webrtc/webrtcvoiceengine.h"
+
+namespace cricket {
+typedef CompositeMediaEngine<WebRtcVoiceEngine, WebRtcVideoEngine>
+ WebRtcCompositeMediaEngine;
+
+class WebRtcMediaEngine : public WebRtcCompositeMediaEngine {
+ public:
+ WebRtcMediaEngine(webrtc::AudioDeviceModule* adm,
+ webrtc::AudioDeviceModule* adm_sc,
+ WebRtcVideoEncoderFactory* encoder_factory,
+ WebRtcVideoDecoderFactory* decoder_factory) {
+ voice_.SetAudioDeviceModule(adm, adm_sc);
+ video_.SetVoiceEngine(&voice_);
+ video_.EnableTimedRender();
+ video_.SetExternalEncoderFactory(encoder_factory);
+ video_.SetExternalDecoderFactory(decoder_factory);
+ }
+};
+
+} // namespace cricket
+
+#endif // !defined(LIBPEERCONNECTION_LIB) &&
+ // !defined(LIBPEERCONNECTION_IMPLEMENTATION)
+
+#endif // TALK_MEDIA_WEBRTCMEDIAENGINE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.cc
new file mode 100644
index 00000000000..b4e78b44e8d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.cc
@@ -0,0 +1,176 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/media/webrtc/webrtcpassthroughrender.h"
+
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+
+namespace cricket {
+
+#define LOG_FIND_STREAM_ERROR(func, id) LOG(LS_ERROR) \
+ << "" << func << " - Failed to find stream: " << id
+
+class PassthroughStream: public webrtc::VideoRenderCallback {
+ public:
+ explicit PassthroughStream(const uint32_t stream_id)
+ : stream_id_(stream_id),
+ running_(false) {
+ }
+ virtual ~PassthroughStream() {
+ }
+ virtual int32_t RenderFrame(const uint32_t stream_id,
+ webrtc::I420VideoFrame& videoFrame) {
+ talk_base::CritScope cs(&stream_critical_);
+ // Send frame for rendering directly
+ if (running_ && renderer_) {
+ renderer_->RenderFrame(stream_id, videoFrame);
+ }
+ return 0;
+ }
+ int32_t SetRenderer(VideoRenderCallback* renderer) {
+ talk_base::CritScope cs(&stream_critical_);
+ renderer_ = renderer;
+ return 0;
+ }
+
+ int32_t StartRender() {
+ talk_base::CritScope cs(&stream_critical_);
+ running_ = true;
+ return 0;
+ }
+
+ int32_t StopRender() {
+ talk_base::CritScope cs(&stream_critical_);
+ running_ = false;
+ return 0;
+ }
+
+ private:
+ uint32_t stream_id_;
+ VideoRenderCallback* renderer_;
+ talk_base::CriticalSection stream_critical_;
+ bool running_;
+};
+
+WebRtcPassthroughRender::WebRtcPassthroughRender()
+ : window_(NULL) {
+}
+
+WebRtcPassthroughRender::~WebRtcPassthroughRender() {
+ while (!stream_render_map_.empty()) {
+ PassthroughStream* stream = stream_render_map_.begin()->second;
+ stream_render_map_.erase(stream_render_map_.begin());
+ delete stream;
+ }
+}
+
+webrtc::VideoRenderCallback* WebRtcPassthroughRender::AddIncomingRenderStream(
+ const uint32_t stream_id,
+ const uint32_t zOrder,
+ const float left, const float top,
+ const float right, const float bottom) {
+ talk_base::CritScope cs(&render_critical_);
+  // The stream already exists.
+ if (FindStream(stream_id) != NULL) {
+ LOG(LS_ERROR) << "AddIncomingRenderStream - Stream already exists: "
+ << stream_id;
+ return NULL;
+ }
+
+ PassthroughStream* stream = new PassthroughStream(stream_id);
+ // Store the stream
+ stream_render_map_[stream_id] = stream;
+ return stream;
+}
+
+int32_t WebRtcPassthroughRender::DeleteIncomingRenderStream(
+ const uint32_t stream_id) {
+ talk_base::CritScope cs(&render_critical_);
+ PassthroughStream* stream = FindStream(stream_id);
+ if (stream == NULL) {
+ LOG_FIND_STREAM_ERROR("DeleteIncomingRenderStream", stream_id);
+ return -1;
+ }
+ delete stream;
+ stream_render_map_.erase(stream_id);
+ return 0;
+}
+
+int32_t WebRtcPassthroughRender::AddExternalRenderCallback(
+ const uint32_t stream_id,
+ webrtc::VideoRenderCallback* render_object) {
+ talk_base::CritScope cs(&render_critical_);
+ PassthroughStream* stream = FindStream(stream_id);
+ if (stream == NULL) {
+ LOG_FIND_STREAM_ERROR("AddExternalRenderCallback", stream_id);
+ return -1;
+ }
+ return stream->SetRenderer(render_object);
+}
+
+bool WebRtcPassthroughRender::HasIncomingRenderStream(
+ const uint32_t stream_id) const {
+ return (FindStream(stream_id) != NULL);
+}
+
+webrtc::RawVideoType WebRtcPassthroughRender::PreferredVideoType() const {
+ return webrtc::kVideoI420;
+}
+
+int32_t WebRtcPassthroughRender::StartRender(const uint32_t stream_id) {
+ talk_base::CritScope cs(&render_critical_);
+ PassthroughStream* stream = FindStream(stream_id);
+ if (stream == NULL) {
+ LOG_FIND_STREAM_ERROR("StartRender", stream_id);
+ return -1;
+ }
+ return stream->StartRender();
+}
+
+int32_t WebRtcPassthroughRender::StopRender(const uint32_t stream_id) {
+ talk_base::CritScope cs(&render_critical_);
+ PassthroughStream* stream = FindStream(stream_id);
+ if (stream == NULL) {
+ LOG_FIND_STREAM_ERROR("StopRender", stream_id);
+ return -1;
+ }
+ return stream->StopRender();
+}
+
+// TODO(ronghuawu): Is it ok to return non-const pointer to PassthroughStream
+// from this const function FindStream.
+PassthroughStream* WebRtcPassthroughRender::FindStream(
+ const uint32_t stream_id) const {
+ StreamMap::const_iterator it = stream_render_map_.find(stream_id);
+ if (it == stream_render_map_.end()) {
+ return NULL;
+ }
+ return it->second;
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.h
new file mode 100644
index 00000000000..e09182ff663
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender.h
@@ -0,0 +1,211 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTCPASSTHROUGHRENDER_H_
+#define TALK_MEDIA_WEBRTCPASSTHROUGHRENDER_H_
+
+#include <map>
+
+#include "talk/base/criticalsection.h"
+#include "webrtc/modules/video_render/include/video_render.h"
+
+namespace cricket {
+class PassthroughStream;
+
+class WebRtcPassthroughRender : public webrtc::VideoRender {
+ public:
+ WebRtcPassthroughRender();
+ virtual ~WebRtcPassthroughRender();
+
+ virtual int32_t Version(int8_t* version,
+ uint32_t& remainingBufferInBytes,
+ uint32_t& position) const {
+ return 0;
+ }
+
+ virtual int32_t ChangeUniqueId(const int32_t id) {
+ return 0;
+ }
+
+ virtual int32_t TimeUntilNextProcess() { return 0; }
+
+ virtual int32_t Process() { return 0; }
+
+ virtual void* Window() {
+ talk_base::CritScope cs(&render_critical_);
+ return window_;
+ }
+
+ virtual int32_t ChangeWindow(void* window) {
+ talk_base::CritScope cs(&render_critical_);
+ window_ = window;
+ return 0;
+ }
+
+ virtual webrtc::VideoRenderCallback* AddIncomingRenderStream(
+ const uint32_t stream_id,
+ const uint32_t zOrder,
+ const float left, const float top,
+ const float right, const float bottom);
+
+ virtual int32_t DeleteIncomingRenderStream(const uint32_t stream_id);
+
+ virtual int32_t AddExternalRenderCallback(
+ const uint32_t stream_id,
+ webrtc::VideoRenderCallback* render_object);
+
+ virtual int32_t GetIncomingRenderStreamProperties(
+ const uint32_t stream_id,
+ uint32_t& zOrder,
+ float& left, float& top,
+ float& right, float& bottom) const {
+ return -1;
+ }
+
+ virtual uint32_t GetIncomingFrameRate(
+ const uint32_t stream_id) {
+ return 0;
+ }
+
+ virtual uint32_t GetNumIncomingRenderStreams() const {
+ return static_cast<uint32_t>(stream_render_map_.size());
+ }
+
+ virtual bool HasIncomingRenderStream(const uint32_t stream_id) const;
+
+ virtual int32_t RegisterRawFrameCallback(
+ const uint32_t stream_id,
+ webrtc::VideoRenderCallback* callback_obj) {
+ return -1;
+ }
+
+ virtual int32_t GetLastRenderedFrame(
+ const uint32_t stream_id,
+ webrtc::I420VideoFrame &frame) const {
+ return -1;
+ }
+
+ virtual int32_t StartRender(
+ const uint32_t stream_id);
+
+ virtual int32_t StopRender(
+ const uint32_t stream_id);
+
+ virtual int32_t ResetRender() { return 0; }
+
+ virtual webrtc::RawVideoType PreferredVideoType() const;
+
+ virtual bool IsFullScreen() { return false; }
+
+ virtual int32_t GetScreenResolution(uint32_t& screenWidth,
+ uint32_t& screenHeight) const {
+ return -1;
+ }
+
+ virtual uint32_t RenderFrameRate(
+ const uint32_t stream_id) {
+ return 0;
+ }
+
+ virtual int32_t SetStreamCropping(
+ const uint32_t stream_id,
+ const float left, const float top,
+ const float right,
+ const float bottom) {
+ return -1;
+ }
+
+ virtual int32_t SetExpectedRenderDelay(uint32_t stream_id, int32_t delay_ms) {
+ return -1;
+ }
+
+ virtual int32_t ConfigureRenderer(
+ const uint32_t stream_id,
+ const unsigned int zOrder,
+ const float left, const float top,
+ const float right,
+ const float bottom) {
+ return -1;
+ }
+
+ virtual int32_t SetTransparentBackground(const bool enable) {
+ return -1;
+ }
+
+ virtual int32_t FullScreenRender(void* window, const bool enable) {
+ return -1;
+ }
+
+ virtual int32_t SetBitmap(const void* bitMap,
+ const uint8_t pictureId, const void* colorKey,
+ const float left, const float top,
+ const float right, const float bottom) {
+ return -1;
+ }
+
+ virtual int32_t SetText(const uint8_t textId,
+ const uint8_t* text,
+ const int32_t textLength,
+ const uint32_t textColorRef,
+ const uint32_t backgroundColorRef,
+ const float left, const float top,
+ const float right, const float bottom) {
+ return -1;
+ }
+
+ virtual int32_t SetStartImage(
+ const uint32_t stream_id,
+ const webrtc::I420VideoFrame& videoFrame) {
+ return -1;
+ }
+
+ virtual int32_t SetTimeoutImage(
+ const uint32_t stream_id,
+ const webrtc::I420VideoFrame& videoFrame,
+ const uint32_t timeout) {
+ return -1;
+ }
+
+ virtual int32_t MirrorRenderStream(const int renderId,
+ const bool enable,
+ const bool mirrorXAxis,
+ const bool mirrorYAxis) {
+ return -1;
+ }
+
+ private:
+ typedef std::map<uint32_t, PassthroughStream*> StreamMap;
+
+ PassthroughStream* FindStream(const uint32_t stream_id) const;
+
+ void* window_;
+ StreamMap stream_render_map_;
+ talk_base::CriticalSection render_critical_;
+};
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCPASSTHROUGHRENDER_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender_unittest.cc
new file mode 100644
index 00000000000..4eb2892517f
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcpassthroughrender_unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2008 Google Inc.
+//
+// Author: Ronghua Wu (ronghuawu@google.com)
+
+#include <string>
+
+#include "talk/base/gunit.h"
+#include "talk/media/base/testutils.h"
+#include "talk/media/webrtc/webrtcpassthroughrender.h"
+
+class WebRtcPassthroughRenderTest : public testing::Test {
+ public:
+ class ExternalRenderer : public webrtc::VideoRenderCallback {
+ public:
+ ExternalRenderer() : frame_num_(0) {
+ }
+
+ virtual ~ExternalRenderer() {
+ }
+
+ virtual int32_t RenderFrame(const uint32_t stream_id,
+ webrtc::I420VideoFrame& videoFrame) {
+ ++frame_num_;
+ LOG(INFO) << "RenderFrame stream_id: " << stream_id
+ << " frame_num: " << frame_num_;
+ return 0;
+ }
+
+ int frame_num() const {
+ return frame_num_;
+ }
+
+ private:
+ int frame_num_;
+ };
+
+ WebRtcPassthroughRenderTest()
+ : renderer_(new cricket::WebRtcPassthroughRender()) {
+ }
+
+ ~WebRtcPassthroughRenderTest() {
+ }
+
+ webrtc::VideoRenderCallback* AddIncomingRenderStream(int stream_id) {
+ return renderer_->AddIncomingRenderStream(stream_id, 0, 0, 0, 0, 0);
+ }
+
+ bool HasIncomingRenderStream(int stream_id) {
+ return renderer_->HasIncomingRenderStream(stream_id);
+ }
+
+ bool DeleteIncomingRenderStream(int stream_id) {
+ return (renderer_->DeleteIncomingRenderStream(stream_id) == 0);
+ }
+
+ bool AddExternalRenderCallback(int stream_id,
+ webrtc::VideoRenderCallback* renderer) {
+ return (renderer_->AddExternalRenderCallback(stream_id, renderer) == 0);
+ }
+
+ bool StartRender(int stream_id) {
+ return (renderer_->StartRender(stream_id) == 0);
+ }
+
+ bool StopRender(int stream_id) {
+ return (renderer_->StopRender(stream_id) == 0);
+ }
+
+ private:
+ talk_base::scoped_ptr<cricket::WebRtcPassthroughRender> renderer_;
+};
+
+TEST_F(WebRtcPassthroughRenderTest, Streams) {
+ const int stream_id1 = 1234;
+ const int stream_id2 = 5678;
+ const int stream_id3 = 9012; // A stream that doesn't exist.
+ webrtc::VideoRenderCallback* stream = NULL;
+ // Add a new stream
+ stream = AddIncomingRenderStream(stream_id1);
+ EXPECT_TRUE(stream != NULL);
+ EXPECT_TRUE(HasIncomingRenderStream(stream_id1));
+  // Trying to add an already existing stream should return NULL.
+ stream =AddIncomingRenderStream(stream_id1);
+ EXPECT_TRUE(stream == NULL);
+ stream = AddIncomingRenderStream(stream_id2);
+ EXPECT_TRUE(stream != NULL);
+ EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+ // Remove the stream
+ EXPECT_FALSE(DeleteIncomingRenderStream(stream_id3));
+ EXPECT_TRUE(DeleteIncomingRenderStream(stream_id2));
+ EXPECT_TRUE(!HasIncomingRenderStream(stream_id2));
+ // Add back the removed stream
+ stream = AddIncomingRenderStream(stream_id2);
+ EXPECT_TRUE(stream != NULL);
+ EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+}
+
+TEST_F(WebRtcPassthroughRenderTest, Renderer) {
+ webrtc::I420VideoFrame frame;
+ const int stream_id1 = 1234;
+ const int stream_id2 = 5678;
+ const int stream_id3 = 9012; // A stream that doesn't exist.
+ webrtc::VideoRenderCallback* stream1 = NULL;
+ webrtc::VideoRenderCallback* stream2 = NULL;
+  // Add two new streams.
+ stream1 = AddIncomingRenderStream(stream_id1);
+ EXPECT_TRUE(stream1 != NULL);
+ EXPECT_TRUE(HasIncomingRenderStream(stream_id1));
+ stream2 = AddIncomingRenderStream(stream_id2);
+ EXPECT_TRUE(stream2 != NULL);
+ EXPECT_TRUE(HasIncomingRenderStream(stream_id2));
+ // Register the external renderer
+ WebRtcPassthroughRenderTest::ExternalRenderer renderer1;
+ WebRtcPassthroughRenderTest::ExternalRenderer renderer2;
+ EXPECT_FALSE(AddExternalRenderCallback(stream_id3, &renderer1));
+ EXPECT_TRUE(AddExternalRenderCallback(stream_id1, &renderer1));
+ EXPECT_TRUE(AddExternalRenderCallback(stream_id2, &renderer2));
+ int test_frame_num = 10;
+ // RenderFrame without starting the render
+ for (int i = 0; i < test_frame_num; ++i) {
+ stream1->RenderFrame(stream_id1, frame);
+ }
+ EXPECT_EQ(0, renderer1.frame_num());
+ // Start the render and test again.
+ EXPECT_FALSE(StartRender(stream_id3));
+ EXPECT_TRUE(StartRender(stream_id1));
+ for (int i = 0; i < test_frame_num; ++i) {
+ stream1->RenderFrame(stream_id1, frame);
+ }
+ EXPECT_EQ(test_frame_num, renderer1.frame_num());
+ // Stop the render and test again.
+ EXPECT_FALSE(StopRender(stream_id3));
+ EXPECT_TRUE(StopRender(stream_id1));
+ for (int i = 0; i < test_frame_num; ++i) {
+ stream1->RenderFrame(stream_id1, frame);
+ }
+ // The frame number should not have changed.
+ EXPECT_EQ(test_frame_num, renderer1.frame_num());
+
+  // Test on stream2 with a different number of frames.
+ EXPECT_TRUE(StartRender(stream_id2));
+ test_frame_num = 30;
+ for (int i = 0; i < test_frame_num; ++i) {
+ stream2->RenderFrame(stream_id2, frame);
+ }
+ EXPECT_EQ(test_frame_num, renderer2.frame_num());
+}
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.cc
new file mode 100644
index 00000000000..08f63a549c9
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.cc
@@ -0,0 +1,183 @@
+/*
+ * libjingle
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/media/webrtc/webrtctexturevideoframe.h"
+
+#include "talk/base/common.h"
+#include "talk/base/logging.h"
+#include "talk/base/stream.h"
+
+#define UNIMPLEMENTED \
+ LOG(LS_ERROR) << "Call to unimplemented function "<< __FUNCTION__; \
+ ASSERT(false)
+
+namespace cricket {
+
+WebRtcTextureVideoFrame::WebRtcTextureVideoFrame(
+ webrtc::NativeHandle* handle, int width, int height, int64 elapsed_time,
+ int64 time_stamp)
+ : handle_(handle), width_(width), height_(height),
+ elapsed_time_(elapsed_time), time_stamp_(time_stamp) {}
+
+WebRtcTextureVideoFrame::~WebRtcTextureVideoFrame() {}
+
+bool WebRtcTextureVideoFrame::InitToBlack(
+ int w, int h, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) {
+ UNIMPLEMENTED;
+ return false;
+}
+
+bool WebRtcTextureVideoFrame::Reset(
+ uint32 fourcc, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp, int rotation) {
+ UNIMPLEMENTED;
+ return false;
+}
+
+const uint8* WebRtcTextureVideoFrame::GetYPlane() const {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+const uint8* WebRtcTextureVideoFrame::GetUPlane() const {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+const uint8* WebRtcTextureVideoFrame::GetVPlane() const {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+uint8* WebRtcTextureVideoFrame::GetYPlane() {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+uint8* WebRtcTextureVideoFrame::GetUPlane() {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+uint8* WebRtcTextureVideoFrame::GetVPlane() {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+int32 WebRtcTextureVideoFrame::GetYPitch() const {
+ UNIMPLEMENTED;
+ return width_;
+}
+
+int32 WebRtcTextureVideoFrame::GetUPitch() const {
+ UNIMPLEMENTED;
+ return (width_ + 1) / 2;
+}
+
+int32 WebRtcTextureVideoFrame::GetVPitch() const {
+ UNIMPLEMENTED;
+ return (width_ + 1) / 2;
+}
+
+VideoFrame* WebRtcTextureVideoFrame::Copy() const {
+ return new WebRtcTextureVideoFrame(
+ handle_, width_, height_, elapsed_time_, time_stamp_);
+}
+
+bool WebRtcTextureVideoFrame::MakeExclusive() {
+ UNIMPLEMENTED;
+ return false;
+}
+
+size_t WebRtcTextureVideoFrame::CopyToBuffer(uint8* buffer, size_t size) const {
+ UNIMPLEMENTED;
+ return 0;
+}
+
+size_t WebRtcTextureVideoFrame::ConvertToRgbBuffer(
+ uint32 to_fourcc, uint8* buffer, size_t size, int stride_rgb) const {
+ UNIMPLEMENTED;
+ return 0;
+}
+
+bool WebRtcTextureVideoFrame::CopyToPlanes(
+ uint8* dst_y, uint8* dst_u, uint8* dst_v, int32 dst_pitch_y,
+ int32 dst_pitch_u, int32 dst_pitch_v) const {
+ UNIMPLEMENTED;
+ return false;
+}
+
+void WebRtcTextureVideoFrame::CopyToFrame(VideoFrame* dst) const {
+ UNIMPLEMENTED;
+}
+
+talk_base::StreamResult WebRtcTextureVideoFrame::Write(
+ talk_base::StreamInterface* stream, int* error) {
+ UNIMPLEMENTED;
+ return talk_base::SR_ERROR;
+}
+void WebRtcTextureVideoFrame::StretchToPlanes(
+ uint8* dst_y, uint8* dst_u, uint8* dst_v, int32 dst_pitch_y,
+ int32 dst_pitch_u, int32 dst_pitch_v, size_t width, size_t height,
+ bool interpolate, bool vert_crop) const {
+ UNIMPLEMENTED;
+}
+
+size_t WebRtcTextureVideoFrame::StretchToBuffer(
+ size_t dst_width, size_t dst_height, uint8* dst_buffer, size_t size,
+ bool interpolate, bool vert_crop) const {
+ UNIMPLEMENTED;
+ return 0;
+}
+
+void WebRtcTextureVideoFrame::StretchToFrame(
+ VideoFrame* dst, bool interpolate, bool vert_crop) const {
+ UNIMPLEMENTED;
+}
+
+VideoFrame* WebRtcTextureVideoFrame::Stretch(
+ size_t dst_width, size_t dst_height, bool interpolate,
+ bool vert_crop) const {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+bool WebRtcTextureVideoFrame::SetToBlack() {
+ UNIMPLEMENTED;
+ return false;
+}
+
+VideoFrame* WebRtcTextureVideoFrame::CreateEmptyFrame(
+ int w, int h, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) const {
+ UNIMPLEMENTED;
+ return NULL;
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.h
new file mode 100644
index 00000000000..05b50f74c28
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe.h
@@ -0,0 +1,120 @@
+/*
+ * libjingle
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_
+#define TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_
+
+#include "talk/base/refcount.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "talk/media/base/videoframe.h"
+#ifdef USE_WEBRTC_DEV_BRANCH
+#include "webrtc/common_video/interface/native_handle.h"
+#else
+#include "webrtc/common_video/interface/i420_video_frame.h"
+// Define NativeHandle to an existing type so we don't need to add lots of
+// USE_WEBRTC_DEV_BRANCH.
+#define NativeHandle I420VideoFrame
+#endif
+
+namespace cricket {
+
+// A video frame backed by the texture via a native handle.
+class WebRtcTextureVideoFrame : public VideoFrame {
+ public:
+ // Wraps |handle|, taking a reference to it via scoped_refptr (released in
+ // the destructor). Width/height and timestamps are stored as plain
+ // metadata; no pixel data is copied or owned.
+ WebRtcTextureVideoFrame(webrtc::NativeHandle* handle, int width, int height,
+ int64 elapsed_time, int64 time_stamp);
+ virtual ~WebRtcTextureVideoFrame();
+
+ // From base class VideoFrame.
+ // All pixel-level operations (Init/Reset, plane accessors, copy/stretch,
+ // SetToBlack) are implemented as UNIMPLEMENTED stubs in the .cc, because
+ // the frame's pixels live behind the opaque texture handle.
+ virtual bool InitToBlack(int w, int h, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp);
+ virtual bool Reset(uint32 fourcc, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time, int64 time_stamp,
+ int rotation);
+ virtual size_t GetWidth() const { return width_; }
+ virtual size_t GetHeight() const { return height_; }
+ virtual const uint8* GetYPlane() const;
+ virtual const uint8* GetUPlane() const;
+ virtual const uint8* GetVPlane() const;
+ virtual uint8* GetYPlane();
+ virtual uint8* GetUPlane();
+ virtual uint8* GetVPlane();
+ virtual int32 GetYPitch() const;
+ virtual int32 GetUPitch() const;
+ virtual int32 GetVPitch() const;
+ // Square pixels are assumed (aspect ratio 1:1).
+ virtual size_t GetPixelWidth() const { return 1; }
+ virtual size_t GetPixelHeight() const { return 1; }
+ virtual int64 GetElapsedTime() const { return elapsed_time_; }
+ virtual int64 GetTimeStamp() const { return time_stamp_; }
+ virtual void SetElapsedTime(int64 elapsed_time) {
+ elapsed_time_ = elapsed_time;
+ }
+ virtual void SetTimeStamp(int64 time_stamp) { time_stamp_ = time_stamp; }
+ // Rotation is not tracked for texture frames.
+ virtual int GetRotation() const { return 0; }
+ virtual VideoFrame* Copy() const;
+ virtual bool MakeExclusive();
+ virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
+ virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
+ size_t size, int stride_rgb) const;
+ // Exposes the underlying texture handle; non-NULL distinguishes this
+ // frame type from ordinary memory-backed frames.
+ virtual void* GetNativeHandle() const { return handle_.get(); }
+
+ virtual bool CopyToPlanes(
+ uint8* dst_y, uint8* dst_u, uint8* dst_v,
+ int32 dst_pitch_y, int32 dst_pitch_u, int32 dst_pitch_v) const;
+ virtual void CopyToFrame(VideoFrame* target) const;
+ virtual talk_base::StreamResult Write(talk_base::StreamInterface* stream,
+ int* error);
+ virtual void StretchToPlanes(
+ uint8* y, uint8* u, uint8* v, int32 pitchY, int32 pitchU, int32 pitchV,
+ size_t width, size_t height, bool interpolate, bool crop) const;
+ virtual size_t StretchToBuffer(size_t w, size_t h, uint8* buffer, size_t size,
+ bool interpolate, bool crop) const;
+ virtual void StretchToFrame(VideoFrame* target, bool interpolate,
+ bool crop) const;
+ virtual VideoFrame* Stretch(size_t w, size_t h, bool interpolate,
+ bool crop) const;
+ virtual bool SetToBlack();
+
+ protected:
+ virtual VideoFrame* CreateEmptyFrame(int w, int h, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) const;
+
+ private:
+ // The handle of the underlying video frame.
+ talk_base::scoped_refptr<webrtc::NativeHandle> handle_;
+ int width_;
+ int height_;
+ int64 elapsed_time_;
+ int64 time_stamp_;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTC_WEBRTCTEXTUREVIDEOFRAME_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe_unittest.cc
new file mode 100644
index 00000000000..9ac16da87d3
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtctexturevideoframe_unittest.cc
@@ -0,0 +1,84 @@
+/*
+ * libjingle
+ * Copyright 2013 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/media/webrtc/webrtctexturevideoframe.h"
+
+#include "talk/base/gunit.h"
+#include "talk/media/base/videocommon.h"
+
+// Test double for webrtc::NativeHandle. Unlike a real ref-counted object it
+// never deletes itself, so the tests can observe the exact reference count
+// after construction, Copy(), and destruction of the frames that share it.
+class NativeHandleImpl : public webrtc::NativeHandle {
+ public:
+ NativeHandleImpl() : ref_count_(0) {}
+ virtual ~NativeHandleImpl() {}
+ virtual int32_t AddRef() { return ++ref_count_; }
+ virtual int32_t Release() { return --ref_count_; }
+ // No real texture behind this handle.
+ virtual void* GetHandle() { return NULL; }
+
+ int32_t ref_count() { return ref_count_; }
+ private:
+ int32_t ref_count_;
+};
+
+// Constructor arguments must be reflected verbatim by the accessors, and the
+// timestamp setters must overwrite the constructor-supplied values.
+TEST(WebRtcTextureVideoFrameTest, InitialValues) {
+ NativeHandleImpl handle;
+ cricket::WebRtcTextureVideoFrame frame(&handle, 640, 480, 100, 200);
+ EXPECT_EQ(&handle, frame.GetNativeHandle());
+ EXPECT_EQ(640u, frame.GetWidth());
+ EXPECT_EQ(480u, frame.GetHeight());
+ EXPECT_EQ(100, frame.GetElapsedTime());
+ EXPECT_EQ(200, frame.GetTimeStamp());
+ frame.SetElapsedTime(300);
+ EXPECT_EQ(300, frame.GetElapsedTime());
+ frame.SetTimeStamp(400);
+ EXPECT_EQ(400, frame.GetTimeStamp());
+}
+
+// Copy() must produce a frame sharing the same native handle and carrying
+// identical metadata (dimensions and timestamps).
+TEST(WebRtcTextureVideoFrameTest, CopyFrame) {
+ NativeHandleImpl handle;
+ cricket::WebRtcTextureVideoFrame frame1(&handle, 640, 480, 100, 200);
+ cricket::VideoFrame* frame2 = frame1.Copy();
+ EXPECT_EQ(frame1.GetNativeHandle(), frame2->GetNativeHandle());
+ EXPECT_EQ(frame1.GetWidth(), frame2->GetWidth());
+ EXPECT_EQ(frame1.GetHeight(), frame2->GetHeight());
+ EXPECT_EQ(frame1.GetElapsedTime(), frame2->GetElapsedTime());
+ EXPECT_EQ(frame1.GetTimeStamp(), frame2->GetTimeStamp());
+ delete frame2;
+}
+
+// Each live frame holds exactly one reference on the handle: construction
+// and Copy() each add one, destruction releases one, ending back at zero.
+TEST(WebRtcTextureVideoFrameTest, RefCount) {
+ NativeHandleImpl handle;
+ EXPECT_EQ(0, handle.ref_count());
+ cricket::WebRtcTextureVideoFrame* frame1 =
+ new cricket::WebRtcTextureVideoFrame(&handle, 640, 480, 100, 200);
+ EXPECT_EQ(1, handle.ref_count());
+ cricket::VideoFrame* frame2 = frame1->Copy();
+ EXPECT_EQ(2, handle.ref_count());
+ delete frame2;
+ EXPECT_EQ(1, handle.ref_count());
+ delete frame1;
+ EXPECT_EQ(0, handle.ref_count());
+}
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.cc
new file mode 100644
index 00000000000..bcfda4e84e8
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.cc
@@ -0,0 +1,366 @@
+// libjingle
+// Copyright 2011 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Implementation of class WebRtcVideoCapturer.
+
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#ifdef HAVE_WEBRTC_VIDEO
+#include "talk/base/logging.h"
+#include "talk/base/thread.h"
+#include "talk/base/timeutils.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+
+#include "talk/base/win32.h" // Need this to #include the impl files.
+#include "webrtc/modules/video_capture/include/video_capture_factory.h"
+
+namespace cricket {
+
+// One row of the FOURCC <-> webrtc::RawVideoType mapping table below.
+// NOTE(review): the k-prefix on a struct name is unconventional (k is
+// normally reserved for constants) — kept as imported.
+struct kVideoFourCCEntry {
+ uint32 fourcc;
+ webrtc::RawVideoType webrtc_type;
+};
+
+// This indicates our format preferences and defines a mapping between
+// webrtc::RawVideoType (from video_capture_defines.h) to our FOURCCs.
+// Entries are ordered most-preferred first (cheapest conversion at the top);
+// GetPreferredFourccs() below reports them in this order.
+static kVideoFourCCEntry kSupportedFourCCs[] = {
+ { FOURCC_I420, webrtc::kVideoI420 }, // 12 bpp, no conversion.
+ { FOURCC_YV12, webrtc::kVideoYV12 }, // 12 bpp, no conversion.
+ { FOURCC_NV12, webrtc::kVideoNV12 }, // 12 bpp, fast conversion.
+ { FOURCC_NV21, webrtc::kVideoNV21 }, // 12 bpp, fast conversion.
+ { FOURCC_YUY2, webrtc::kVideoYUY2 }, // 16 bpp, fast conversion.
+ { FOURCC_UYVY, webrtc::kVideoUYVY }, // 16 bpp, fast conversion.
+ { FOURCC_MJPG, webrtc::kVideoMJPEG }, // compressed, slow conversion.
+ { FOURCC_ARGB, webrtc::kVideoARGB }, // 32 bpp, slow conversion.
+ { FOURCC_24BG, webrtc::kVideoRGB24 }, // 24 bpp, slow conversion.
+};
+
+// Production implementation of WebRtcVcmFactoryInterface: forwards straight
+// to webrtc::VideoCaptureFactory. DeviceInfo objects have no Release()
+// method, so they are destroyed with a plain delete here.
+class WebRtcVcmFactory : public WebRtcVcmFactoryInterface {
+ public:
+ virtual webrtc::VideoCaptureModule* Create(int id, const char* device) {
+ return webrtc::VideoCaptureFactory::Create(id, device);
+ }
+ virtual webrtc::VideoCaptureModule::DeviceInfo* CreateDeviceInfo(int id) {
+ return webrtc::VideoCaptureFactory::CreateDeviceInfo(id);
+ }
+ virtual void DestroyDeviceInfo(webrtc::VideoCaptureModule::DeviceInfo* info) {
+ delete info;
+ }
+};
+
+// Translates a WebRTC capture capability into a cricket::VideoFormat.
+// Returns false when the capability's raw type has no FOURCC mapping in
+// kSupportedFourCCs (|format| is left untouched in that case).
+static bool CapabilityToFormat(const webrtc::VideoCaptureCapability& cap,
+ VideoFormat* format) {
+ uint32 fourcc = 0;
+ for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ if (kSupportedFourCCs[i].webrtc_type == cap.rawType) {
+ fourcc = kSupportedFourCCs[i].fourcc;
+ break;
+ }
+ }
+ if (fourcc == 0) {
+ return false;
+ }
+
+ format->fourcc = fourcc;
+ format->width = cap.width;
+ format->height = cap.height;
+ // VideoFormat expresses frame rate as an inter-frame interval.
+ format->interval = VideoFormat::FpsToInterval(cap.maxFPS);
+ return true;
+}
+
+// Inverse of CapabilityToFormat: builds a WebRTC capture capability from a
+// cricket::VideoFormat. Returns false when the format's FOURCC is not in
+// kSupportedFourCCs.
+static bool FormatToCapability(const VideoFormat& format,
+ webrtc::VideoCaptureCapability* cap) {
+ webrtc::RawVideoType webrtc_type = webrtc::kVideoUnknown;
+ for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ if (kSupportedFourCCs[i].fourcc == format.fourcc) {
+ webrtc_type = kSupportedFourCCs[i].webrtc_type;
+ break;
+ }
+ }
+ if (webrtc_type == webrtc::kVideoUnknown) {
+ return false;
+ }
+
+ cap->width = format.width;
+ cap->height = format.height;
+ cap->maxFPS = VideoFormat::IntervalToFps(format.interval);
+ // Raw (uncompressed) capture: no codec, no declared delay, progressive.
+ cap->expectedCaptureDelay = 0;
+ cap->rawType = webrtc_type;
+ cap->codecType = webrtc::kVideoCodecUnknown;
+ cap->interlaced = false;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Implementation of class WebRtcVideoCapturer
+///////////////////////////////////////////////////////////////////////////
+
+// Default constructor: uses the real VideoCaptureFactory-backed VCM factory.
+// The module itself is created later, in Init().
+WebRtcVideoCapturer::WebRtcVideoCapturer()
+ : factory_(new WebRtcVcmFactory),
+ module_(NULL),
+ captured_frames_(0) {
+}
+
+// Test/injection constructor: takes ownership of |factory| (held in a
+// scoped_ptr), letting tests supply a fake VCM implementation.
+WebRtcVideoCapturer::WebRtcVideoCapturer(WebRtcVcmFactoryInterface* factory)
+ : factory_(factory),
+ module_(NULL),
+ captured_frames_(0) {
+}
+
+// Drops the reference taken on the module in Init(), if any.
+WebRtcVideoCapturer::~WebRtcVideoCapturer() {
+ if (module_) {
+ module_->Release();
+ }
+}
+
+// Initializes the capturer for the named |device|: locates the device by
+// name, enumerates its supported formats, and creates a capture module for
+// it. Returns false if already initialized, the device is not found, or it
+// exposes no usable formats. On success, takes a reference on the module
+// and records the device id and supported formats.
+bool WebRtcVideoCapturer::Init(const Device& device) {
+ if (module_) {
+ LOG(LS_ERROR) << "The capturer is already initialized";
+ return false;
+ }
+
+ webrtc::VideoCaptureModule::DeviceInfo* info = factory_->CreateDeviceInfo(0);
+ if (!info) {
+ return false;
+ }
+
+ // Find the desired camera, by name.
+ // In the future, comparing IDs will be more robust.
+ // TODO(juberti): Figure what's needed to allow this.
+ int num_cams = info->NumberOfDevices();
+ char vcm_id[256] = "";
+ bool found = false;
+ for (int index = 0; index < num_cams; ++index) {
+ char vcm_name[256];
+ // GetDeviceName fills vcm_id as a side effect; after a break on match,
+ // vcm_id holds the unique id of the matched device.
+ if (info->GetDeviceName(index, vcm_name, ARRAY_SIZE(vcm_name),
+ vcm_id, ARRAY_SIZE(vcm_id)) != -1) {
+ // NOTE(review): the reinterpret_cast is a no-op — vcm_name is already
+ // a char array; a plain comparison would do.
+ if (device.name == reinterpret_cast<char*>(vcm_name)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ LOG(LS_WARNING) << "Failed to find capturer for id: " << device.id;
+ factory_->DestroyDeviceInfo(info);
+ return false;
+ }
+
+ // Enumerate the supported formats.
+ // TODO(juberti): Find out why this starts/stops the camera...
+ std::vector<VideoFormat> supported;
+ int32_t num_caps = info->NumberOfCapabilities(vcm_id);
+ for (int32_t i = 0; i < num_caps; ++i) {
+ webrtc::VideoCaptureCapability cap;
+ if (info->GetCapability(vcm_id, i, cap) != -1) {
+ VideoFormat format;
+ if (CapabilityToFormat(cap, &format)) {
+ supported.push_back(format);
+ } else {
+ // Capability uses a raw type we have no FOURCC for; skip it.
+ LOG(LS_WARNING) << "Ignoring unsupported WebRTC capture format "
+ << cap.rawType;
+ }
+ }
+ }
+ factory_->DestroyDeviceInfo(info);
+ if (supported.empty()) {
+ LOG(LS_ERROR) << "Failed to find usable formats for id: " << device.id;
+ return false;
+ }
+
+ module_ = factory_->Create(0, vcm_id);
+ if (!module_) {
+ LOG(LS_ERROR) << "Failed to create capturer for id: " << device.id;
+ return false;
+ }
+
+ // It is safe to change member attributes now.
+ module_->AddRef();
+ SetId(device.id);
+ SetSupportedFormats(supported);
+ return true;
+}
+
+// Initializes the capturer with an externally created capture module (e.g.
+// injected by the embedder). Takes a reference on |module|; the capturer's
+// id and supported-format list are left unset (see TODO).
+bool WebRtcVideoCapturer::Init(webrtc::VideoCaptureModule* module) {
+ if (module_) {
+ LOG(LS_ERROR) << "The capturer is already initialized";
+ return false;
+ }
+ if (!module) {
+ LOG(LS_ERROR) << "Invalid VCM supplied";
+ return false;
+ }
+ // TODO(juberti): Set id and formats.
+ (module_ = module)->AddRef();
+ return true;
+}
+
+// Picks the closest supported format for |desired|. When the base-class
+// matcher fails (e.g. a manually injected VCM with an empty format list),
+// falls back to the requested dimensions/interval with I420 — so this
+// always succeeds once |best_format| is non-NULL.
+bool WebRtcVideoCapturer::GetBestCaptureFormat(const VideoFormat& desired,
+ VideoFormat* best_format) {
+ if (!best_format) {
+ return false;
+ }
+
+ if (!VideoCapturer::GetBestCaptureFormat(desired, best_format)) {
+ // We maybe using a manually injected VCM which doesn't support enum.
+ // Use the desired format as the best format.
+ best_format->width = desired.width;
+ best_format->height = desired.height;
+ best_format->fourcc = FOURCC_I420;
+ best_format->interval = desired.interval;
+ LOG(LS_INFO) << "Failed to find best capture format,"
+ << " fall back to the requested format "
+ << best_format->ToString();
+ }
+ return true;
+}
+
+// Starts capturing in |capture_format|. Registers this object as the VCM's
+// data callback, so frames arrive via OnIncomingCapturedFrame(). Returns
+// CS_NO_DEVICE if Init() was never called, CS_FAILED on bad format /
+// already-running / VCM errors, and CS_STARTING on success.
+CaptureState WebRtcVideoCapturer::Start(const VideoFormat& capture_format) {
+ if (!module_) {
+ LOG(LS_ERROR) << "The capturer has not been initialized";
+ return CS_NO_DEVICE;
+ }
+
+ // TODO(hellner): weird to return failure when it is in fact actually running.
+ if (IsRunning()) {
+ LOG(LS_ERROR) << "The capturer is already running";
+ return CS_FAILED;
+ }
+
+ // NOTE(review): the capture format is recorded before it is validated
+ // below, so a rejected format leaves it set — confirm this is intended.
+ SetCaptureFormat(&capture_format);
+
+ webrtc::VideoCaptureCapability cap;
+ if (!FormatToCapability(capture_format, &cap)) {
+ LOG(LS_ERROR) << "Invalid capture format specified";
+ return CS_FAILED;
+ }
+
+ std::string camera_id(GetId());
+ uint32 start = talk_base::Time();
+ if (module_->RegisterCaptureDataCallback(*this) != 0 ||
+ module_->StartCapture(cap) != 0) {
+ LOG(LS_ERROR) << "Camera '" << camera_id << "' failed to start";
+ return CS_FAILED;
+ }
+
+ LOG(LS_INFO) << "Camera '" << camera_id << "' started with format "
+ << capture_format.ToString() << ", elapsed time "
+ << talk_base::TimeSince(start) << " ms";
+
+ captured_frames_ = 0;
+ // State is marked running immediately (observers see CS_RUNNING via
+ // SetCaptureState) while CS_STARTING is returned to the direct caller.
+ SetCaptureState(CS_RUNNING);
+ return CS_STARTING;
+}
+
+// Stops capture (if running), unregisters the data callback, clears any
+// messages queued for this object on the current thread, and always resets
+// the recorded capture format.
+void WebRtcVideoCapturer::Stop() {
+ if (IsRunning()) {
+ talk_base::Thread::Current()->Clear(this);
+ module_->StopCapture();
+ module_->DeRegisterCaptureDataCallback();
+
+ // TODO(juberti): Determine if the VCM exposes any drop stats we can use.
+ double drop_ratio = 0.0;
+ std::string camera_id(GetId());
+ LOG(LS_INFO) << "Camera '" << camera_id << "' stopped after capturing "
+ << captured_frames_ << " frames and dropping "
+ << drop_ratio << "%";
+ }
+ SetCaptureFormat(NULL);
+}
+
+// Running means a module exists and its capture loop is active.
+bool WebRtcVideoCapturer::IsRunning() {
+ return (module_ != NULL && module_->CaptureStarted());
+}
+
+// Reports the FOURCCs this capturer can consume, in preference order
+// (taken directly from the kSupportedFourCCs table).
+bool WebRtcVideoCapturer::GetPreferredFourccs(
+ std::vector<uint32>* fourccs) {
+ if (!fourccs) {
+ return false;
+ }
+
+ fourccs->clear();
+ for (size_t i = 0; i < ARRAY_SIZE(kSupportedFourCCs); ++i) {
+ fourccs->push_back(kSupportedFourCCs[i].fourcc);
+ }
+ return true;
+}
+
+// VCM data callback: packs the planar I420 |sample| into a single
+// contiguous buffer (reused across frames when the size is unchanged) and
+// forwards it to listeners as a WebRtcCapturedFrame.
+// NOTE(review): presumably invoked on the VCM's capture thread — confirm
+// listener thread-safety expectations.
+void WebRtcVideoCapturer::OnIncomingCapturedFrame(const int32_t id,
+ webrtc::I420VideoFrame& sample) {
+ ASSERT(IsRunning());
+
+ ++captured_frames_;
+ // Log the size and pixel aspect ratio of the first captured frame.
+ if (1 == captured_frames_) {
+ LOG(LS_INFO) << "Captured frame size "
+ << sample.width() << "x" << sample.height()
+ << ". Expected format " << GetCaptureFormat()->ToString();
+ }
+
+ // Signal down stream components on captured frame.
+ // The CapturedFrame class doesn't support planes. We have to ExtractBuffer
+ // to one block for it.
+ int length = webrtc::CalcBufferSize(webrtc::kI420,
+ sample.width(), sample.height());
+ // Reallocate the staging buffer only when the frame size changes.
+ if (!captured_frame_.get() ||
+ captured_frame_->length() != static_cast<size_t>(length)) {
+ captured_frame_.reset(new FrameBuffer(length));
+ }
+ // TODO(ronghuawu): Refactor the WebRtcVideoFrame to avoid memory copy.
+ webrtc::ExtractBuffer(sample, length,
+ reinterpret_cast<uint8_t*>(captured_frame_->data()));
+ WebRtcCapturedFrame frame(sample, captured_frame_->data(), length);
+ SignalFrameCaptured(this, &frame);
+}
+
+// VCM delay callback: informational only; the delay is logged, not stored.
+void WebRtcVideoCapturer::OnCaptureDelayChanged(const int32_t id,
+ const int32_t delay) {
+ LOG(LS_INFO) << "Capture delay changed to " << delay << " ms";
+}
+
+// WebRtcCapturedFrame
+// WebRtcCapturedFrame
+// Fills the CapturedFrame fields from |sample|'s metadata; |buffer| is the
+// already-packed I420 data (not owned by this object) of |length| bytes.
+WebRtcCapturedFrame::WebRtcCapturedFrame(const webrtc::I420VideoFrame& sample,
+ void* buffer,
+ int length) {
+ width = sample.width();
+ height = sample.height();
+ fourcc = FOURCC_I420;
+ // TODO(hellner): Support pixel aspect ratio (for OSX).
+ pixel_width = 1;
+ pixel_height = 1;
+ // Convert units from VideoFrame RenderTimeMs to CapturedFrame (nanoseconds).
+ elapsed_time = sample.render_time_ms() * talk_base::kNumNanosecsPerMillisec;
+ // No separate capture timestamp is available; reuse the render time.
+ time_stamp = elapsed_time;
+ data_size = length;
+ data = buffer;
+}
+
+} // namespace cricket
+
+#endif // HAVE_WEBRTC_VIDEO
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.h
new file mode 100644
index 00000000000..eb999564409
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer.h
@@ -0,0 +1,103 @@
+// libjingle
+// Copyright 2004 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef TALK_MEDIA_WEBRTCVIDEOCAPTURER_H_
+#define TALK_MEDIA_WEBRTCVIDEOCAPTURER_H_
+
+#ifdef HAVE_WEBRTC_VIDEO
+
+#include <string>
+#include <vector>
+
+#include "talk/base/messagehandler.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+#include "webrtc/modules/video_capture/include/video_capture.h"
+
+namespace cricket {
+
+// Factory to allow injection of a VCM impl into WebRtcVideoCapturer.
+// DeviceInfos do not have a Release() and therefore need an explicit Destroy().
+class WebRtcVcmFactoryInterface {
+ public:
+ virtual ~WebRtcVcmFactoryInterface() {}
+ // Returns a capture module for |device|; caller manages it via AddRef/
+ // Release.
+ virtual webrtc::VideoCaptureModule* Create(
+ int id, const char* device) = 0;
+ // Returned DeviceInfo must be released with DestroyDeviceInfo() (it has
+ // no Release() of its own).
+ virtual webrtc::VideoCaptureModule::DeviceInfo* CreateDeviceInfo(int id) = 0;
+ virtual void DestroyDeviceInfo(
+ webrtc::VideoCaptureModule::DeviceInfo* info) = 0;
+};
+
+// WebRTC-based implementation of VideoCapturer.
+class WebRtcVideoCapturer : public VideoCapturer,
+ public webrtc::VideoCaptureDataCallback {
+ public:
+ WebRtcVideoCapturer();
+ // Takes ownership of |factory| (used by tests to inject a fake VCM).
+ explicit WebRtcVideoCapturer(WebRtcVcmFactoryInterface* factory);
+ virtual ~WebRtcVideoCapturer();
+
+ // Exactly one Init() overload should be called, once, before Start():
+ // by device (enumerates formats) or with a pre-built capture module.
+ bool Init(const Device& device);
+ bool Init(webrtc::VideoCaptureModule* module);
+
+ // Override virtual methods of the parent class VideoCapturer.
+ virtual bool GetBestCaptureFormat(const VideoFormat& desired,
+ VideoFormat* best_format);
+ virtual CaptureState Start(const VideoFormat& capture_format);
+ virtual void Stop();
+ virtual bool IsRunning();
+ // This capturer only wraps camera devices, never screen content.
+ virtual bool IsScreencast() const { return false; }
+
+ protected:
+ // Override virtual methods of the parent class VideoCapturer.
+ virtual bool GetPreferredFourccs(std::vector<uint32>* fourccs);
+
+ private:
+ // Callback when a frame is captured by camera.
+ virtual void OnIncomingCapturedFrame(const int32_t id,
+ webrtc::I420VideoFrame& frame);
+ // Encoded frames are ignored; only raw I420 capture is supported.
+ virtual void OnIncomingCapturedEncodedFrame(const int32_t id,
+ webrtc::VideoFrame& frame,
+ webrtc::VideoCodecType codec_type) {
+ }
+ virtual void OnCaptureDelayChanged(const int32_t id,
+ const int32_t delay);
+
+ talk_base::scoped_ptr<WebRtcVcmFactoryInterface> factory_;
+ // Ref-counted capture module; AddRef'd in Init(), Released in the dtor.
+ webrtc::VideoCaptureModule* module_;
+ // Number of frames delivered since the last successful Start().
+ int captured_frames_;
+ // Reusable staging buffer for packing I420 planes into one block.
+ talk_base::scoped_ptr<FrameBuffer> captured_frame_;
+};
+
+// CapturedFrame view over a packed I420 buffer; |buffer| is borrowed, not
+// owned, and must outlive this object.
+struct WebRtcCapturedFrame : public CapturedFrame {
+ public:
+ WebRtcCapturedFrame(const webrtc::I420VideoFrame& frame,
+ void* buffer, int length);
+};
+
+} // namespace cricket
+
+#endif // HAVE_WEBRTC_VIDEO
+#endif // TALK_MEDIA_WEBRTCVIDEOCAPTURER_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer_unittest.cc
new file mode 100644
index 00000000000..226aa4b333d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideocapturer_unittest.cc
@@ -0,0 +1,145 @@
+// libjingle
+// Copyright 2004 Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// 3. The name of the author may not be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include <vector>
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/thread.h"
+#include "talk/media/base/testutils.h"
+#include "talk/media/base/videocommon.h"
+#include "talk/media/webrtc/fakewebrtcvcmfactory.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+
+using cricket::VideoFormat;
+
+static const std::string kTestDeviceName = "JuberTech FakeCam Q123";
+static const std::string kTestDeviceId = "foo://bar/baz";
+const VideoFormat kDefaultVideoFormat =
+ VideoFormat(640, 400, VideoFormat::FpsToInterval(30), cricket::FOURCC_ANY);
+
+// Fixture wiring a WebRtcVideoCapturer to a fake VCM factory that exposes
+// one fake device ("JuberTech FakeCam Q123") with a single VGA/I420/30fps
+// capability.
+class WebRtcVideoCapturerTest : public testing::Test {
+ public:
+ WebRtcVideoCapturerTest()
+ : factory_(new FakeWebRtcVcmFactory),
+ capturer_(new cricket::WebRtcVideoCapturer(factory_)),
+ listener_(capturer_.get()) {
+ factory_->device_info.AddDevice(kTestDeviceName, kTestDeviceId);
+ // add a VGA/I420 capability
+ webrtc::VideoCaptureCapability vga;
+ vga.width = 640;
+ vga.height = 480;
+ vga.maxFPS = 30;
+ vga.rawType = webrtc::kVideoI420;
+ factory_->device_info.AddCapability(kTestDeviceId, vga);
+ }
+
+ protected:
+ FakeWebRtcVcmFactory* factory_; // owned by capturer_
+ talk_base::scoped_ptr<cricket::WebRtcVideoCapturer> capturer_;
+ // Observes capture-state changes and delivered frames from capturer_.
+ cricket::VideoCapturerListener listener_;
+};
+
+// A freshly constructed capturer has no id, no formats, and is not running.
+TEST_F(WebRtcVideoCapturerTest, TestNotOpened) {
+ EXPECT_EQ("", capturer_->GetId());
+ EXPECT_TRUE(capturer_->GetSupportedFormats()->empty());
+ EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL);
+ EXPECT_FALSE(capturer_->IsRunning());
+}
+
+// Init() with an unknown device must fail and leave the capturer stopped.
+TEST_F(WebRtcVideoCapturerTest, TestBadInit) {
+ EXPECT_FALSE(capturer_->Init(cricket::Device("bad-name", "bad-id")));
+ EXPECT_FALSE(capturer_->IsRunning());
+}
+
+// Init() by device records the id and the single VGA capability, but does
+// not start capturing.
+TEST_F(WebRtcVideoCapturerTest, TestInit) {
+ EXPECT_TRUE(capturer_->Init(cricket::Device(kTestDeviceName, kTestDeviceId)));
+ EXPECT_EQ(kTestDeviceId, capturer_->GetId());
+ EXPECT_TRUE(NULL != capturer_->GetSupportedFormats());
+ ASSERT_EQ(1U, capturer_->GetSupportedFormats()->size());
+ EXPECT_EQ(640, (*capturer_->GetSupportedFormats())[0].width);
+ EXPECT_EQ(480, (*capturer_->GetSupportedFormats())[0].height);
+ EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL); // not started yet
+ EXPECT_FALSE(capturer_->IsRunning());
+}
+
+// Init() with a directly injected capture module succeeds.
+TEST_F(WebRtcVideoCapturerTest, TestInitVcm) {
+ EXPECT_TRUE(capturer_->Init(factory_->Create(0,
+ reinterpret_cast<const char*>(kTestDeviceId.c_str()))));
+}
+
+// Full start/frame-delivery/stop cycle through the device-based Init path,
+// including the double-Start failure (CS_FAILED).
+TEST_F(WebRtcVideoCapturerTest, TestCapture) {
+ EXPECT_TRUE(capturer_->Init(cricket::Device(kTestDeviceName, kTestDeviceId)));
+ cricket::VideoFormat format(
+ capturer_->GetSupportedFormats()->at(0));
+ EXPECT_EQ(cricket::CS_STARTING, capturer_->Start(format));
+ EXPECT_TRUE(capturer_->IsRunning());
+ ASSERT_TRUE(capturer_->GetCaptureFormat() != NULL);
+ EXPECT_EQ(format, *capturer_->GetCaptureFormat());
+ EXPECT_EQ_WAIT(cricket::CS_RUNNING, listener_.last_capture_state(), 1000);
+ EXPECT_TRUE(factory_->modules[0]->SendFrame(640, 480));
+ EXPECT_TRUE_WAIT(listener_.frame_count() > 0, 5000);
+ EXPECT_EQ(capturer_->GetCaptureFormat()->fourcc, listener_.frame_fourcc());
+ EXPECT_EQ(640, listener_.frame_width());
+ EXPECT_EQ(480, listener_.frame_height());
+ EXPECT_EQ(cricket::CS_FAILED, capturer_->Start(format));
+ capturer_->Stop();
+ EXPECT_FALSE(capturer_->IsRunning());
+ EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL);
+}
+
+// Same cycle through the injected-module Init path: the format list is
+// empty, so GetBestCaptureFormat falls back to the requested size + I420.
+TEST_F(WebRtcVideoCapturerTest, TestCaptureVcm) {
+ EXPECT_TRUE(capturer_->Init(factory_->Create(0,
+ reinterpret_cast<const char*>(kTestDeviceId.c_str()))));
+ EXPECT_TRUE(capturer_->GetSupportedFormats()->empty());
+ VideoFormat format;
+ EXPECT_TRUE(capturer_->GetBestCaptureFormat(kDefaultVideoFormat, &format));
+ EXPECT_EQ(kDefaultVideoFormat.width, format.width);
+ EXPECT_EQ(kDefaultVideoFormat.height, format.height);
+ EXPECT_EQ(kDefaultVideoFormat.interval, format.interval);
+ EXPECT_EQ(cricket::FOURCC_I420, format.fourcc);
+ EXPECT_EQ(cricket::CS_STARTING, capturer_->Start(format));
+ EXPECT_TRUE(capturer_->IsRunning());
+ ASSERT_TRUE(capturer_->GetCaptureFormat() != NULL);
+ EXPECT_EQ(format, *capturer_->GetCaptureFormat());
+ EXPECT_EQ_WAIT(cricket::CS_RUNNING, listener_.last_capture_state(), 1000);
+ EXPECT_TRUE(factory_->modules[0]->SendFrame(640, 480));
+ EXPECT_TRUE_WAIT(listener_.frame_count() > 0, 5000);
+ EXPECT_EQ(capturer_->GetCaptureFormat()->fourcc, listener_.frame_fourcc());
+ EXPECT_EQ(640, listener_.frame_width());
+ EXPECT_EQ(480, listener_.frame_height());
+ EXPECT_EQ(cricket::CS_FAILED, capturer_->Start(format));
+ capturer_->Stop();
+ EXPECT_FALSE(capturer_->IsRunning());
+ EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL);
+}
+
+// Start() before Init() must report CS_NO_DEVICE and change nothing.
+TEST_F(WebRtcVideoCapturerTest, TestCaptureWithoutInit) {
+ cricket::VideoFormat format;
+ EXPECT_EQ(cricket::CS_NO_DEVICE, capturer_->Start(format));
+ EXPECT_TRUE(capturer_->GetCaptureFormat() == NULL);
+ EXPECT_FALSE(capturer_->IsRunning());
+}
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h
new file mode 100644
index 00000000000..483bca7d39b
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideodecoderfactory.h
@@ -0,0 +1,53 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTC_WEBRTCVIDEODECODERFACTORY_H_
+#define TALK_MEDIA_WEBRTC_WEBRTCVIDEODECODERFACTORY_H_
+
+#include "talk/base/refcount.h"
+#include "webrtc/common_types.h"
+
+namespace webrtc {
+class VideoDecoder;
+}
+
+namespace cricket {
+
+class WebRtcVideoDecoderFactory {
+ public:
+ // Caller takes the ownership of the returned object and it should be released
+ // by calling DestroyVideoDecoder().
+ virtual webrtc::VideoDecoder* CreateVideoDecoder(
+ webrtc::VideoCodecType type) = 0;
+ virtual ~WebRtcVideoDecoderFactory() {}
+
+ virtual void DestroyVideoDecoder(webrtc::VideoDecoder* decoder) = 0;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTC_WEBRTCVIDEODECODERFACTORY_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h
new file mode 100644
index 00000000000..a84430963b6
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoencoderfactory.h
@@ -0,0 +1,89 @@
+/*
+ * libjingle
+ * Copyright 2013, Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTC_WEBRTCVIDEOENCODERFACTORY_H_
+#define TALK_MEDIA_WEBRTC_WEBRTCVIDEOENCODERFACTORY_H_
+
+#include "talk/base/refcount.h"
+#include "talk/media/base/codec.h"
+#include "webrtc/common_types.h"
+
+namespace webrtc {
+class VideoEncoder;
+}
+
+namespace cricket {
+
+class WebRtcVideoEncoderFactory {
+ public:
+ struct VideoCodec {
+ webrtc::VideoCodecType type;
+ std::string name;
+ int max_width;
+ int max_height;
+ int max_fps;
+
+ VideoCodec(webrtc::VideoCodecType t, const std::string& nm, int w, int h,
+ int fr)
+ : type(t), name(nm), max_width(w), max_height(h), max_fps(fr) {
+ }
+ };
+
+ class Observer {
+ public:
+ // Invoked when the list of supported codecs becomes available.
+ // This will not be invoked if the list of codecs is already available when
+ // the factory is installed. Otherwise this will be invoked only once if the
+ // list of codecs is not yet available when the factory is installed.
+ virtual void OnCodecsAvailable() = 0;
+
+ protected:
+ virtual ~Observer() {}
+ };
+
+ // Caller takes the ownership of the returned object and it should be released
+ // by calling DestroyVideoEncoder().
+ virtual webrtc::VideoEncoder* CreateVideoEncoder(
+ webrtc::VideoCodecType type) = 0;
+ virtual ~WebRtcVideoEncoderFactory() {}
+
+ // Adds/removes observer to receive OnCodecsChanged notifications.
+ // Factory must outlive Observer. Observer is responsible for removing itself
+ // from the Factory by the time its dtor is done.
+ virtual void AddObserver(Observer* observer) = 0;
+ virtual void RemoveObserver(Observer* observer) = 0;
+
+ // Returns a list of supported codecs in order of preference.
+ // The list is empty if the list of codecs is not yet available.
+ virtual const std::vector<VideoCodec>& codecs() const = 0;
+
+ virtual void DestroyVideoEncoder(webrtc::VideoEncoder* encoder) = 0;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTC_WEBRTCVIDEOENCODERFACTORY_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.cc
new file mode 100644
index 00000000000..873b249b70f
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.cc
@@ -0,0 +1,3487 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_WEBRTC_VIDEO
+#include "talk/media/webrtc/webrtcvideoengine.h"
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+#include <set>
+
+#include "talk/base/basictypes.h"
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/cpumonitor.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringutils.h"
+#include "talk/base/thread.h"
+#include "talk/base/timeutils.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/rtputils.h"
+#include "talk/media/base/streamparams.h"
+#include "talk/media/base/videoadapter.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/base/videorenderer.h"
+#include "talk/media/devices/filevideocapturer.h"
+#include "talk/media/webrtc/webrtcpassthroughrender.h"
+#include "talk/media/webrtc/webrtctexturevideoframe.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
+#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+#include "talk/media/webrtc/webrtcvie.h"
+#include "talk/media/webrtc/webrtcvoe.h"
+#include "talk/media/webrtc/webrtcvoiceengine.h"
+
+#if !defined(LIBPEERCONNECTION_LIB)
+#ifndef HAVE_WEBRTC_VIDEO
+#error Need webrtc video
+#endif
+#include "talk/media/webrtc/webrtcmediaengine.h"
+
+WRME_EXPORT
+cricket::MediaEngineInterface* CreateWebRtcMediaEngine(
+ webrtc::AudioDeviceModule* adm, webrtc::AudioDeviceModule* adm_sc,
+ cricket::WebRtcVideoEncoderFactory* encoder_factory,
+ cricket::WebRtcVideoDecoderFactory* decoder_factory) {
+ return new cricket::WebRtcMediaEngine(adm, adm_sc, encoder_factory,
+ decoder_factory);
+}
+
+WRME_EXPORT
+void DestroyWebRtcMediaEngine(cricket::MediaEngineInterface* media_engine) {
+ delete static_cast<cricket::WebRtcMediaEngine*>(media_engine);
+}
+#endif
+
+
+namespace cricket {
+
+
+static const int kDefaultLogSeverity = talk_base::LS_WARNING;
+
+static const int kMinVideoBitrate = 50;
+static const int kStartVideoBitrate = 300;
+static const int kMaxVideoBitrate = 2000;
+static const int kDefaultConferenceModeMaxVideoBitrate = 500;
+
+static const int kVideoMtu = 1200;
+
+static const int kVideoRtpBufferSize = 65536;
+
+static const char kVp8PayloadName[] = "VP8";
+static const char kRedPayloadName[] = "red";
+static const char kFecPayloadName[] = "ulpfec";
+
+static const int kDefaultNumberOfTemporalLayers = 1; // 1:1
+
+static const int kTimestampDeltaInSecondsForWarning = 2;
+
+static const int kMaxExternalVideoCodecs = 8;
+static const int kExternalVideoPayloadTypeBase = 120;
+
+// Static allocation of payload type values for external video codec.
+static int GetExternalVideoPayloadType(int index) {
+ ASSERT(index >= 0 && index < kMaxExternalVideoCodecs);
+ return kExternalVideoPayloadTypeBase + index;
+}
+
+static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
+ const char* delim = "\r\n";
+ // TODO(fbarchard): Fix strtok lint warning.
+ for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
+ LOG_V(sev) << tok;
+ }
+}
+
+// Severity is an integer because it comes is assumed to be from command line.
+static int SeverityToFilter(int severity) {
+ int filter = webrtc::kTraceNone;
+ switch (severity) {
+ case talk_base::LS_VERBOSE:
+ filter |= webrtc::kTraceAll;
+ case talk_base::LS_INFO:
+ filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
+ case talk_base::LS_WARNING:
+ filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
+ case talk_base::LS_ERROR:
+ filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
+ }
+ return filter;
+}
+
+static const int kCpuMonitorPeriodMs = 2000; // 2 seconds.
+
+static const bool kNotSending = false;
+
+// Extension header for RTP timestamp offset, see RFC 5450 for details:
+// http://tools.ietf.org/html/rfc5450
+static const char kRtpTimestampOffsetHeaderExtension[] =
+ "urn:ietf:params:rtp-hdrext:toffset";
+static const int kRtpTimeOffsetExtensionId = 2;
+
+// Extension header for absolute send time, see url for details:
+// http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
+static const char kRtpAbsoluteSendTimeHeaderExtension[] =
+ "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
+static const int kRtpAbsoluteSendTimeExtensionId = 3;
+
+static bool IsNackEnabled(const VideoCodec& codec) {
+ return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
+ kParamValueEmpty));
+}
+
+// Returns true if Receiver Estimated Max Bitrate is enabled.
+static bool IsRembEnabled(const VideoCodec& codec) {
+ return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamRemb,
+ kParamValueEmpty));
+}
+
+struct FlushBlackFrameData : public talk_base::MessageData {
+ FlushBlackFrameData(uint32 s, int64 t) : ssrc(s), timestamp(t) {
+ }
+ uint32 ssrc;
+ int64 timestamp;
+};
+
+class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
+ public:
+ explicit WebRtcRenderAdapter(VideoRenderer* renderer)
+ : renderer_(renderer), width_(0), height_(0), watermark_enabled_(false) {
+ }
+
+ virtual ~WebRtcRenderAdapter() {
+ }
+
+ void set_watermark_enabled(bool enable) {
+ talk_base::CritScope cs(&crit_);
+ watermark_enabled_ = enable;
+ }
+
+ void SetRenderer(VideoRenderer* renderer) {
+ talk_base::CritScope cs(&crit_);
+ renderer_ = renderer;
+ // FrameSizeChange may have already been called when renderer was not set.
+ // If so we should call SetSize here.
+ // TODO(ronghuawu): Add unit test for this case. Didn't do it now
+ // because the WebRtcRenderAdapter is currently hiding in cc file. No
+ // good way to get access to it from the unit test.
+ if (width_ > 0 && height_ > 0 && renderer_ != NULL) {
+ if (!renderer_->SetSize(width_, height_, 0)) {
+ LOG(LS_ERROR)
+ << "WebRtcRenderAdapter SetRenderer failed to SetSize to: "
+ << width_ << "x" << height_;
+ }
+ }
+ }
+
+ // Implementation of webrtc::ExternalRenderer.
+ virtual int FrameSizeChange(unsigned int width, unsigned int height,
+ unsigned int /*number_of_streams*/) {
+ talk_base::CritScope cs(&crit_);
+ width_ = width;
+ height_ = height;
+ LOG(LS_INFO) << "WebRtcRenderAdapter frame size changed to: "
+ << width << "x" << height;
+ if (renderer_ == NULL) {
+ LOG(LS_VERBOSE) << "WebRtcRenderAdapter the renderer has not been set. "
+ << "SetSize will be called later in SetRenderer.";
+ return 0;
+ }
+ return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
+ }
+
+ virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
+ uint32_t time_stamp, int64_t render_time
+#ifdef USE_WEBRTC_DEV_BRANCH
+ , void* handle
+#endif
+ ) {
+ talk_base::CritScope cs(&crit_);
+ frame_rate_tracker_.Update(1);
+ if (renderer_ == NULL) {
+ return 0;
+ }
+ // Convert 90K rtp timestamp to ns timestamp.
+ int64 rtp_time_stamp_in_ns = (time_stamp / 90) *
+ talk_base::kNumNanosecsPerMillisec;
+ // Convert milisecond render time to ns timestamp.
+ int64 render_time_stamp_in_ns = render_time *
+ talk_base::kNumNanosecsPerMillisec;
+ // Send the rtp timestamp to renderer as the VideoFrame timestamp.
+ // and the render timestamp as the VideoFrame elapsed_time.
+#ifdef USE_WEBRTC_DEV_BRANCH
+ if (handle == NULL) {
+#endif
+ return DeliverBufferFrame(buffer, buffer_size, render_time_stamp_in_ns,
+ rtp_time_stamp_in_ns);
+#ifdef USE_WEBRTC_DEV_BRANCH
+ } else {
+ return DeliverTextureFrame(handle, render_time_stamp_in_ns,
+ rtp_time_stamp_in_ns);
+ }
+#endif
+ }
+
+ virtual bool IsTextureSupported() { return true; }
+
+ int DeliverBufferFrame(unsigned char* buffer, int buffer_size,
+ int64 elapsed_time, int64 time_stamp) {
+ WebRtcVideoFrame video_frame;
+ video_frame.Attach(buffer, buffer_size, width_, height_,
+ 1, 1, elapsed_time, time_stamp, 0);
+
+
+ // Sanity check on decoded frame size.
+ if (buffer_size != static_cast<int>(VideoFrame::SizeOf(width_, height_))) {
+ LOG(LS_WARNING) << "WebRtcRenderAdapter received a strange frame size: "
+ << buffer_size;
+ }
+
+ int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
+ uint8* buffer_temp;
+ size_t buffer_size_temp;
+ video_frame.Detach(&buffer_temp, &buffer_size_temp);
+ return ret;
+ }
+
+ int DeliverTextureFrame(void* handle, int64 elapsed_time, int64 time_stamp) {
+ WebRtcTextureVideoFrame video_frame(
+ static_cast<webrtc::NativeHandle*>(handle), width_, height_,
+ elapsed_time, time_stamp);
+ return renderer_->RenderFrame(&video_frame);
+ }
+
+ unsigned int width() {
+ talk_base::CritScope cs(&crit_);
+ return width_;
+ }
+
+ unsigned int height() {
+ talk_base::CritScope cs(&crit_);
+ return height_;
+ }
+
+ int framerate() {
+ talk_base::CritScope cs(&crit_);
+ return static_cast<int>(frame_rate_tracker_.units_second());
+ }
+
+ VideoRenderer* renderer() {
+ talk_base::CritScope cs(&crit_);
+ return renderer_;
+ }
+
+ private:
+ talk_base::CriticalSection crit_;
+ VideoRenderer* renderer_;
+ unsigned int width_;
+ unsigned int height_;
+ talk_base::RateTracker frame_rate_tracker_;
+ bool watermark_enabled_;
+};
+
+class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
+ public:
+ explicit WebRtcDecoderObserver(int video_channel)
+ : video_channel_(video_channel),
+ framerate_(0),
+ bitrate_(0),
+ firs_requested_(0) {
+ }
+
+ // virtual functions from VieDecoderObserver.
+ virtual void IncomingCodecChanged(const int videoChannel,
+ const webrtc::VideoCodec& videoCodec) {}
+ virtual void IncomingRate(const int videoChannel,
+ const unsigned int framerate,
+ const unsigned int bitrate) {
+ ASSERT(video_channel_ == videoChannel);
+ framerate_ = framerate;
+ bitrate_ = bitrate;
+ }
+ virtual void RequestNewKeyFrame(const int videoChannel) {
+ ASSERT(video_channel_ == videoChannel);
+ ++firs_requested_;
+ }
+
+ int framerate() const { return framerate_; }
+ int bitrate() const { return bitrate_; }
+ int firs_requested() const { return firs_requested_; }
+
+ private:
+ int video_channel_;
+ int framerate_;
+ int bitrate_;
+ int firs_requested_;
+};
+
+class WebRtcEncoderObserver : public webrtc::ViEEncoderObserver {
+ public:
+ explicit WebRtcEncoderObserver(int video_channel)
+ : video_channel_(video_channel),
+ framerate_(0),
+ bitrate_(0) {
+ }
+
+ // virtual functions from VieEncoderObserver.
+ virtual void OutgoingRate(const int videoChannel,
+ const unsigned int framerate,
+ const unsigned int bitrate) {
+ ASSERT(video_channel_ == videoChannel);
+ framerate_ = framerate;
+ bitrate_ = bitrate;
+ }
+
+ int framerate() const { return framerate_; }
+ int bitrate() const { return bitrate_; }
+
+ private:
+ int video_channel_;
+ int framerate_;
+ int bitrate_;
+};
+
+class WebRtcLocalStreamInfo {
+ public:
+ WebRtcLocalStreamInfo()
+ : width_(0), height_(0), elapsed_time_(-1), time_stamp_(-1) {}
+ size_t width() const {
+ talk_base::CritScope cs(&crit_);
+ return width_;
+ }
+ size_t height() const {
+ talk_base::CritScope cs(&crit_);
+ return height_;
+ }
+ int64 elapsed_time() const {
+ talk_base::CritScope cs(&crit_);
+ return elapsed_time_;
+ }
+ int64 time_stamp() const {
+ talk_base::CritScope cs(&crit_);
+ return time_stamp_;
+ }
+ int framerate() {
+ talk_base::CritScope cs(&crit_);
+ return static_cast<int>(rate_tracker_.units_second());
+ }
+ void GetLastFrameInfo(
+ size_t* width, size_t* height, int64* elapsed_time) const {
+ talk_base::CritScope cs(&crit_);
+ *width = width_;
+ *height = height_;
+ *elapsed_time = elapsed_time_;
+ }
+
+ void UpdateFrame(const VideoFrame* frame) {
+ talk_base::CritScope cs(&crit_);
+
+ width_ = frame->GetWidth();
+ height_ = frame->GetHeight();
+ elapsed_time_ = frame->GetElapsedTime();
+ time_stamp_ = frame->GetTimeStamp();
+
+ rate_tracker_.Update(1);
+ }
+
+ private:
+ mutable talk_base::CriticalSection crit_;
+ size_t width_;
+ size_t height_;
+ int64 elapsed_time_;
+ int64 time_stamp_;
+ talk_base::RateTracker rate_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(WebRtcLocalStreamInfo);
+};
+
+// WebRtcVideoChannelRecvInfo is a container class with members such as renderer
+// and a decoder observer that is used by receive channels.
+// It must exist as long as the receive channel is connected to renderer or a
+// decoder observer in this class and methods in the class should only be called
+// from the worker thread.
+class WebRtcVideoChannelRecvInfo {
+ public:
+ typedef std::map<int, webrtc::VideoDecoder*> DecoderMap; // key: payload type
+ explicit WebRtcVideoChannelRecvInfo(int channel_id)
+ : channel_id_(channel_id),
+ render_adapter_(NULL),
+ decoder_observer_(channel_id) {
+ }
+ int channel_id() { return channel_id_; }
+ void SetRenderer(VideoRenderer* renderer) {
+ render_adapter_.SetRenderer(renderer);
+ }
+ WebRtcRenderAdapter* render_adapter() { return &render_adapter_; }
+ WebRtcDecoderObserver* decoder_observer() { return &decoder_observer_; }
+ void RegisterDecoder(int pl_type, webrtc::VideoDecoder* decoder) {
+ ASSERT(!IsDecoderRegistered(pl_type));
+ registered_decoders_[pl_type] = decoder;
+ }
+ bool IsDecoderRegistered(int pl_type) {
+ return registered_decoders_.count(pl_type) != 0;
+ }
+ const DecoderMap& registered_decoders() {
+ return registered_decoders_;
+ }
+ void ClearRegisteredDecoders() {
+ registered_decoders_.clear();
+ }
+
+ private:
+ int channel_id_; // Webrtc video channel number.
+ // Renderer for this channel.
+ WebRtcRenderAdapter render_adapter_;
+ WebRtcDecoderObserver decoder_observer_;
+ DecoderMap registered_decoders_;
+};
+
+class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
+ public:
+ typedef std::map<int, webrtc::VideoEncoder*> EncoderMap; // key: payload type
+ WebRtcVideoChannelSendInfo(int channel_id, int capture_id,
+ webrtc::ViEExternalCapture* external_capture,
+ talk_base::CpuMonitor* cpu_monitor)
+ : channel_id_(channel_id),
+ capture_id_(capture_id),
+ sending_(false),
+ muted_(false),
+ video_capturer_(NULL),
+ encoder_observer_(channel_id),
+ external_capture_(external_capture),
+ capturer_updated_(false),
+ interval_(0),
+ video_adapter_(new CoordinatedVideoAdapter) {
+ SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
+ if (cpu_monitor) {
+ cpu_monitor->SignalUpdate.connect(
+ video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated);
+ }
+ }
+
+ int channel_id() const { return channel_id_; }
+ int capture_id() const { return capture_id_; }
+ void set_sending(bool sending) { sending_ = sending; }
+ bool sending() const { return sending_; }
+ void set_muted(bool on) {
+ // TODO(asapersson): add support.
+ // video_adapter_->SetBlackOutput(on);
+ muted_ = on;
+ }
+ bool muted() {return muted_; }
+
+ WebRtcEncoderObserver* encoder_observer() { return &encoder_observer_; }
+ webrtc::ViEExternalCapture* external_capture() { return external_capture_; }
+ const VideoFormat& video_format() const {
+ return video_format_;
+ }
+ void set_video_format(const VideoFormat& video_format) {
+ video_format_ = video_format;
+ if (video_format_ != cricket::VideoFormat()) {
+ interval_ = video_format_.interval;
+ }
+ video_adapter_->OnOutputFormatRequest(video_format_);
+ }
+ void set_interval(int64 interval) {
+ if (video_format() == cricket::VideoFormat()) {
+ interval_ = interval;
+ }
+ }
+ int64 interval() { return interval_; }
+
+ void InitializeAdapterOutputFormat(const webrtc::VideoCodec& codec) {
+ VideoFormat format(codec.width, codec.height,
+ VideoFormat::FpsToInterval(codec.maxFramerate),
+ FOURCC_I420);
+ if (video_adapter_->output_format().IsSize0x0()) {
+ video_adapter_->SetOutputFormat(format);
+ }
+ }
+
+ bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) {
+ *out_frame = NULL;
+ return video_adapter_->AdaptFrame(in_frame, out_frame);
+ }
+ int CurrentAdaptReason() const {
+ return video_adapter_->adapt_reason();
+ }
+
+ StreamParams* stream_params() { return stream_params_.get(); }
+ void set_stream_params(const StreamParams& sp) {
+ stream_params_.reset(new StreamParams(sp));
+ }
+ void ClearStreamParams() { stream_params_.reset(); }
+ bool has_ssrc(uint32 local_ssrc) const {
+ return !stream_params_ ? false :
+ stream_params_->has_ssrc(local_ssrc);
+ }
+ WebRtcLocalStreamInfo* local_stream_info() {
+ return &local_stream_info_;
+ }
+ VideoCapturer* video_capturer() {
+ return video_capturer_;
+ }
+ void set_video_capturer(VideoCapturer* video_capturer) {
+ if (video_capturer == video_capturer_) {
+ return;
+ }
+ capturer_updated_ = true;
+ video_capturer_ = video_capturer;
+ if (video_capturer && !video_capturer->IsScreencast()) {
+ const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
+ if (capture_format) {
+ // TODO(thorcarpenter): This is broken. Video capturer doesn't have
+ // a capture format until the capturer is started. So, if
+ // the capturer is started immediately after calling set_video_capturer
+ // video adapter may not have the input format set, the interval may
+ // be zero, and all frames may be dropped.
+ // Consider fixing this by having video_adapter keep a pointer to the
+ // video capturer.
+ video_adapter_->SetInputFormat(*capture_format);
+ }
+ }
+ }
+
+ void ApplyCpuOptions(const VideoOptions& options) {
+ bool cpu_adapt, cpu_smoothing;
+ float low, med, high;
+ if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
+ video_adapter_->set_cpu_adaptation(cpu_adapt);
+ }
+ if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
+ video_adapter_->set_cpu_smoothing(cpu_smoothing);
+ }
+ if (options.process_adaptation_threshhold.Get(&med)) {
+ video_adapter_->set_process_threshold(med);
+ }
+ if (options.system_low_adaptation_threshhold.Get(&low)) {
+ video_adapter_->set_low_system_threshold(low);
+ }
+ if (options.system_high_adaptation_threshhold.Get(&high)) {
+ video_adapter_->set_high_system_threshold(high);
+ }
+ }
+ void ProcessFrame(const VideoFrame& original_frame, bool mute,
+ VideoFrame** processed_frame) {
+ if (!mute) {
+ *processed_frame = original_frame.Copy();
+ } else {
+ WebRtcVideoFrame* black_frame = new WebRtcVideoFrame();
+ black_frame->InitToBlack(static_cast<int>(original_frame.GetWidth()),
+ static_cast<int>(original_frame.GetHeight()),
+ 1, 1,
+ original_frame.GetElapsedTime(),
+ original_frame.GetTimeStamp());
+ *processed_frame = black_frame;
+ }
+ local_stream_info_.UpdateFrame(*processed_frame);
+ }
+ void RegisterEncoder(int pl_type, webrtc::VideoEncoder* encoder) {
+ ASSERT(!IsEncoderRegistered(pl_type));
+ registered_encoders_[pl_type] = encoder;
+ }
+ bool IsEncoderRegistered(int pl_type) {
+ return registered_encoders_.count(pl_type) != 0;
+ }
+ const EncoderMap& registered_encoders() {
+ return registered_encoders_;
+ }
+ void ClearRegisteredEncoders() {
+ registered_encoders_.clear();
+ }
+
+ sigslot::repeater0<> SignalCpuAdaptationUnable;
+
+ private:
+ int channel_id_;
+ int capture_id_;
+ bool sending_;
+ bool muted_;
+ VideoCapturer* video_capturer_;
+ WebRtcEncoderObserver encoder_observer_;
+ webrtc::ViEExternalCapture* external_capture_;
+ EncoderMap registered_encoders_;
+
+ VideoFormat video_format_;
+
+ talk_base::scoped_ptr<StreamParams> stream_params_;
+
+ WebRtcLocalStreamInfo local_stream_info_;
+
+ bool capturer_updated_;
+
+ int64 interval_;
+
+ talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
+};
+
+const WebRtcVideoEngine::VideoCodecPref
+ WebRtcVideoEngine::kVideoCodecPrefs[] = {
+ {kVp8PayloadName, 100, 0},
+ {kRedPayloadName, 116, 1},
+ {kFecPayloadName, 117, 2},
+};
+
+// The formats are sorted by the descending order of width. We use the order to
+// find the next format for CPU and bandwidth adaptation.
+const VideoFormatPod WebRtcVideoEngine::kVideoFormats[] = {
+ {1280, 800, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {1280, 720, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {960, 600, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {960, 540, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {640, 400, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {640, 360, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {640, 480, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {480, 300, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {480, 270, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {480, 360, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {320, 200, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {320, 180, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {320, 240, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {240, 150, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {240, 135, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {240, 180, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {160, 100, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {160, 90, FPS_TO_INTERVAL(30), FOURCC_ANY},
+ {160, 120, FPS_TO_INTERVAL(30), FOURCC_ANY},
+};
+
+const VideoFormatPod WebRtcVideoEngine::kDefaultVideoFormat =
+ {640, 400, FPS_TO_INTERVAL(30), FOURCC_ANY};
+
+static void UpdateVideoCodec(const cricket::VideoFormat& video_format,
+ webrtc::VideoCodec* target_codec) {
+ if ((target_codec == NULL) || (video_format == cricket::VideoFormat())) {
+ return;
+ }
+ target_codec->width = video_format.width;
+ target_codec->height = video_format.height;
+ target_codec->maxFramerate = cricket::VideoFormat::IntervalToFps(
+ video_format.interval);
+}
+
+WebRtcVideoEngine::WebRtcVideoEngine() {
+ Construct(new ViEWrapper(), new ViETraceWrapper(), NULL,
+ new talk_base::CpuMonitor(NULL));
+}
+
+WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+ ViEWrapper* vie_wrapper,
+ talk_base::CpuMonitor* cpu_monitor) {
+ Construct(vie_wrapper, new ViETraceWrapper(), voice_engine, cpu_monitor);
+}
+
+WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+ ViEWrapper* vie_wrapper,
+ ViETraceWrapper* tracing,
+ talk_base::CpuMonitor* cpu_monitor) {
+ Construct(vie_wrapper, tracing, voice_engine, cpu_monitor);
+}
+
+void WebRtcVideoEngine::Construct(ViEWrapper* vie_wrapper,
+ ViETraceWrapper* tracing,
+ WebRtcVoiceEngine* voice_engine,
+ talk_base::CpuMonitor* cpu_monitor) {
+ LOG(LS_INFO) << "WebRtcVideoEngine::WebRtcVideoEngine";
+ worker_thread_ = NULL;
+ vie_wrapper_.reset(vie_wrapper);
+ vie_wrapper_base_initialized_ = false;
+ tracing_.reset(tracing);
+ voice_engine_ = voice_engine;
+ initialized_ = false;
+ SetTraceFilter(SeverityToFilter(kDefaultLogSeverity));
+ render_module_.reset(new WebRtcPassthroughRender());
+ local_renderer_w_ = local_renderer_h_ = 0;
+ local_renderer_ = NULL;
+ capture_started_ = false;
+ decoder_factory_ = NULL;
+ encoder_factory_ = NULL;
+ cpu_monitor_.reset(cpu_monitor);
+
+ SetTraceOptions("");
+ if (tracing_->SetTraceCallback(this) != 0) {
+ LOG_RTCERR1(SetTraceCallback, this);
+ }
+
+ // Set default quality levels for our supported codecs. We override them here
+ // if we know your cpu performance is low, and they can be updated explicitly
+ // by calling SetDefaultCodec. For example by a flute preference setting, or
+ // by the server with a jec in response to our reported system info.
+ VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
+ kVideoCodecPrefs[0].name,
+ kDefaultVideoFormat.width,
+ kDefaultVideoFormat.height,
+ VideoFormat::IntervalToFps(kDefaultVideoFormat.interval),
+ 0);
+ if (!SetDefaultCodec(max_codec)) {
+ LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
+ }
+
+
+ // Load our RTP Header extensions.
+ rtp_header_extensions_.push_back(
+ RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension,
+ kRtpTimeOffsetExtensionId));
+ rtp_header_extensions_.push_back(
+ RtpHeaderExtension(kRtpAbsoluteSendTimeHeaderExtension,
+ kRtpAbsoluteSendTimeExtensionId));
+}
+
+WebRtcVideoEngine::~WebRtcVideoEngine() {
+ LOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
+ if (initialized_) {
+ Terminate();
+ }
+ if (encoder_factory_) {
+ encoder_factory_->RemoveObserver(this);
+ }
+ tracing_->SetTraceCallback(NULL);
+ // Test to see if the media processor was deregistered properly.
+ ASSERT(SignalMediaFrame.is_empty());
+}
+
+bool WebRtcVideoEngine::Init(talk_base::Thread* worker_thread) {
+ LOG(LS_INFO) << "WebRtcVideoEngine::Init";
+ worker_thread_ = worker_thread;
+ ASSERT(worker_thread_ != NULL);
+
+ cpu_monitor_->set_thread(worker_thread_);
+ if (!cpu_monitor_->Start(kCpuMonitorPeriodMs)) {
+ LOG(LS_ERROR) << "Failed to start CPU monitor.";
+ cpu_monitor_.reset();
+ }
+
+ bool result = InitVideoEngine();
+ if (result) {
+ LOG(LS_INFO) << "VideoEngine Init done";
+ } else {
+ LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
+ Terminate();
+ }
+ return result;
+}
+
+// Initializes the wrapped WebRTC VideoEngine: base Init (idempotent via
+// vie_wrapper_base_initialized_), version logging, optional VoiceEngine
+// attachment for A/V sync, and registration of our render module.
+bool WebRtcVideoEngine::InitVideoEngine() {
+  LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";
+
+  // Init WebRTC VideoEngine.
+  if (!vie_wrapper_base_initialized_) {
+    if (vie_wrapper_->base()->Init() != 0) {
+      LOG_RTCERR0(Init);
+      return false;
+    }
+    vie_wrapper_base_initialized_ = true;
+  }
+
+  // Log the VideoEngine version info.
+  char buffer[1024] = "";
+  if (vie_wrapper_->base()->GetVersion(buffer) != 0) {
+    LOG_RTCERR0(GetVersion);
+    return false;
+  }
+
+  LOG(LS_INFO) << "WebRtc VideoEngine Version:";
+  LogMultiline(talk_base::LS_INFO, buffer);
+
+  // Hook up to VoiceEngine for sync purposes, if supplied.
+  if (!voice_engine_) {
+    LOG(LS_WARNING) << "NULL voice engine";
+  } else if ((vie_wrapper_->base()->SetVoiceEngine(
+      voice_engine_->voe()->engine())) != 0) {
+    LOG_RTCERR0(SetVoiceEngine);
+    return false;
+  }
+
+  // Register our custom render module.
+  if (vie_wrapper_->render()->RegisterVideoRenderModule(
+      *render_module_.get()) != 0) {
+    LOG_RTCERR0(RegisterVideoRenderModule);
+    return false;
+  }
+
+  initialized_ = true;
+  return true;
+}
+
+// Reverses InitVideoEngine(): deregisters the render module, detaches the
+// voice engine, and stops the CPU monitor. Failures are logged but not
+// propagated since teardown must proceed regardless.
+void WebRtcVideoEngine::Terminate() {
+  LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
+  initialized_ = false;
+
+  if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
+      *render_module_.get()) != 0) {
+    LOG_RTCERR0(DeRegisterVideoRenderModule);
+  }
+
+  if (vie_wrapper_->base()->SetVoiceEngine(NULL) != 0) {
+    LOG_RTCERR0(SetVoiceEngine);
+  }
+
+  cpu_monitor_->Stop();
+}
+
+// This engine always supports both sending and receiving video.
+int WebRtcVideoEngine::GetCapabilities() {
+  return VIDEO_RECV | VIDEO_SEND;
+}
+
+// Engine-level options are not used; accept any value.
+bool WebRtcVideoEngine::SetOptions(int options) {
+  return true;
+}
+
+// Applies |config|'s max codec as the new engine-wide default codec.
+bool WebRtcVideoEngine::SetDefaultEncoderConfig(
+    const VideoEncoderConfig& config) {
+  return SetDefaultCodec(config.max_codec);
+}
+
+// SetDefaultCodec may be called while the capturer is running. For example, a
+// test call is started in a page with QVGA default codec, and then a real call
+// is started in another page with VGA default codec. This is the corner case
+// and happens only when a session is started. We ignore this case currently.
+// Rebuilds the supported-codec list capped at |codec| and records the first
+// (best) entry as the default capture format.
+bool WebRtcVideoEngine::SetDefaultCodec(const VideoCodec& codec) {
+  if (!RebuildCodecList(codec)) {
+    LOG(LS_WARNING) << "Failed to RebuildCodecList";
+    return false;
+  }
+
+  default_codec_format_ = VideoFormat(
+      video_codecs_[0].width,
+      video_codecs_[0].height,
+      VideoFormat::FpsToInterval(video_codecs_[0].framerate),
+      FOURCC_ANY);
+  return true;
+}
+
+// Creates a media channel paired with |voice_channel| for A/V sync.
+// Returns NULL (after deleting the half-built channel) if Init() fails.
+WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
+    VoiceMediaChannel* voice_channel) {
+  WebRtcVideoMediaChannel* channel =
+      new WebRtcVideoMediaChannel(this, voice_channel);
+  if (!channel->Init()) {
+    delete channel;
+    channel = NULL;
+  }
+  return channel;
+}
+
+// Installs the renderer for the local preview and resets the cached
+// renderer dimensions so the next frame triggers a SetSize.
+bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
+  local_renderer_w_ = local_renderer_h_ = 0;
+  local_renderer_ = renderer;
+  return true;
+}
+
+// Accessor for the current (capability-capped) codec list.
+const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
+  return video_codecs_;
+}
+
+// Accessor for the RTP header extensions this engine offers.
+const std::vector<RtpHeaderExtension>&
+WebRtcVideoEngine::rtp_header_extensions() const {
+  return rtp_header_extensions_;
+}
+
+// Updates the WebRTC trace severity filter and options string.
+void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
+  // if min_sev == -1, we keep the current log level.
+  if (min_sev >= 0) {
+    SetTraceFilter(SeverityToFilter(min_sev));
+  }
+  SetTraceOptions(filter);
+}
+
+// Returns the last error recorded by the VideoEngine wrapper.
+int WebRtcVideoEngine::GetLastEngineError() {
+  return vie_wrapper_->error();
+}
+
+// Checks to see whether we comprehend and could receive a particular codec.
+// A 0x0 resolution in |in| acts as a wildcard that matches every format;
+// otherwise the resolution must match one of kVideoFormats exactly. Codec
+// names are matched against both external-factory codecs and the built-in
+// preference table.
+bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) {
+  for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
+    const VideoFormat fmt(kVideoFormats[i]);
+    if ((in.width == 0 && in.height == 0) ||
+        (fmt.width == in.width && fmt.height == in.height)) {
+      if (encoder_factory_) {
+        const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
+            encoder_factory_->codecs();
+        for (size_t j = 0; j < codecs.size(); ++j) {
+          VideoCodec codec(GetExternalVideoPayloadType(static_cast<int>(j)),
+                           codecs[j].name, 0, 0, 0, 0);
+          if (codec.Matches(in))
+            return true;
+        }
+      }
+      for (size_t j = 0; j < ARRAY_SIZE(kVideoCodecPrefs); ++j) {
+        VideoCodec codec(kVideoCodecPrefs[j].payload_type,
+                         kVideoCodecPrefs[j].name, 0, 0, 0, 0);
+        if (codec.Matches(in)) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
+// Given the requested codec, returns true if we can send that codec type and
+// updates out with the best quality we could send for that codec. If current is
+// not empty, we constrain out so that its aspect ratio matches current's.
+// Resolution selection: 0x0 means "send no frames" (accepted); 0xN / Nx0 is
+// rejected; otherwise the largest kVideoFormats entry within both the
+// requested and local bounds with the best aspect-ratio fit is chosen.
+bool WebRtcVideoEngine::CanSendCodec(const VideoCodec& requested,
+                                     const VideoCodec& current,
+                                     VideoCodec* out) {
+  if (!out) {
+    return false;
+  }
+
+  std::vector<VideoCodec>::const_iterator local_max;
+  for (local_max = video_codecs_.begin();
+       local_max < video_codecs_.end();
+       ++local_max) {
+    // First match codecs by payload type
+    if (!requested.Matches(*local_max)) {
+      continue;
+    }
+
+    out->id = requested.id;
+    out->name = requested.name;
+    out->preference = requested.preference;
+    out->params = requested.params;
+    out->framerate = talk_base::_min(requested.framerate, local_max->framerate);
+    out->width = 0;
+    out->height = 0;
+    out->feedback_params = requested.feedback_params;
+
+    if (0 == requested.width && 0 == requested.height) {
+      // Special case with resolution 0. The channel should not send frames.
+      return true;
+    } else if (0 == requested.width || 0 == requested.height) {
+      // 0xn and nx0 are invalid resolutions.
+      return false;
+    }
+
+    // Pick the best quality that is within their and our bounds and has the
+    // correct aspect ratio.
+    for (int j = 0; j < ARRAY_SIZE(kVideoFormats); ++j) {
+      const VideoFormat format(kVideoFormats[j]);
+
+      // Skip any format that is larger than the local or remote maximums, or
+      // smaller than the current best match
+      if (format.width > requested.width || format.height > requested.height ||
+          format.width > local_max->width ||
+          (format.width < out->width && format.height < out->height)) {
+        continue;
+      }
+
+      bool better = false;
+
+      // Check any further constraints on this prospective format
+      if (!out->width || !out->height) {
+        // If we don't have any matches yet, this is the best so far.
+        better = true;
+      } else if (current.width && current.height) {
+        // current is set so format must match its ratio exactly.
+        better =
+            (format.width * current.height == format.height * current.width);
+      } else {
+        // Prefer closer aspect ratios i.e
+        // format.aspect - requested.aspect < out.aspect - requested.aspect
+        better = abs(format.width * requested.height * out->height -
+                      requested.width * format.height * out->height) <
+                 abs(out->width * format.height * requested.height -
+                      requested.width * format.height * out->height);
+      }
+
+      if (better) {
+        out->width = format.width;
+        out->height = format.height;
+      }
+    }
+    if (out->width > 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Copies a webrtc::VideoCodec into a cricket::VideoCodec, mapping the
+// bitrate/quantization fields into codec params. qpMax of 0 means "unset"
+// and is not propagated.
+static void ConvertToCricketVideoCodec(
+    const webrtc::VideoCodec& in_codec, VideoCodec* out_codec) {
+  out_codec->id = in_codec.plType;
+  out_codec->name = in_codec.plName;
+  out_codec->width = in_codec.width;
+  out_codec->height = in_codec.height;
+  out_codec->framerate = in_codec.maxFramerate;
+  out_codec->SetParam(kCodecParamMinBitrate, in_codec.minBitrate);
+  out_codec->SetParam(kCodecParamMaxBitrate, in_codec.maxBitrate);
+  if (in_codec.qpMax) {
+    out_codec->SetParam(kCodecParamMaxQuantization, in_codec.qpMax);
+  }
+}
+
+// Converts a cricket::VideoCodec to a webrtc::VideoCodec. The base template
+// is looked up by name in the built-in codec list, falling back to the
+// external encoder factory. Non-zero fields in |in_codec| then override the
+// template, and bitrate params are validated (max >= min, start clamped to
+// [min, max]). Returns false for unknown codecs or invalid parameters.
+bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
+    const VideoCodec& in_codec, webrtc::VideoCodec* out_codec) {
+  bool found = false;
+  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
+  for (int i = 0; i < ncodecs; ++i) {
+    if (vie_wrapper_->codec()->GetCodec(i, *out_codec) == 0 &&
+        _stricmp(in_codec.name.c_str(), out_codec->plName) == 0) {
+      found = true;
+      break;
+    }
+  }
+
+  // If not found, check if this is supported by external encoder factory.
+  if (!found && encoder_factory_) {
+    const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
+        encoder_factory_->codecs();
+    for (size_t i = 0; i < codecs.size(); ++i) {
+      if (_stricmp(in_codec.name.c_str(), codecs[i].name.c_str()) == 0) {
+        out_codec->codecType = codecs[i].type;
+        out_codec->plType = GetExternalVideoPayloadType(static_cast<int>(i));
+        talk_base::strcpyn(out_codec->plName, sizeof(out_codec->plName),
+                           codecs[i].name.c_str(), codecs[i].name.length());
+        found = true;
+        break;
+      }
+    }
+  }
+
+  if (!found) {
+    LOG(LS_ERROR) << "invalid codec type";
+    return false;
+  }
+
+  // Non-zero fields in the cricket codec override the template defaults.
+  if (in_codec.id != 0)
+    out_codec->plType = in_codec.id;
+
+  if (in_codec.width != 0)
+    out_codec->width = in_codec.width;
+
+  if (in_codec.height != 0)
+    out_codec->height = in_codec.height;
+
+  if (in_codec.framerate != 0)
+    out_codec->maxFramerate = in_codec.framerate;
+
+  // Convert bitrate parameters.
+  int max_bitrate = kMaxVideoBitrate;
+  int min_bitrate = kMinVideoBitrate;
+  int start_bitrate = kStartVideoBitrate;
+
+  in_codec.GetParam(kCodecParamMinBitrate, &min_bitrate);
+  in_codec.GetParam(kCodecParamMaxBitrate, &max_bitrate);
+
+  if (max_bitrate < min_bitrate) {
+    return false;
+  }
+  start_bitrate = talk_base::_max(start_bitrate, min_bitrate);
+  start_bitrate = talk_base::_min(start_bitrate, max_bitrate);
+
+  out_codec->minBitrate = min_bitrate;
+  out_codec->startBitrate = start_bitrate;
+  out_codec->maxBitrate = max_bitrate;
+
+  // Convert general codec parameters.
+  int max_quantization = 0;
+  if (in_codec.GetParam(kCodecParamMaxQuantization, &max_quantization)) {
+    if (max_quantization < 0) {
+      return false;
+    }
+    out_codec->qpMax = max_quantization;
+  }
+  return true;
+}
+
+// Adds |channel| to the engine's channel list (guarded by channels_crit_).
+void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
+  talk_base::CritScope cs(&channels_crit_);
+  channels_.push_back(channel);
+}
+
+// Removes |channel| from the channel list via erase-remove (guarded).
+void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
+  talk_base::CritScope cs(&channels_crit_);
+  channels_.erase(std::remove(channels_.begin(), channels_.end(), channel),
+                  channels_.end());
+}
+
+// Sets the voice engine used for A/V sync. Only legal before Init().
+bool WebRtcVideoEngine::SetVoiceEngine(WebRtcVoiceEngine* voice_engine) {
+  if (initialized_) {
+    LOG(LS_WARNING) << "SetVoiceEngine can not be called after Init";
+    return false;
+  }
+  voice_engine_ = voice_engine;
+  return true;
+}
+
+// Replaces the render module with a timed (non-passthrough) external
+// renderer. Only legal before Init(), since the module is registered there.
+bool WebRtcVideoEngine::EnableTimedRender() {
+  if (initialized_) {
+    LOG(LS_WARNING) << "EnableTimedRender can not be called after Init";
+    return false;
+  }
+  render_module_.reset(webrtc::VideoRender::CreateVideoRender(0, NULL,
+      false, webrtc::kRenderExternal));
+  return true;
+}
+
+// Forwards the severity filter to the WebRTC tracing module.
+void WebRtcVideoEngine::SetTraceFilter(int filter) {
+  tracing_->SetTraceFilter(filter);
+}
+
+// See https://sites.google.com/a/google.com/wavelet/
+// Home/Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters
+// for all supported command line settings.
+// Parses |options| as space-separated, quote-aware tokens; the token after
+// "tracefile" names the file to receive WebRTC trace output.
+void WebRtcVideoEngine::SetTraceOptions(const std::string& options) {
+  // Set WebRTC trace file.
+  std::vector<std::string> opts;
+  talk_base::tokenize(options, ' ', '"', '"', &opts);
+  std::vector<std::string>::iterator tracefile =
+      std::find(opts.begin(), opts.end(), "tracefile");
+  if (tracefile != opts.end() && ++tracefile != opts.end()) {
+    // Write WebRTC debug output (at same loglevel) to file
+    if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
+      LOG_RTCERR1(SetTraceFile, *tracefile);
+    }
+  }
+}
+
+// Attaches the standard RTCP feedback params (CCM FIR, NACK, REMB) to |codec|.
+static void AddDefaultFeedbackParams(VideoCodec* codec) {
+  const FeedbackParam kFir(kRtcpFbParamCcm, kRtcpFbCcmParamFir);
+  codec->AddFeedbackParam(kFir);
+  const FeedbackParam kNack(kRtcpFbParamNack, kParamValueEmpty);
+  codec->AddFeedbackParam(kNack);
+  const FeedbackParam kRemb(kRtcpFbParamRemb, kParamValueEmpty);
+  codec->AddFeedbackParam(kRemb);
+}
+
+// Rebuilds the codec list to be only those that are less intensive
+// than the specified codec. External-factory codecs are listed first (at
+// their own max capabilities) with higher preference values; built-in codecs
+// follow, capped at |in_codec|'s resolution/framerate, and are suppressed
+// when an external codec of the same name exists.
+bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
+  if (!FindCodec(in_codec))
+    return false;
+
+  video_codecs_.clear();
+
+  bool found = false;
+  std::set<std::string> external_codec_names;
+  if (encoder_factory_) {
+    const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
+        encoder_factory_->codecs();
+    for (size_t i = 0; i < codecs.size(); ++i) {
+      if (!found)
+        found = (in_codec.name == codecs[i].name);
+      VideoCodec codec(
+          GetExternalVideoPayloadType(static_cast<int>(i)),
+          codecs[i].name,
+          codecs[i].max_width,
+          codecs[i].max_height,
+          codecs[i].max_fps,
+          static_cast<int>(codecs.size() + ARRAY_SIZE(kVideoCodecPrefs) - i));
+      AddDefaultFeedbackParams(&codec);
+      video_codecs_.push_back(codec);
+      external_codec_names.insert(codecs[i].name);
+    }
+  }
+  for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
+    const VideoCodecPref& pref(kVideoCodecPrefs[i]);
+    if (!found)
+      found = (in_codec.name == pref.name);
+    bool is_external_codec = external_codec_names.find(pref.name) !=
+        external_codec_names.end();
+    if (found && !is_external_codec) {
+      VideoCodec codec(pref.payload_type, pref.name,
+                       in_codec.width, in_codec.height, in_codec.framerate,
+                       static_cast<int>(ARRAY_SIZE(kVideoCodecPrefs) - i));
+      // Only VP8 gets the RTCP feedback params among built-in codecs.
+      if (_stricmp(kVp8PayloadName, codec.name.c_str()) == 0) {
+        AddDefaultFeedbackParams(&codec);
+      }
+      video_codecs_.push_back(codec);
+    }
+  }
+  ASSERT(found);
+  return true;
+}
+
+// Ignore spammy trace messages, mostly from the stats API when we haven't
+// gotten RTCP info yet from the remote side.
+// The ignore list is currently empty (NULL sentinel only), so this always
+// returns false; entries are matched as message prefixes.
+bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
+  static const char* const kTracesToIgnore[] = {
+    NULL
+  };
+  for (const char* const* p = kTracesToIgnore; *p; ++p) {
+    if (trace.find(*p) == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Returns the number of registered media channels (guarded read).
+int WebRtcVideoEngine::GetNumOfChannels() {
+  talk_base::CritScope cs(&channels_crit_);
+  return static_cast<int>(channels_.size());
+}
+
+// TraceCallback hook: maps WebRTC trace levels onto talk_base severities and
+// forwards messages to our logger, stripping the fixed-width WebRTC prefix
+// (71 chars + trailing byte) and dropping messages on the ignore lists.
+void WebRtcVideoEngine::Print(webrtc::TraceLevel level, const char* trace,
+                              int length) {
+  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
+  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
+    sev = talk_base::LS_ERROR;
+  else if (level == webrtc::kTraceWarning)
+    sev = talk_base::LS_WARNING;
+  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
+    sev = talk_base::LS_INFO;
+  else if (level == webrtc::kTraceTerseInfo)
+    sev = talk_base::LS_INFO;
+
+  // Skip past boilerplate prefix text
+  if (length < 72) {
+    std::string msg(trace, length);
+    LOG(LS_ERROR) << "Malformed webrtc log message: ";
+    LOG_V(sev) << msg;
+  } else {
+    std::string msg(trace + 71, length - 72);
+    if (!ShouldIgnoreTrace(msg) &&
+        (!voice_engine_ || !voice_engine_->ShouldIgnoreTrace(msg))) {
+      LOG_V(sev) << "webrtc: " << msg;
+    }
+  }
+}
+
+// Creates a decoder from the external factory, or NULL if none is installed.
+webrtc::VideoDecoder* WebRtcVideoEngine::CreateExternalDecoder(
+    webrtc::VideoCodecType type) {
+  if (decoder_factory_ == NULL) {
+    return NULL;
+  }
+  return decoder_factory_->CreateVideoDecoder(type);
+}
+
+// Returns |decoder| to its factory. A missing factory is a programming
+// error (asserted) but tolerated in release builds.
+void WebRtcVideoEngine::DestroyExternalDecoder(webrtc::VideoDecoder* decoder) {
+  ASSERT(decoder_factory_ != NULL);
+  if (decoder_factory_ == NULL)
+    return;
+  decoder_factory_->DestroyVideoDecoder(decoder);
+}
+
+// Creates an encoder from the external factory, or NULL if none is installed.
+webrtc::VideoEncoder* WebRtcVideoEngine::CreateExternalEncoder(
+    webrtc::VideoCodecType type) {
+  if (encoder_factory_ == NULL) {
+    return NULL;
+  }
+  return encoder_factory_->CreateVideoEncoder(type);
+}
+
+// Returns |encoder| to its factory. A missing factory is a programming
+// error (asserted) but tolerated in release builds.
+void WebRtcVideoEngine::DestroyExternalEncoder(webrtc::VideoEncoder* encoder) {
+  ASSERT(encoder_factory_ != NULL);
+  if (encoder_factory_ == NULL)
+    return;
+  encoder_factory_->DestroyVideoEncoder(encoder);
+}
+
+// True if the installed encoder factory advertises a codec of |type|.
+bool WebRtcVideoEngine::IsExternalEncoderCodecType(
+    webrtc::VideoCodecType type) const {
+  if (!encoder_factory_)
+    return false;
+  const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
+      encoder_factory_->codecs();
+  std::vector<WebRtcVideoEncoderFactory::VideoCodec>::const_iterator it;
+  for (it = codecs.begin(); it != codecs.end(); ++it) {
+    if (it->type == type)
+      return true;
+  }
+  return false;
+}
+
+// Installs (or clears, with NULL) the external decoder factory.
+void WebRtcVideoEngine::SetExternalDecoderFactory(
+    WebRtcVideoDecoderFactory* decoder_factory) {
+  decoder_factory_ = decoder_factory;
+}
+
+// Installs (or clears, with NULL) the external encoder factory, moving the
+// codec-availability observer registration to the new factory and refreshing
+// the codec list immediately.
+void WebRtcVideoEngine::SetExternalEncoderFactory(
+    WebRtcVideoEncoderFactory* encoder_factory) {
+  if (encoder_factory_ == encoder_factory)
+    return;
+
+  if (encoder_factory_) {
+    encoder_factory_->RemoveObserver(this);
+  }
+  encoder_factory_ = encoder_factory;
+  if (encoder_factory_) {
+    encoder_factory_->AddObserver(this);
+  }
+
+  // Invoke OnCodecAvailable() here in case the list of codecs is already
+  // available when the encoder factory is installed. If not the encoder
+  // factory will invoke the callback later when the codecs become available.
+  OnCodecsAvailable();
+}
+
+// Encoder-factory callback: rebuilds the codec list while keeping the
+// current default format (taken from video_codecs_[0]).
+// NOTE(review): indexes video_codecs_[0] without an emptiness check —
+// presumably the constructor guarantees a non-empty list; confirm.
+void WebRtcVideoEngine::OnCodecsAvailable() {
+  // Rebuild codec list while reapplying the current default codec format.
+  VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
+                       kVideoCodecPrefs[0].name,
+                       video_codecs_[0].width,
+                       video_codecs_[0].height,
+                       video_codecs_[0].framerate,
+                       0);
+  if (!RebuildCodecList(max_codec)) {
+    LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
+  }
+}
+
+// WebRtcVideoMediaChannel
+
+// Constructs a media channel bound to |engine| and paired with |channel|
+// for A/V sync, initializing all state to defaults and registering with the
+// engine. vie_channel_ stays -1 until Init() creates the default channel.
+WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
+    WebRtcVideoEngine* engine,
+    VoiceMediaChannel* channel)
+    : engine_(engine),
+      voice_channel_(channel),
+      vie_channel_(-1),
+      nack_enabled_(true),
+      remb_enabled_(false),
+      render_started_(false),
+      first_receive_ssrc_(0),
+      send_red_type_(-1),
+      send_fec_type_(-1),
+      send_min_bitrate_(kMinVideoBitrate),
+      send_start_bitrate_(kStartVideoBitrate),
+      send_max_bitrate_(kMaxVideoBitrate),
+      sending_(false),
+      ratio_w_(0),
+      ratio_h_(0) {
+  engine->RegisterChannel(this);
+}
+
+// Creates the default send/receive channel (ssrc key 0) into vie_channel_.
+bool WebRtcVideoMediaChannel::Init() {
+  const uint32 ssrc_key = 0;
+  return CreateChannel(ssrc_key, MD_SENDRECV, &vie_channel_);
+}
+
+// Stops sending/rendering, deletes every send and receive channel (the
+// default channel included), then unregisters from the engine and clears
+// any pending worker-thread messages targeting this object.
+WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
+  const bool send = false;
+  SetSend(send);
+  const bool render = false;
+  SetRender(render);
+
+  while (!send_channels_.empty()) {
+    if (!DeleteSendChannel(send_channels_.begin()->first)) {
+      LOG(LS_ERROR) << "Unable to delete channel with ssrc key "
+                    << send_channels_.begin()->first;
+      ASSERT(false);
+      break;
+    }
+  }
+
+  // Remove all receive streams and the default channel.
+  while (!recv_channels_.empty()) {
+    RemoveRecvStream(recv_channels_.begin()->first);
+  }
+
+  // Unregister the channel from the engine.
+  engine()->UnregisterChannel(this);
+  if (worker_thread()) {
+    worker_thread()->Clear(this);
+  }
+}
+
+// Replaces the receive codec list with |codecs| (converted to webrtc form)
+// and pushes the new list to every existing receive channel. Fails — leaving
+// receive_codecs_ partially rebuilt — if any codec is unknown.
+bool WebRtcVideoMediaChannel::SetRecvCodecs(
+    const std::vector<VideoCodec>& codecs) {
+  receive_codecs_.clear();
+  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
+       iter != codecs.end(); ++iter) {
+    if (engine()->FindCodec(*iter)) {
+      webrtc::VideoCodec wcodec;
+      if (engine()->ConvertFromCricketVideoCodec(*iter, &wcodec)) {
+        receive_codecs_.push_back(wcodec);
+      }
+    } else {
+      LOG(LS_INFO) << "Unknown codec " << iter->name;
+      return false;
+    }
+  }
+
+  for (RecvChannelMap::iterator it = recv_channels_.begin();
+      it != recv_channels_.end(); ++it) {
+    if (!SetReceiveCodecs(it->second))
+      return false;
+  }
+  return true;
+}
+
+// Negotiates send codecs: RED/FEC entries only record payload types, other
+// entries are validated via CanSendCodec (constrained to the current send
+// codec's aspect ratio while sending). NACK/REMB flags come from the first
+// accepted codec. The settings are then applied to every receive channel
+// (protection + REMB as receiver) and send channel (protection + REMB as
+// sender), the first matched codec becomes the send codec, and each send
+// channel's adapter output format is reinitialized to it.
+bool WebRtcVideoMediaChannel::SetSendCodecs(
+    const std::vector<VideoCodec>& codecs) {
+  // Match with local video codec list.
+  std::vector<webrtc::VideoCodec> send_codecs;
+  VideoCodec checked_codec;
+  VideoCodec current;  // defaults to 0x0
+  if (sending_) {
+    ConvertToCricketVideoCodec(*send_codec_, &current);
+  }
+  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
+       iter != codecs.end(); ++iter) {
+    if (_stricmp(iter->name.c_str(), kRedPayloadName) == 0) {
+      send_red_type_ = iter->id;
+    } else if (_stricmp(iter->name.c_str(), kFecPayloadName) == 0) {
+      send_fec_type_ = iter->id;
+    } else if (engine()->CanSendCodec(*iter, current, &checked_codec)) {
+      webrtc::VideoCodec wcodec;
+      if (engine()->ConvertFromCricketVideoCodec(checked_codec, &wcodec)) {
+        if (send_codecs.empty()) {
+          nack_enabled_ = IsNackEnabled(checked_codec);
+          remb_enabled_ = IsRembEnabled(checked_codec);
+        }
+        send_codecs.push_back(wcodec);
+      }
+    } else {
+      LOG(LS_WARNING) << "Unknown codec " << iter->name;
+    }
+  }
+
+  // Fail if we don't have a match.
+  if (send_codecs.empty()) {
+    LOG(LS_WARNING) << "No matching codecs available";
+    return false;
+  }
+
+  // Recv protection.
+  for (RecvChannelMap::iterator it = recv_channels_.begin();
+       it != recv_channels_.end(); ++it) {
+    int channel_id = it->second->channel_id();
+    if (!SetNackFec(channel_id, send_red_type_, send_fec_type_,
+                    nack_enabled_)) {
+      return false;
+    }
+    if (engine_->vie()->rtp()->SetRembStatus(channel_id,
+                                             kNotSending,
+                                             remb_enabled_) != 0) {
+      LOG_RTCERR3(SetRembStatus, channel_id, kNotSending, remb_enabled_);
+      return false;
+    }
+  }
+
+  // Send settings.
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    int channel_id = iter->second->channel_id();
+    if (!SetNackFec(channel_id, send_red_type_, send_fec_type_,
+                    nack_enabled_)) {
+      return false;
+    }
+    if (engine_->vie()->rtp()->SetRembStatus(channel_id,
+                                             remb_enabled_,
+                                             remb_enabled_) != 0) {
+      LOG_RTCERR3(SetRembStatus, channel_id, remb_enabled_, remb_enabled_);
+      return false;
+    }
+  }
+
+  // Select the first matched codec.
+  webrtc::VideoCodec& codec(send_codecs[0]);
+
+  if (!SetSendCodec(
+          codec, codec.minBitrate, codec.startBitrate, codec.maxBitrate)) {
+    return false;
+  }
+
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    send_channel->InitializeAdapterOutputFormat(codec);
+  }
+
+  LogSendCodecChange("SetSendCodecs()");
+
+  return true;
+}
+
+// Copies the current send codec into |send_codec|; false if none is set.
+bool WebRtcVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
+  if (!send_codec_) {
+    return false;
+  }
+  ConvertToCricketVideoCodec(*send_codec_, send_codec);
+  return true;
+}
+
+// Sets the per-stream capture format for the send channel owning |ssrc|.
+// Requires a send codec to already be negotiated and the ssrc to be in use.
+bool WebRtcVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
+                                                  const VideoFormat& format) {
+  if (!send_codec_) {
+    LOG(LS_ERROR) << "The send codec has not been set yet.";
+    return false;
+  }
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
+  if (!send_channel) {
+    LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";
+    return false;
+  }
+  send_channel->set_video_format(format);
+  return true;
+}
+
+// Starts or stops rendering on every receive channel. render_started_ is
+// updated only if all channels succeed, so a failed toggle can be retried.
+bool WebRtcVideoMediaChannel::SetRender(bool render) {
+  if (render == render_started_) {
+    return true;  // no action required
+  }
+
+  bool ret = true;
+  for (RecvChannelMap::iterator it = recv_channels_.begin();
+       it != recv_channels_.end(); ++it) {
+    if (render) {
+      if (engine()->vie()->render()->StartRender(
+          it->second->channel_id()) != 0) {
+        LOG_RTCERR1(StartRender, it->second->channel_id());
+        ret = false;
+      }
+    } else {
+      if (engine()->vie()->render()->StopRender(
+          it->second->channel_id()) != 0) {
+        LOG_RTCERR1(StopRender, it->second->channel_id());
+        ret = false;
+      }
+    }
+  }
+  if (ret) {
+    render_started_ = render;
+  }
+
+  return ret;
+}
+
+// Toggles sending on all send channels. Requires at least one ready send
+// stream and a negotiated send codec before sending can start; no-op when
+// the requested state matches the current one.
+bool WebRtcVideoMediaChannel::SetSend(bool send) {
+  if (!HasReadySendChannels() && send) {
+    LOG(LS_ERROR) << "No stream added";
+    return false;
+  }
+  if (send == sending()) {
+    return true;  // No action required.
+  }
+
+  if (send) {
+    // We've been asked to start sending.
+    // SetSendCodecs must have been called already.
+    if (!send_codec_) {
+      return false;
+    }
+    // Start send now.
+    if (!StartSend()) {
+      return false;
+    }
+  } else {
+    // We've been asked to stop sending.
+    if (!StopSend()) {
+      return false;
+    }
+  }
+  sending_ = send;
+
+  return true;
+}
+
+// Registers a new single-SSRC send stream. Reuses the default channel when
+// it is still unclaimed, otherwise creates a fresh send-only channel. Sets
+// the local SSRC and RTCP CName, mirrors the SSRC onto all receive channels
+// when the default channel is (re)claimed, reapplies the send codec, and
+// starts sending if the channel is already in the sending state.
+bool WebRtcVideoMediaChannel::AddSendStream(const StreamParams& sp) {
+  LOG(LS_INFO) << "AddSendStream " << sp.ToString();
+
+  if (!IsOneSsrcStream(sp)) {
+    LOG(LS_ERROR) << "AddSendStream: bad local stream parameters";
+    return false;
+  }
+
+  uint32 ssrc_key;
+  if (!CreateSendChannelKey(sp.first_ssrc(), &ssrc_key)) {
+    LOG(LS_ERROR) << "Trying to register duplicate ssrc: " << sp.first_ssrc();
+    return false;
+  }
+  // If the default channel is already used for sending create a new channel
+  // otherwise use the default channel for sending.
+  int channel_id = -1;
+  if (send_channels_[0]->stream_params() == NULL) {
+    channel_id = vie_channel_;
+  } else {
+    if (!CreateChannel(ssrc_key, MD_SEND, &channel_id)) {
+      LOG(LS_ERROR) << "AddSendStream: unable to create channel";
+      return false;
+    }
+  }
+  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
+  // Set the send (local) SSRC.
+  // If there are multiple send SSRCs, we can only set the first one here, and
+  // the rest of the SSRC(s) need to be set after SetSendCodec has been called
+  // (with a codec requires multiple SSRC(s)).
+  if (engine()->vie()->rtp()->SetLocalSSRC(channel_id,
+                                           sp.first_ssrc()) != 0) {
+    LOG_RTCERR2(SetLocalSSRC, channel_id, sp.first_ssrc());
+    return false;
+  }
+
+  // Set RTCP CName.
+  if (engine()->vie()->rtp()->SetRTCPCName(channel_id,
+                                           sp.cname.c_str()) != 0) {
+    LOG_RTCERR2(SetRTCPCName, channel_id, sp.cname.c_str());
+    return false;
+  }
+
+  // At this point the channel's local SSRC has been updated. If the channel is
+  // the default channel make sure that all the receive channels are updated as
+  // well. Receive channels have to have the same SSRC as the default channel in
+  // order to send receiver reports with this SSRC.
+  if (IsDefaultChannel(channel_id)) {
+    for (RecvChannelMap::const_iterator it = recv_channels_.begin();
+         it != recv_channels_.end(); ++it) {
+      WebRtcVideoChannelRecvInfo* info = it->second;
+      // Use a distinct name to avoid shadowing the outer channel_id.
+      int recv_channel_id = info->channel_id();
+      if (engine()->vie()->rtp()->SetLocalSSRC(recv_channel_id,
+                                               sp.first_ssrc()) != 0) {
+        LOG_RTCERR2(SetLocalSSRC, recv_channel_id, sp.first_ssrc());
+        return false;
+      }
+    }
+  }
+
+  send_channel->set_stream_params(sp);
+
+  // Reset send codec after stream parameters changed.
+  if (send_codec_) {
+    if (!SetSendCodec(send_channel, *send_codec_, send_min_bitrate_,
+                      send_start_bitrate_, send_max_bitrate_)) {
+      return false;
+    }
+    LogSendCodecChange("AddSendStream()");
+  }
+
+  if (sending_) {
+    return StartSend(send_channel);
+  }
+  return true;
+}
+
+// Removes the send stream owning |ssrc|: stops sending, deregisters and
+// destroys its external encoders, then either recycles the default channel
+// (receive channels depend on it) or deletes the dedicated send channel.
+bool WebRtcVideoMediaChannel::RemoveSendStream(uint32 ssrc) {
+  uint32 ssrc_key;
+  if (!GetSendChannelKey(ssrc, &ssrc_key)) {
+    LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
+                    << " which doesn't exist.";
+    return false;
+  }
+  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
+  int channel_id = send_channel->channel_id();
+  if (IsDefaultChannel(channel_id) && (send_channel->stream_params() == NULL)) {
+    // Default channel will still exist. However, if stream_params() is NULL
+    // there is no stream to remove.
+    return false;
+  }
+  if (sending_) {
+    StopSend(send_channel);
+  }
+
+  const WebRtcVideoChannelSendInfo::EncoderMap& encoder_map =
+      send_channel->registered_encoders();
+  for (WebRtcVideoChannelSendInfo::EncoderMap::const_iterator it =
+      encoder_map.begin(); it != encoder_map.end(); ++it) {
+    if (engine()->vie()->ext_codec()->DeRegisterExternalSendCodec(
+        channel_id, it->first) != 0) {
+      LOG_RTCERR1(DeregisterEncoderObserver, channel_id);
+    }
+    engine()->DestroyExternalEncoder(it->second);
+  }
+  send_channel->ClearRegisteredEncoders();
+
+  // The receive channels depend on the default channel, recycle it instead.
+  if (IsDefaultChannel(channel_id)) {
+    SetCapturer(GetDefaultChannelSsrc(), NULL);
+    send_channel->ClearStreamParams();
+  } else {
+    return DeleteSendChannel(ssrc_key);
+  }
+  return true;
+}
+
+// Adds a receive stream. In a 1:1 call the first stream reuses the default
+// channel; otherwise a dedicated receive channel is created (connected to
+// the master send channel for bandwidth estimation) and, in conference
+// mode, the first such channel inherits the default channel's renderer.
+bool WebRtcVideoMediaChannel::AddRecvStream(const StreamParams& sp) {
+  // TODO(zhurunz) Remove this once BWE works properly across different send
+  // and receive channels.
+  // Reuse default channel for recv stream in 1:1 call.
+  if (!InConferenceMode() && first_receive_ssrc_ == 0) {
+    LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
+                 << " reuse default channel #"
+                 << vie_channel_;
+    first_receive_ssrc_ = sp.first_ssrc();
+    if (render_started_) {
+      if (engine()->vie()->render()->StartRender(vie_channel_) !=0) {
+        LOG_RTCERR1(StartRender, vie_channel_);
+      }
+    }
+    return true;
+  }
+
+  if (recv_channels_.find(sp.first_ssrc()) != recv_channels_.end() ||
+      first_receive_ssrc_ == sp.first_ssrc()) {
+    LOG(LS_ERROR) << "Stream already exists";
+    return false;
+  }
+
+  // TODO(perkj): Implement recv media from multiple SSRCs per stream.
+  if (sp.ssrcs.size() != 1) {
+    LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one receiving SSRC per"
+                  << " stream";
+    return false;
+  }
+
+  // Create a new channel for receiving video data.
+  // In order to get the bandwidth estimation work fine for
+  // receive only channels, we connect all receiving channels
+  // to our master send channel.
+  int channel_id = -1;
+  if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
+    return false;
+  }
+
+  // Get the default renderer.
+  VideoRenderer* default_renderer = NULL;
+  if (InConferenceMode()) {
+    // The recv_channels_ size start out being 1, so if it is two here this
+    // is the first receive channel created (vie_channel_ is not used for
+    // receiving in a conference call). This means that the renderer stored
+    // inside vie_channel_ should be used for the just created channel.
+    if (recv_channels_.size() == 2 &&
+        recv_channels_.find(0) != recv_channels_.end()) {
+      GetRenderer(0, &default_renderer);
+    }
+  }
+
+  // The first recv stream reuses the default renderer (if a default renderer
+  // has been set).
+  if (default_renderer) {
+    SetRenderer(sp.first_ssrc(), default_renderer);
+  }
+
+  LOG(LS_INFO) << "New video stream " << sp.first_ssrc()
+               << " registered to VideoEngine channel #"
+               << channel_id << " and connected to channel #" << vie_channel_;
+
+  return true;
+}
+
+// Removes the receive stream for |ssrc|. When the ssrc is the one riding on
+// the recycled default channel, rendering is stopped and the renderer is
+// detached but the channel survives. Otherwise the dedicated receive channel
+// is fully torn down: renderer, send transport, decoder observer, external
+// decoders, and finally the ViE channel itself.
+bool WebRtcVideoMediaChannel::RemoveRecvStream(uint32 ssrc) {
+  RecvChannelMap::iterator it = recv_channels_.find(ssrc);
+
+  if (it == recv_channels_.end()) {
+    // TODO(perkj): Remove this once BWE works properly across different send
+    // and receive channels.
+    // The default channel is reused for recv stream in 1:1 call.
+    if (first_receive_ssrc_ == ssrc) {
+      first_receive_ssrc_ = 0;
+      // Need to stop the renderer and remove it since the render window can be
+      // deleted after this.
+      if (render_started_) {
+        if (engine()->vie()->render()->StopRender(vie_channel_) !=0) {
+          // |it| is past-the-end here; log the default channel id instead.
+          LOG_RTCERR1(StopRender, vie_channel_);
+        }
+      }
+      recv_channels_[0]->SetRenderer(NULL);
+      return true;
+    }
+    return false;
+  }
+  WebRtcVideoChannelRecvInfo* info = it->second;
+  int channel_id = info->channel_id();
+  if (engine()->vie()->render()->RemoveRenderer(channel_id) != 0) {
+    LOG_RTCERR1(RemoveRenderer, channel_id);
+  }
+
+  if (engine()->vie()->network()->DeregisterSendTransport(channel_id) !=0) {
+    LOG_RTCERR1(DeRegisterSendTransport, channel_id);
+  }
+
+  if (engine()->vie()->codec()->DeregisterDecoderObserver(
+      channel_id) != 0) {
+    LOG_RTCERR1(DeregisterDecoderObserver, channel_id);
+  }
+
+  const WebRtcVideoChannelRecvInfo::DecoderMap& decoder_map =
+      info->registered_decoders();
+  // Use a distinct iterator name to avoid shadowing |it| above.
+  for (WebRtcVideoChannelRecvInfo::DecoderMap::const_iterator decoder_it =
+      decoder_map.begin(); decoder_it != decoder_map.end(); ++decoder_it) {
+    if (engine()->vie()->ext_codec()->DeRegisterExternalReceiveCodec(
+        channel_id, decoder_it->first) != 0) {
+      LOG_RTCERR1(DeregisterDecoderObserver, channel_id);
+    }
+    engine()->DestroyExternalDecoder(decoder_it->second);
+  }
+  info->ClearRegisteredDecoders();
+
+  LOG(LS_INFO) << "Removing video stream " << ssrc
+               << " with VideoEngine channel #"
+               << channel_id;
+  if (engine()->vie()->base()->DeleteChannel(channel_id) == -1) {
+    LOG_RTCERR1(DeleteChannel, channel_id);
+    // Leak the WebRtcVideoChannelRecvInfo owned by |it| but remove the channel
+    // from recv_channels_.
+    recv_channels_.erase(it);
+    return false;
+  }
+  // Delete the WebRtcVideoChannelRecvInfo pointed to by it->second.
+  delete info;
+  recv_channels_.erase(it);
+  return true;
+}
+
+// Starts sending on all send channels; false if any channel fails (the
+// remaining channels are still attempted).
+bool WebRtcVideoMediaChannel::StartSend() {
+  bool success = true;
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    if (!StartSend(send_channel)) {
+      success = false;
+    }
+  }
+  return success;
+}
+
+bool WebRtcVideoMediaChannel::StartSend(
+    WebRtcVideoChannelSendInfo* send_channel) {
+  // Ask ViE to start transmitting on this channel; on success record the
+  // new sending state on the channel info.
+  const int id = send_channel->channel_id();
+  const bool ok = (engine()->vie()->base()->StartSend(id) == 0);
+  if (!ok) {
+    LOG_RTCERR1(StartSend, id);
+  } else {
+    send_channel->set_sending(true);
+  }
+  return ok;
+}
+
+bool WebRtcVideoMediaChannel::StopSend() {
+  // Stop transmission on every send channel. All channels are attempted
+  // even if one fails; false is returned if any of them could not stop.
+  bool all_stopped = true;
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    all_stopped = StopSend(iter->second) && all_stopped;
+  }
+  return all_stopped;
+}
+
+bool WebRtcVideoMediaChannel::StopSend(
+    WebRtcVideoChannelSendInfo* send_channel) {
+  // Ask ViE to stop transmitting on this channel; on success clear the
+  // sending flag on the channel info.
+  const int id = send_channel->channel_id();
+  const bool ok = (engine()->vie()->base()->StopSend(id) == 0);
+  if (!ok) {
+    LOG_RTCERR1(StopSend, id);
+  } else {
+    send_channel->set_sending(false);
+  }
+  return ok;
+}
+
+bool WebRtcVideoMediaChannel::SendIntraFrame() {
+  // Request a key frame from the encoder of every send channel. A failure
+  // on one channel does not stop the others from being asked.
+  bool all_ok = true;
+  for (SendChannelMap::iterator it = send_channels_.begin();
+       it != send_channels_.end(); ++it) {
+    const int id = it->second->channel_id();
+    if (engine()->vie()->codec()->SendKeyFrame(id) != 0) {
+      LOG_RTCERR1(SendKeyFrame, id);
+      all_ok = false;
+    }
+  }
+  return all_ok;
+}
+
+bool WebRtcVideoMediaChannel::IsOneSsrcStream(const StreamParams& sp) {
+  // A stream is "simple" when it carries exactly one SSRC and declares no
+  // SSRC groups.
+  return sp.ssrcs.size() == 1 && sp.ssrc_groups.empty();
+}
+
+bool WebRtcVideoMediaChannel::HasReadySendChannels() {
+  // The default channel (key 0) only counts as ready once stream parameters
+  // have been registered on it; any additional channel is always ready.
+  if (send_channels_.empty()) {
+    return false;
+  }
+  if (send_channels_.size() > 1) {
+    return true;
+  }
+  return send_channels_[0]->stream_params() != NULL;
+}
+
+bool WebRtcVideoMediaChannel::GetSendChannelKey(uint32 local_ssrc,
+                                                uint32* key) {
+  // Maps |local_ssrc| to the key under which its send channel is stored in
+  // |send_channels_|. Returns false if no ready channel carries the ssrc.
+  *key = 0;
+  // A send channel that is not ready to send has no local ssrc registered
+  // to it.
+  if (!HasReadySendChannels()) {
+    return false;
+  }
+  // The default channel is stored under key 0, so its key does not match its
+  // SSRC; detect that case explicitly.
+  if (local_ssrc == GetDefaultChannelSsrc()) {
+    return true;
+  }
+  if (send_channels_.find(local_ssrc) != send_channels_.end()) {
+    // Non-default channels are keyed by their primary ssrc.
+    *key = local_ssrc;
+    return true;
+  }
+  // Fall back to scanning every channel for a secondary ssrc match.
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    if (iter->second->has_ssrc(local_ssrc)) {
+      *key = iter->first;
+      return true;
+    }
+  }
+  return false;
+}
+
+WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
+    VideoCapturer* video_capturer) {
+  // Linear scan for the send channel whose capturer matches, if any.
+  SendChannelMap::iterator it = send_channels_.begin();
+  for (; it != send_channels_.end(); ++it) {
+    if (it->second->video_capturer() == video_capturer) {
+      return it->second;
+    }
+  }
+  return NULL;
+}
+
+WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
+    uint32 local_ssrc) {
+  // Resolve the ssrc to its map key first; unknown ssrcs have no channel.
+  uint32 key;
+  return GetSendChannelKey(local_ssrc, &key) ? send_channels_[key] : NULL;
+}
+
+bool WebRtcVideoMediaChannel::CreateSendChannelKey(uint32 local_ssrc,
+                                                   uint32* key) {
+  // Picks the |send_channels_| key under which a new send channel for
+  // |local_ssrc| should be stored. Returns false when the ssrc is already
+  // in use by an existing send channel.
+  if (GetSendChannelKey(local_ssrc, key)) {
+    // If there is a key corresponding to |local_ssrc|, the SSRC is already in
+    // use. SSRCs need to be unique in a session and at this point a duplicate
+    // SSRC has been detected.
+    return false;
+  }
+  if (send_channels_[0]->stream_params() == NULL) {
+    // key should be 0 here as the default channel should be re-used whenever it
+    // is not used.
+    *key = 0;
+    return true;
+  }
+  // SSRC is currently not in use and the default channel is already in use. Use
+  // the SSRC as key since it is supposed to be unique in a session.
+  *key = local_ssrc;
+  return true;
+}
+
+uint32 WebRtcVideoMediaChannel::GetDefaultChannelSsrc() {
+  // Returns the primary ssrc of the default send channel, or 0 when no send
+  // stream has been registered on it.
+  const StreamParams* params = send_channels_[0]->stream_params();
+  return (params != NULL) ? params->first_ssrc() : 0;
+}
+
+bool WebRtcVideoMediaChannel::DeleteSendChannel(uint32 ssrc_key) {
+  // Tears down the send channel stored under |ssrc_key|: detaches its
+  // capturer, deregisters the encoder observer and capture device, and
+  // deletes the ViE channel (except for the default channel, see below).
+  // Returns false only when no channel exists for |ssrc_key|.
+  if (send_channels_.find(ssrc_key) == send_channels_.end()) {
+    return false;
+  }
+  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
+  VideoCapturer* capturer = send_channel->video_capturer();
+  if (capturer != NULL) {
+    // Stop receiving frames from the capturer before tearing anything down.
+    capturer->SignalVideoFrame.disconnect(this);
+    send_channel->set_video_capturer(NULL);
+  }
+
+  int channel_id = send_channel->channel_id();
+  int capture_id = send_channel->capture_id();
+  if (engine()->vie()->codec()->DeregisterEncoderObserver(
+      channel_id) != 0) {
+    LOG_RTCERR1(DeregisterEncoderObserver, channel_id);
+  }
+
+  // Destroy the external capture interface.
+  if (engine()->vie()->capture()->DisconnectCaptureDevice(
+      channel_id) != 0) {
+    LOG_RTCERR1(DisconnectCaptureDevice, channel_id);
+  }
+  if (engine()->vie()->capture()->ReleaseCaptureDevice(
+      capture_id) != 0) {
+    LOG_RTCERR1(ReleaseCaptureDevice, capture_id);
+  }
+
+  // The default channel is stored in both |send_channels_| and
+  // |recv_channels_|. To make sure it is only deleted once from vie let the
+  // delete call happen when tearing down |recv_channels_| and not here.
+  if (!IsDefaultChannel(channel_id)) {
+    engine_->vie()->base()->DeleteChannel(channel_id);
+  }
+  delete send_channel;
+  send_channels_.erase(ssrc_key);
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::RemoveCapturer(uint32 ssrc) {
+  // Detach the capturer feeding |ssrc|, if any, then queue a black frame
+  // on the now capturer-less stream (only when a send codec is configured).
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
+  if (send_channel == NULL || send_channel->video_capturer() == NULL) {
+    return false;
+  }
+  VideoCapturer* capturer = send_channel->video_capturer();
+  capturer->SignalVideoFrame.disconnect(this);
+  send_channel->set_video_capturer(NULL);
+  if (send_codec_) {
+    QueueBlackFrame(ssrc, send_channel->local_stream_info()->time_stamp(),
+                    send_codec_->maxFramerate);
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetRenderer(uint32 ssrc,
+                                          VideoRenderer* renderer) {
+  // Attaches |renderer| to the receive channel associated with |ssrc|.
+  // Returns false when no receive channel (including the reused default
+  // channel) maps to |ssrc|.
+  if (recv_channels_.find(ssrc) == recv_channels_.end()) {
+    // TODO(perkj): Remove this once BWE works properly across different send
+    // and receive channels.
+    // The default channel is reused for recv stream in 1:1 call.
+    if (first_receive_ssrc_ == ssrc &&
+        recv_channels_.find(0) != recv_channels_.end()) {
+      LOG(LS_INFO) << "SetRenderer " << ssrc
+                   << " reuse default channel #"
+                   << vie_channel_;
+      recv_channels_[0]->SetRenderer(renderer);
+      return true;
+    }
+    return false;
+  }
+
+  recv_channels_[ssrc]->SetRenderer(renderer);
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
+  // Collects per-sender, per-receiver and bandwidth-estimation statistics
+  // from ViE and fills in |info|. Returns false only if receive-side RTP
+  // statistics cannot be read; send-side failures are logged and skipped.
+  // Get sender statistics and build VideoSenderInfo.
+  unsigned int total_bitrate_sent = 0;
+  unsigned int video_bitrate_sent = 0;
+  unsigned int fec_bitrate_sent = 0;
+  unsigned int nack_bitrate_sent = 0;
+  unsigned int estimated_send_bandwidth = 0;
+  unsigned int target_enc_bitrate = 0;
+  if (send_codec_) {
+    for (SendChannelMap::const_iterator iter = send_channels_.begin();
+         iter != send_channels_.end(); ++iter) {
+      WebRtcVideoChannelSendInfo* send_channel = iter->second;
+      const int channel_id = send_channel->channel_id();
+      VideoSenderInfo sinfo;
+      const StreamParams* send_params = send_channel->stream_params();
+      if (send_params == NULL) {
+        // This should only happen if the default vie channel is not in use.
+        // This can happen if no streams have ever been added or the stream
+        // corresponding to the default channel has been removed. Note that
+        // there may be non-default vie channels in use when this happen so
+        // asserting send_channels_.size() == 1 is not correct and neither is
+        // breaking out of the loop.
+        ASSERT(channel_id == vie_channel_);
+        continue;
+      }
+      unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
+      if (engine_->vie()->rtp()->GetRTPStatistics(channel_id, bytes_sent,
+                                                  packets_sent, bytes_recv,
+                                                  packets_recv) != 0) {
+        LOG_RTCERR1(GetRTPStatistics, vie_channel_);
+        continue;
+      }
+      WebRtcLocalStreamInfo* channel_stream_info =
+          send_channel->local_stream_info();
+
+      // Fields with no data source yet are reported as -1.
+      sinfo.ssrcs = send_params->ssrcs;
+      sinfo.codec_name = send_codec_->plName;
+      sinfo.bytes_sent = bytes_sent;
+      sinfo.packets_sent = packets_sent;
+      sinfo.packets_cached = -1;
+      sinfo.packets_lost = -1;
+      sinfo.fraction_lost = -1;
+      sinfo.firs_rcvd = -1;
+      sinfo.nacks_rcvd = -1;
+      sinfo.rtt_ms = -1;
+      sinfo.frame_width = static_cast<int>(channel_stream_info->width());
+      sinfo.frame_height = static_cast<int>(channel_stream_info->height());
+      sinfo.framerate_input = channel_stream_info->framerate();
+      sinfo.framerate_sent = send_channel->encoder_observer()->framerate();
+      sinfo.nominal_bitrate = send_channel->encoder_observer()->bitrate();
+      sinfo.preferred_bitrate = send_max_bitrate_;
+      sinfo.adapt_reason = send_channel->CurrentAdaptReason();
+
+      // Get received RTCP statistics for the sender, if available.
+      // It's not a fatal error if we can't, since RTCP may not have arrived
+      // yet.
+      uint16 r_fraction_lost;
+      unsigned int r_cumulative_lost;
+      unsigned int r_extended_max;
+      unsigned int r_jitter;
+      int r_rtt_ms;
+
+      if (engine_->vie()->rtp()->GetSentRTCPStatistics(
+              channel_id,
+              r_fraction_lost,
+              r_cumulative_lost,
+              r_extended_max,
+              r_jitter, r_rtt_ms) == 0) {
+        // Convert Q8 to float.
+        sinfo.packets_lost = r_cumulative_lost;
+        sinfo.fraction_lost = static_cast<float>(r_fraction_lost) / (1 << 8);
+        sinfo.rtt_ms = r_rtt_ms;
+      }
+      info->senders.push_back(sinfo);
+
+      // Accumulate per-channel bandwidth usage into the session-wide totals
+      // reported in the BandwidthEstimationInfo below.
+      unsigned int channel_total_bitrate_sent = 0;
+      unsigned int channel_video_bitrate_sent = 0;
+      unsigned int channel_fec_bitrate_sent = 0;
+      unsigned int channel_nack_bitrate_sent = 0;
+      if (engine_->vie()->rtp()->GetBandwidthUsage(
+          channel_id, channel_total_bitrate_sent, channel_video_bitrate_sent,
+          channel_fec_bitrate_sent, channel_nack_bitrate_sent) == 0) {
+        total_bitrate_sent += channel_total_bitrate_sent;
+        video_bitrate_sent += channel_video_bitrate_sent;
+        fec_bitrate_sent += channel_fec_bitrate_sent;
+        nack_bitrate_sent += channel_nack_bitrate_sent;
+      } else {
+        LOG_RTCERR1(GetBandwidthUsage, channel_id);
+      }
+
+      unsigned int estimated_stream_send_bandwidth = 0;
+      if (engine_->vie()->rtp()->GetEstimatedSendBandwidth(
+          channel_id, &estimated_stream_send_bandwidth) == 0) {
+        estimated_send_bandwidth += estimated_stream_send_bandwidth;
+      } else {
+        LOG_RTCERR1(GetEstimatedSendBandwidth, channel_id);
+      }
+      unsigned int target_enc_stream_bitrate = 0;
+      if (engine_->vie()->codec()->GetCodecTargetBitrate(
+          channel_id, &target_enc_stream_bitrate) == 0) {
+        target_enc_bitrate += target_enc_stream_bitrate;
+      } else {
+        LOG_RTCERR1(GetCodecTargetBitrate, channel_id);
+      }
+    }
+  } else {
+    LOG(LS_WARNING) << "GetStats: sender information not ready.";
+  }
+
+  // Get the SSRC and stats for each receiver, based on our own calculations.
+  unsigned int estimated_recv_bandwidth = 0;
+  for (RecvChannelMap::const_iterator it = recv_channels_.begin();
+       it != recv_channels_.end(); ++it) {
+    // Don't report receive statistics from the default channel if we have
+    // specified receive channels.
+    if (it->first == 0 && recv_channels_.size() > 1)
+      continue;
+    WebRtcVideoChannelRecvInfo* channel = it->second;
+
+    unsigned int ssrc;
+    // Get receiver statistics and build VideoReceiverInfo, if we have data.
+    if (engine_->vie()->rtp()->GetRemoteSSRC(channel->channel_id(), ssrc) != 0)
+      continue;
+
+    unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
+    if (engine_->vie()->rtp()->GetRTPStatistics(
+        channel->channel_id(), bytes_sent, packets_sent, bytes_recv,
+        packets_recv) != 0) {
+      LOG_RTCERR1(GetRTPStatistics, channel->channel_id());
+      return false;
+    }
+    // Fields with no data source yet are reported as -1.
+    VideoReceiverInfo rinfo;
+    rinfo.ssrcs.push_back(ssrc);
+    rinfo.bytes_rcvd = bytes_recv;
+    rinfo.packets_rcvd = packets_recv;
+    rinfo.packets_lost = -1;
+    rinfo.packets_concealed = -1;
+    rinfo.fraction_lost = -1;  // from SentRTCP
+    rinfo.firs_sent = channel->decoder_observer()->firs_requested();
+    rinfo.nacks_sent = -1;
+    rinfo.frame_width = channel->render_adapter()->width();
+    rinfo.frame_height = channel->render_adapter()->height();
+    rinfo.framerate_rcvd = channel->decoder_observer()->framerate();
+    int fps = channel->render_adapter()->framerate();
+    rinfo.framerate_decoded = fps;
+    rinfo.framerate_output = fps;
+
+    // Get sent RTCP statistics.
+    uint16 s_fraction_lost;
+    unsigned int s_cumulative_lost;
+    unsigned int s_extended_max;
+    unsigned int s_jitter;
+    int s_rtt_ms;
+    if (engine_->vie()->rtp()->GetReceivedRTCPStatistics(channel->channel_id(),
+            s_fraction_lost, s_cumulative_lost, s_extended_max,
+            s_jitter, s_rtt_ms) == 0) {
+      // Convert Q8 to float.
+      rinfo.packets_lost = s_cumulative_lost;
+      rinfo.fraction_lost = static_cast<float>(s_fraction_lost) / (1 << 8);
+    }
+    info->receivers.push_back(rinfo);
+
+    unsigned int estimated_recv_stream_bandwidth = 0;
+    if (engine_->vie()->rtp()->GetEstimatedReceiveBandwidth(
+            channel->channel_id(), &estimated_recv_stream_bandwidth) == 0) {
+      estimated_recv_bandwidth += estimated_recv_stream_bandwidth;
+    } else {
+      LOG_RTCERR1(GetEstimatedReceiveBandwidth, channel->channel_id());
+    }
+  }
+
+  // Build BandwidthEstimationInfo.
+  // TODO(zhurunz): Add real unittest for this.
+  BandwidthEstimationInfo bwe;
+
+  // Calculations done above per send/receive stream.
+  bwe.actual_enc_bitrate = video_bitrate_sent;
+  bwe.transmit_bitrate = total_bitrate_sent;
+  bwe.retransmit_bitrate = nack_bitrate_sent;
+  bwe.available_send_bandwidth = estimated_send_bandwidth;
+  bwe.available_recv_bandwidth = estimated_recv_bandwidth;
+  bwe.target_enc_bitrate = target_enc_bitrate;
+
+  info->bw_estimations.push_back(bwe);
+
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetCapturer(uint32 ssrc,
+                                          VideoCapturer* capturer) {
+  // Attaches |capturer| as the frame source for the send channel owning
+  // |ssrc|. A NULL |capturer| removes the current capturer instead. Returns
+  // false when no send channel is associated with |ssrc|.
+  ASSERT(ssrc != 0);
+  if (!capturer) {
+    return RemoveCapturer(ssrc);
+  }
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
+  if (!send_channel) {
+    return false;
+  }
+  VideoCapturer* old_capturer = send_channel->video_capturer();
+  if (old_capturer) {
+    // Stop listening to the previous capturer before replacing it.
+    old_capturer->SignalVideoFrame.disconnect(this);
+  }
+
+  send_channel->set_video_capturer(capturer);
+  capturer->SignalVideoFrame.connect(
+      this,
+      &WebRtcVideoMediaChannel::AdaptAndSendFrame);
+  // Only camera captures get the channel-wide aspect ratio applied.
+  if (!capturer->IsScreencast() && ratio_w_ != 0 && ratio_h_ != 0) {
+    capturer->UpdateAspectRatio(ratio_w_, ratio_h_);
+  }
+  const int64 timestamp = send_channel->local_stream_info()->time_stamp();
+  if (send_codec_) {
+    // NOTE(review): presumably displays black until the new capturer delivers
+    // its first frame — confirm against QueueBlackFrame's implementation.
+    QueueBlackFrame(ssrc, timestamp, send_codec_->maxFramerate);
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::RequestIntraFrame() {
+  // Not supported: no API is exposed for the application to request a key
+  // frame; ViE requests one internally when the decoder reports errors.
+  return false;
+}
+
+void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
+  // Route the incoming RTP packet by SSRC: deliver it to the matching
+  // multiplexed receive channel, or fall back to the default channel when
+  // the SSRC maps to no specific stream.
+  uint32 ssrc = 0;
+  if (!GetRtpSsrc(packet->data(), packet->length(), &ssrc)) {
+    return;
+  }
+  int channel = GetRecvChannelNum(ssrc);
+  if (channel == -1) {
+    channel = video_channel();
+  }
+  engine()->vie()->network()->ReceivedRTPPacket(
+      channel, packet->data(), static_cast<int>(packet->length()));
+}
+
+void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
+  // Sending channels need all RTCP packets with feedback information, and
+  // even sender reports can contain attached report blocks. Receiving
+  // channels need sender reports in order to create correct receiver
+  // reports.
+  uint32 ssrc = 0;
+  if (!GetRtcpSsrc(packet->data(), packet->length(), &ssrc)) {
+    LOG(LS_WARNING) << "Failed to parse SSRC from received RTCP packet";
+    return;
+  }
+  int type = 0;
+  if (!GetRtcpType(packet->data(), packet->length(), &type)) {
+    LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
+    return;
+  }
+
+  const int packet_length = static_cast<int>(packet->length());
+  // A sender report is additionally delivered to the receive channel that is
+  // listening for this SSRC, unless that is the default channel (which gets
+  // the packet through the send-channel loop below).
+  if (type == kRtcpTypeSR) {
+    const int recv_channel = GetRecvChannelNum(ssrc);
+    if (recv_channel != -1 && !IsDefaultChannel(recv_channel)) {
+      engine_->vie()->network()->ReceivedRTCPPacket(
+          recv_channel, packet->data(), packet_length);
+    }
+  }
+  // SR may contain RR blocks, and any RR entry may correspond to any one of
+  // the send channels, so forward every RTCP packet to all send channels.
+  // ViE filters out irrelevant RRs internally.
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    engine_->vie()->network()->ReceivedRTCPPacket(
+        iter->second->channel_id(), packet->data(), packet_length);
+  }
+}
+
+void WebRtcVideoMediaChannel::OnReadyToSend(bool ready) {
+  // Forward the network writability signal to the ViE channels.
+  SetNetworkTransmissionState(ready);
+}
+
+bool WebRtcVideoMediaChannel::MuteStream(uint32 ssrc, bool muted) {
+  // Record the mute state on the send channel owning |ssrc|; the flag is
+  // consulted when outgoing frames are processed in SendFrame().
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
+  if (send_channel == NULL) {
+    LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";
+    return false;
+  }
+  send_channel->set_muted(muted);
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetRecvRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // No-op when the requested extension set is already configured.
+  if (receive_extensions_ == extensions) {
+    return true;
+  }
+  receive_extensions_ = extensions;
+
+  const RtpHeaderExtension* offset_extension =
+      FindHeaderExtension(extensions, kRtpTimestampOffsetHeaderExtension);
+  const RtpHeaderExtension* send_time_extension =
+      FindHeaderExtension(extensions, kRtpAbsoluteSendTimeHeaderExtension);
+
+  // Enable/disable both extensions on every receive channel, bailing out on
+  // the first channel that fails.
+  for (RecvChannelMap::iterator it = recv_channels_.begin();
+       it != recv_channels_.end(); ++it) {
+    const int channel_id = it->second->channel_id();
+    const bool ok =
+        SetHeaderExtension(
+            &webrtc::ViERTP_RTCP::SetReceiveTimestampOffsetStatus, channel_id,
+            offset_extension) &&
+        SetHeaderExtension(
+            &webrtc::ViERTP_RTCP::SetReceiveAbsoluteSendTimeStatus, channel_id,
+            send_time_extension);
+    if (!ok) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetSendRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // Configures the RTP header extensions applied to outgoing packets.
+  // Mirror SetRecvRtpHeaderExtensions(): when the requested set is already
+  // in effect, skip the redundant per-channel ViE calls.
+  if (send_extensions_ == extensions) {
+    return true;
+  }
+  send_extensions_ = extensions;
+
+  const RtpHeaderExtension* offset_extension =
+      FindHeaderExtension(extensions, kRtpTimestampOffsetHeaderExtension);
+  const RtpHeaderExtension* send_time_extension =
+      FindHeaderExtension(extensions, kRtpAbsoluteSendTimeHeaderExtension);
+
+  // Loop through all send channels and enable/disable the extensions.
+  for (SendChannelMap::iterator channel_it = send_channels_.begin();
+       channel_it != send_channels_.end(); ++channel_it) {
+    int channel_id = channel_it->second->channel_id();
+    if (!SetHeaderExtension(
+        &webrtc::ViERTP_RTCP::SetSendTimestampOffsetStatus, channel_id,
+        offset_extension)) {
+      return false;
+    }
+    if (!SetHeaderExtension(
+        &webrtc::ViERTP_RTCP::SetSendAbsoluteSendTimeStatus, channel_id,
+        send_time_extension)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
+  // Applies a bandwidth setting (|bps| in bits/s; <= 0 means "use defaults")
+  // to the current send codec. With |autobw| the encoder may operate between
+  // the default minimum and the cap; otherwise |bps| is a fixed target.
+  // Ignored in conference mode, and a no-op until a send codec is set.
+  // Fix: log message previously read "WebRtcVideoMediaChanne" (typo).
+  LOG(LS_INFO) << "WebRtcVideoMediaChannel::SetSendBandwidth";
+
+  if (InConferenceMode()) {
+    LOG(LS_INFO) << "Conference mode ignores SetSendBandWidth";
+    return true;
+  }
+
+  if (!send_codec_) {
+    LOG(LS_INFO) << "The send codec has not been set up yet";
+    return true;
+  }
+
+  int min_bitrate;
+  int start_bitrate;
+  int max_bitrate;
+  if (autobw) {
+    // Use the default values for min bitrate.
+    min_bitrate = kMinVideoBitrate;
+    // Use the default value or the bps for the max.
+    max_bitrate = (bps <= 0) ? send_max_bitrate_ : (bps / 1000);
+    // Maximum start bitrate can be kStartVideoBitrate.
+    start_bitrate = talk_base::_min(kStartVideoBitrate, max_bitrate);
+  } else {
+    // Use the default start or the bps as the fixed target bitrate.
+    int target_bitrate = (bps <= 0) ? kStartVideoBitrate : (bps / 1000);
+    min_bitrate = target_bitrate;
+    start_bitrate = target_bitrate;
+    max_bitrate = target_bitrate;
+  }
+
+  if (!SetSendCodec(*send_codec_, min_bitrate, start_bitrate, max_bitrate)) {
+    return false;
+  }
+  LogSendCodecChange("SetSendBandwidth()");
+
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
+  // Merges |options| into the channel's current options and applies the
+  // side effects of any changed settings: send codec reconfiguration
+  // (bitrate/denoiser), leaky-bucket pacing, and buffered-mode latency.
+  // Always accept options that are unchanged.
+  if (options_ == options) {
+    return true;
+  }
+
+  // Trigger SetSendCodec to set correct noise reduction state if the option has
+  // changed.
+  bool denoiser_changed = options.video_noise_reduction.IsSet() &&
+      (options_.video_noise_reduction != options.video_noise_reduction);
+
+  bool leaky_bucket_changed = options.video_leaky_bucket.IsSet() &&
+      (options_.video_leaky_bucket != options.video_leaky_bucket);
+
+  bool buffer_latency_changed = options.buffered_mode_latency.IsSet() &&
+      (options_.buffered_mode_latency != options.buffered_mode_latency);
+
+  // Detect the transition from conference mode on -> off; it needs special
+  // bitrate handling below.
+  bool conference_mode_turned_off = false;
+  if (options_.conference_mode.IsSet() && options.conference_mode.IsSet() &&
+      options_.conference_mode.GetWithDefaultIfUnset(false) &&
+      !options.conference_mode.GetWithDefaultIfUnset(false)) {
+    conference_mode_turned_off = true;
+  }
+
+  // Save the options, to be interpreted where appropriate.
+  // Use options_.SetAll() instead of assignment so that unset value in options
+  // will not overwrite the previous option value.
+  options_.SetAll(options);
+
+  // Set CPU options for all send channels.
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    send_channel->ApplyCpuOptions(options_);
+  }
+
+  // Adjust send codec bitrate if needed.
+  int conf_max_bitrate = kDefaultConferenceModeMaxVideoBitrate;
+
+  int expected_bitrate = send_max_bitrate_;
+  if (InConferenceMode()) {
+    expected_bitrate = conf_max_bitrate;
+  } else if (conference_mode_turned_off) {
+    // This is a special case for turning conference mode off.
+    // Max bitrate should go back to the default maximum value instead
+    // of the current maximum.
+    expected_bitrate = kMaxVideoBitrate;
+  }
+
+  if (send_codec_ &&
+      (send_max_bitrate_ != expected_bitrate || denoiser_changed)) {
+    // On success, SetSendCodec() will reset send_max_bitrate_ to
+    // expected_bitrate.
+    if (!SetSendCodec(*send_codec_,
+                      send_min_bitrate_,
+                      send_start_bitrate_,
+                      expected_bitrate)) {
+      return false;
+    }
+    LogSendCodecChange("SetOptions()");
+  }
+  if (leaky_bucket_changed) {
+    // Toggle pacing (transmission smoothing) on every send channel; failures
+    // are logged but do not abort.
+    bool enable_leaky_bucket =
+        options_.video_leaky_bucket.GetWithDefaultIfUnset(false);
+    for (SendChannelMap::iterator it = send_channels_.begin();
+        it != send_channels_.end(); ++it) {
+      if (engine()->vie()->rtp()->SetTransmissionSmoothingStatus(
+          it->second->channel_id(), enable_leaky_bucket) != 0) {
+        LOG_RTCERR2(SetTransmissionSmoothingStatus, it->second->channel_id(),
+                    enable_leaky_bucket);
+      }
+    }
+  }
+  if (buffer_latency_changed) {
+    // Apply the buffered-mode latency to both send and receive sides.
+    int buffer_latency =
+        options_.buffered_mode_latency.GetWithDefaultIfUnset(
+            cricket::kBufferedModeDisabled);
+    for (SendChannelMap::iterator it = send_channels_.begin();
+        it != send_channels_.end(); ++it) {
+      if (engine()->vie()->rtp()->SetSenderBufferingMode(
+          it->second->channel_id(), buffer_latency) != 0) {
+        LOG_RTCERR2(SetSenderBufferingMode, it->second->channel_id(),
+                    buffer_latency);
+      }
+    }
+    for (RecvChannelMap::iterator it = recv_channels_.begin();
+        it != recv_channels_.end(); ++it) {
+      if (engine()->vie()->rtp()->SetReceiverBufferingMode(
+          it->second->channel_id(), buffer_latency) != 0) {
+        LOG_RTCERR2(SetReceiverBufferingMode, it->second->channel_id(),
+                    buffer_latency);
+      }
+    }
+  }
+  return true;
+}
+
+void WebRtcVideoMediaChannel::SetInterface(NetworkInterface* iface) {
+  // Installs the packet transport and enlarges the RTP receive buffer so
+  // bursts of video packets are less likely to be dropped.
+  MediaChannel::SetInterface(iface);
+  // Set the RTP recv/send buffer to a bigger size
+  MediaChannel::SetOption(NetworkInterface::ST_RTP,
+                          talk_base::Socket::OPT_RCVBUF,
+                          kVideoRtpBufferSize);
+
+  // TODO(sriniv): Remove or re-enable this.
+  // As part of b/8030474, send-buffer is size now controlled through
+  // portallocator flags.
+  // network_interface_->SetOption(NetworkInterface::ST_RTP,
+  //                               talk_base::Socket::OPT_SNDBUF,
+  //                               kVideoRtpBufferSize);
+}
+
+void WebRtcVideoMediaChannel::UpdateAspectRatio(int ratio_w, int ratio_h) {
+  // Record the requested aspect ratio and push it to every attached
+  // capturer, assuming all streams want the same ratio.
+  // TODO(hellner): remove the need for this assumption.
+  ASSERT(ratio_w != 0);
+  ASSERT(ratio_h != 0);
+  ratio_w_ = ratio_w;
+  ratio_h_ = ratio_h;
+  for (SendChannelMap::iterator it = send_channels_.begin();
+       it != send_channels_.end(); ++it) {
+    VideoCapturer* capturer = it->second->video_capturer();
+    if (capturer != NULL) {
+      capturer->UpdateAspectRatio(ratio_w, ratio_h);
+    }
+  }
+}
+
+bool WebRtcVideoMediaChannel::GetRenderer(uint32 ssrc,
+                                          VideoRenderer** renderer) {
+  // Look up the renderer attached to |ssrc|'s receive channel, falling back
+  // to the default channel when it is being reused for a 1:1 call.
+  RecvChannelMap::const_iterator it = recv_channels_.find(ssrc);
+  if (it != recv_channels_.end()) {
+    *renderer = it->second->render_adapter()->renderer();
+    return true;
+  }
+  if (first_receive_ssrc_ == ssrc &&
+      recv_channels_.find(0) != recv_channels_.end()) {
+    LOG(LS_INFO) << " GetRenderer " << ssrc
+                 << " reuse default renderer #"
+                 << vie_channel_;
+    *renderer = recv_channels_[0]->render_adapter()->renderer();
+    return true;
+  }
+  return false;
+}
+
+void WebRtcVideoMediaChannel::AdaptAndSendFrame(VideoCapturer* capturer,
+                                                const VideoFrame* frame) {
+  // Entry point for frames signalled by an attached capturer: runs the frame
+  // through the send channel's adapter before handing it to SendFrame().
+  if (capturer->IsScreencast()) {
+    // Do not adapt frames that are screencast.
+    SendFrame(capturer, frame);
+    return;
+  }
+  // TODO(thorcarpenter): This is broken. One capturer registered on two ssrc
+  // will not send any video to the second ssrc send channel. We should remove
+  // GetSendChannel(capturer) and pass in an ssrc here.
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(capturer);
+  if (!send_channel) {
+    SendFrame(capturer, frame);
+    return;
+  }
+  const VideoFrame* output_frame = NULL;
+  send_channel->AdaptFrame(frame, &output_frame);
+  // A NULL |output_frame| means the adapter chose not to emit a frame.
+  if (output_frame) {
+    SendFrame(send_channel, output_frame, capturer->IsScreencast());
+  }
+}
+
+// TODO(zhurunz): Add unittests to test this function.
+void WebRtcVideoMediaChannel::SendFrame(VideoCapturer* capturer,
+                                        const VideoFrame* frame) {
+  // Dispatches a captured frame to the send channel bound to |capturer|,
+  // or — when no channel is bound — to every capturer-less channel.
+  // If there's send channel registers to the |capturer|, then only send the
+  // frame to that channel and return. Otherwise send the frame to the default
+  // channel, which currently taking frames from the engine.
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(capturer);
+  if (send_channel) {
+    SendFrame(send_channel, frame, capturer->IsScreencast());
+    return;
+  }
+  // TODO(hellner): Remove below for loop once the captured frame no longer
+  // come from the engine, i.e. the engine no longer owns a capturer.
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    if (send_channel->video_capturer() == NULL) {
+      SendFrame(send_channel, frame, capturer->IsScreencast());
+    }
+  }
+}
+
+bool WebRtcVideoMediaChannel::SendFrame(
+    WebRtcVideoChannelSendInfo* send_channel,
+    const VideoFrame* frame,
+    bool is_screencast) {
+  // Final hop of the send path: possibly resets the ViE send codec for the
+  // frame's dimensions, applies mute processing, converts the frame to
+  // ViEVideoFrameI420 and hands it to the channel's external capture
+  // interface. Returns false when the frame could not be delivered.
+  if (!send_channel) {
+    return false;
+  }
+  if (!send_codec_) {
+    // Send codec has not been set. No reason to process the frame any further.
+    return false;
+  }
+  const VideoFormat& video_format = send_channel->video_format();
+  // If the frame should be dropped.
+  const bool video_format_set = video_format != cricket::VideoFormat();
+  if (video_format_set &&
+      (video_format.width == 0 && video_format.height == 0)) {
+    // A 0x0 video format is the "drop all frames" marker.
+    return true;
+  }
+
+  // Checks if we need to reset vie send codec.
+  if (!MaybeResetVieSendCodec(send_channel,
+                              static_cast<int>(frame->GetWidth()),
+                              static_cast<int>(frame->GetHeight()),
+                              is_screencast, NULL)) {
+    LOG(LS_ERROR) << "MaybeResetVieSendCodec failed with "
+                  << frame->GetWidth() << "x" << frame->GetHeight();
+    return false;
+  }
+  const VideoFrame* frame_out = frame;
+  talk_base::scoped_ptr<VideoFrame> processed_frame;
+  // Disable muting for screencast.
+  const bool mute = (send_channel->muted() && !is_screencast);
+  send_channel->ProcessFrame(*frame_out, mute, processed_frame.use());
+  if (processed_frame) {
+    frame_out = processed_frame.get();
+  }
+
+  webrtc::ViEVideoFrameI420 frame_i420;
+  // TODO(ronghuawu): Update the webrtc::ViEVideoFrameI420
+  // to use const unsigned char*
+  frame_i420.y_plane = const_cast<unsigned char*>(frame_out->GetYPlane());
+  frame_i420.u_plane = const_cast<unsigned char*>(frame_out->GetUPlane());
+  frame_i420.v_plane = const_cast<unsigned char*>(frame_out->GetVPlane());
+  frame_i420.y_pitch = frame_out->GetYPitch();
+  frame_i420.u_pitch = frame_out->GetUPitch();
+  frame_i420.v_pitch = frame_out->GetVPitch();
+  frame_i420.width = static_cast<unsigned short>(frame_out->GetWidth());
+  frame_i420.height = static_cast<unsigned short>(frame_out->GetHeight());
+
+  int64 timestamp_ntp_ms = 0;
+  // TODO(justinlin): Reenable after Windows issues with clock drift are fixed.
+  // Currently reverted to old behavior of discarding capture timestamp.
+#if 0
+  // If the frame timestamp is 0, we will use the deliver time.
+  const int64 frame_timestamp = frame->GetTimeStamp();
+  if (frame_timestamp != 0) {
+    if (abs(time(NULL) - frame_timestamp / talk_base::kNumNanosecsPerSec) >
+            kTimestampDeltaInSecondsForWarning) {
+      LOG(LS_WARNING) << "Frame timestamp differs by more than "
+                      << kTimestampDeltaInSecondsForWarning << " seconds from "
+                      << "current Unix timestamp.";
+    }
+
+    timestamp_ntp_ms =
+        talk_base::UnixTimestampNanosecsToNtpMillisecs(frame_timestamp);
+  }
+#endif
+
+  return send_channel->external_capture()->IncomingFrameI420(
+      frame_i420, timestamp_ntp_ms) == 0;
+}
+
+bool WebRtcVideoMediaChannel::CreateChannel(uint32 ssrc_key,
+                                            MediaDirection direction,
+                                            int* channel_id) {
+  // Creates and configures a ViE channel for |direction|, writing its id to
+  // |*channel_id| (set to -1 on failure).
+  // There are 3 types of channels. Sending only, receiving only and
+  // sending and receiving. The sending and receiving channel is the
+  // default channel and there is only one. All other channels that are created
+  // are associated with the default channel which must exist. The default
+  // channel id is stored in |vie_channel_|. All channels need to know about
+  // the default channel to properly handle remb which is why there are
+  // different ViE create channel calls.
+  // For this channel the local and remote ssrc key is 0. However, it may
+  // have a non-zero local and/or remote ssrc depending on if it is currently
+  // sending and/or receiving.
+  if ((vie_channel_ == -1 || direction == MD_SENDRECV) &&
+      (!send_channels_.empty() || !recv_channels_.empty())) {
+    // The default (send-recv) channel must be created first and only once.
+    ASSERT(false);
+    return false;
+  }
+
+  *channel_id = -1;
+  if (direction == MD_RECV) {
+    // All rec channels are associated with the default channel |vie_channel_|
+    if (engine_->vie()->base()->CreateReceiveChannel(*channel_id,
+                                                     vie_channel_) != 0) {
+      LOG_RTCERR2(CreateReceiveChannel, *channel_id, vie_channel_);
+      return false;
+    }
+  } else if (direction == MD_SEND) {
+    if (engine_->vie()->base()->CreateChannel(*channel_id,
+                                              vie_channel_) != 0) {
+      LOG_RTCERR2(CreateChannel, *channel_id, vie_channel_);
+      return false;
+    }
+  } else {
+    ASSERT(direction == MD_SENDRECV);
+    if (engine_->vie()->base()->CreateChannel(*channel_id) != 0) {
+      LOG_RTCERR1(CreateChannel, *channel_id);
+      return false;
+    }
+  }
+  if (!ConfigureChannel(*channel_id, direction, ssrc_key)) {
+    // Roll back the half-configured ViE channel on failure.
+    engine_->vie()->base()->DeleteChannel(*channel_id);
+    *channel_id = -1;
+    return false;
+  }
+
+  return true;
+}
+
+bool WebRtcVideoMediaChannel::ConfigureChannel(int channel_id,
+                                               MediaDirection direction,
+                                               uint32 ssrc_key) {
+  // Applies the common ViE configuration (transport, MTU, RTCP, key-frame
+  // request method, NACK/FEC) to a freshly created channel, then performs
+  // the direction-specific receive and/or send setup.
+  const bool receiving = (direction == MD_RECV) || (direction == MD_SENDRECV);
+  const bool sending = (direction == MD_SEND) || (direction == MD_SENDRECV);
+  // Register external transport.
+  if (engine_->vie()->network()->RegisterSendTransport(
+      channel_id, *this) != 0) {
+    LOG_RTCERR1(RegisterSendTransport, channel_id);
+    return false;
+  }
+
+  // Set MTU.
+  if (engine_->vie()->network()->SetMTU(channel_id, kVideoMtu) != 0) {
+    LOG_RTCERR2(SetMTU, channel_id, kVideoMtu);
+    return false;
+  }
+  // Turn on RTCP and loss feedback reporting.
+  if (engine()->vie()->rtp()->SetRTCPStatus(
+      channel_id, webrtc::kRtcpCompound_RFC4585) != 0) {
+    LOG_RTCERR2(SetRTCPStatus, channel_id, webrtc::kRtcpCompound_RFC4585);
+    return false;
+  }
+  // Enable pli as key frame request method.
+  if (engine_->vie()->rtp()->SetKeyFrameRequestMethod(
+      channel_id, webrtc::kViEKeyFrameRequestPliRtcp) != 0) {
+    LOG_RTCERR2(SetKeyFrameRequestMethod,
+                channel_id, webrtc::kViEKeyFrameRequestPliRtcp);
+    return false;
+  }
+  if (!SetNackFec(channel_id, send_red_type_, send_fec_type_, nack_enabled_)) {
+    // Logged in SetNackFec. Don't spam the logs.
+    return false;
+  }
+  // Note that receiving must always be configured before sending to ensure
+  // that send and receive channel is configured correctly (ConfigureReceiving
+  // assumes no sending).
+  if (receiving) {
+    if (!ConfigureReceiving(channel_id, ssrc_key)) {
+      return false;
+    }
+  }
+  if (sending) {
+    if (!ConfigureSending(channel_id, ssrc_key)) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Configures the receive side of |channel_id| for the stream keyed by
+// |remote_ssrc_key|: A/V sync (default channel only), render adapter, REMB,
+// receive header extensions, local-SSRC mirroring for non-default channels,
+// receive codecs, buffered mode and the decoder observer. On success the
+// new WebRtcVideoChannelRecvInfo is stored in |recv_channels_|; on failure
+// no entry is added and false is returned.
+bool WebRtcVideoMediaChannel::ConfigureReceiving(int channel_id,
+                                                 uint32 remote_ssrc_key) {
+  // Make sure that an SSRC/key isn't registered more than once.
+  if (recv_channels_.find(remote_ssrc_key) != recv_channels_.end()) {
+    return false;
+  }
+  // Connect the voice channel, if there is one.
+  // TODO(perkj): The A/V is synched by the receiving channel. So we need to
+  // know the SSRC of the remote audio channel in order to fetch the correct
+  // webrtc VoiceEngine channel. For now- only sync the default channel used
+  // in 1-1 calls.
+  if (remote_ssrc_key == 0 && voice_channel_) {
+    WebRtcVoiceMediaChannel* voice_channel =
+        static_cast<WebRtcVoiceMediaChannel*>(voice_channel_);
+    if (engine_->vie()->base()->ConnectAudioChannel(
+        vie_channel_, voice_channel->voe_channel()) != 0) {
+      // Log |vie_channel_| -- the channel actually passed to
+      // ConnectAudioChannel above -- not |channel_id|.
+      LOG_RTCERR2(ConnectAudioChannel, vie_channel_,
+                  voice_channel->voe_channel());
+      LOG(LS_WARNING) << "A/V not synchronized";
+      // Not a fatal error.
+    }
+  }
+
+  talk_base::scoped_ptr<WebRtcVideoChannelRecvInfo> channel_info(
+      new WebRtcVideoChannelRecvInfo(channel_id));
+
+  // Install a render adapter.
+  if (engine_->vie()->render()->AddRenderer(channel_id,
+      webrtc::kVideoI420, channel_info->render_adapter()) != 0) {
+    LOG_RTCERR3(AddRenderer, channel_id, webrtc::kVideoI420,
+                channel_info->render_adapter());
+    return false;
+  }
+
+  if (engine_->vie()->rtp()->SetRembStatus(channel_id,
+                                           kNotSending,
+                                           remb_enabled_) != 0) {
+    LOG_RTCERR3(SetRembStatus, channel_id, kNotSending, remb_enabled_);
+    return false;
+  }
+
+  if (!SetHeaderExtension(&webrtc::ViERTP_RTCP::SetReceiveTimestampOffsetStatus,
+      channel_id, receive_extensions_, kRtpTimestampOffsetHeaderExtension)) {
+    return false;
+  }
+
+  if (!SetHeaderExtension(
+      &webrtc::ViERTP_RTCP::SetReceiveAbsoluteSendTimeStatus, channel_id,
+      receive_extensions_, kRtpAbsoluteSendTimeHeaderExtension)) {
+    return false;
+  }
+
+  if (remote_ssrc_key != 0) {
+    // Use the same SSRC as our default channel
+    // (so the RTCP reports are correct).
+    unsigned int send_ssrc = 0;
+    webrtc::ViERTP_RTCP* rtp = engine()->vie()->rtp();
+    if (rtp->GetLocalSSRC(vie_channel_, send_ssrc) == -1) {
+      LOG_RTCERR2(GetLocalSSRC, vie_channel_, send_ssrc);
+      return false;
+    }
+    if (rtp->SetLocalSSRC(channel_id, send_ssrc) == -1) {
+      LOG_RTCERR2(SetLocalSSRC, channel_id, send_ssrc);
+      return false;
+    }
+  }  // Else this is the the default channel and we don't change the SSRC.
+
+  // Disable color enhancement since it is a bit too aggressive.
+  if (engine()->vie()->image()->EnableColorEnhancement(channel_id,
+                                                       false) != 0) {
+    LOG_RTCERR1(EnableColorEnhancement, channel_id);
+    return false;
+  }
+
+  if (!SetReceiveCodecs(channel_info.get())) {
+    return false;
+  }
+
+  int buffer_latency =
+      options_.buffered_mode_latency.GetWithDefaultIfUnset(
+          cricket::kBufferedModeDisabled);
+  if (buffer_latency != cricket::kBufferedModeDisabled) {
+    if (engine()->vie()->rtp()->SetReceiverBufferingMode(
+        channel_id, buffer_latency) != 0) {
+      // Non-fatal: continue without buffered mode.
+      LOG_RTCERR2(SetReceiverBufferingMode, channel_id, buffer_latency);
+    }
+  }
+
+  if (render_started_) {
+    if (engine_->vie()->render()->StartRender(channel_id) != 0) {
+      LOG_RTCERR1(StartRender, channel_id);
+      return false;
+    }
+  }
+
+  // Register decoder observer for incoming framerate and bitrate.
+  if (engine()->vie()->codec()->RegisterDecoderObserver(
+      channel_id, *channel_info->decoder_observer()) != 0) {
+    // Log the channel id, not the observer pointer value.
+    LOG_RTCERR1(RegisterDecoderObserver, channel_id);
+    return false;
+  }
+
+  recv_channels_[remote_ssrc_key] = channel_info.release();
+  return true;
+}
+
+// Configures the send side of |channel_id| for the stream keyed by
+// |local_ssrc_key|: allocates and connects an external capture device,
+// hooks up CPU-adaptation signaling, registers the encoder observer, and
+// applies send header extensions, leaky bucket, buffered mode, REMB and
+// NACK/FEC. On success the new WebRtcVideoChannelSendInfo is stored in
+// |send_channels_|.
+bool WebRtcVideoMediaChannel::ConfigureSending(int channel_id,
+                                               uint32 local_ssrc_key) {
+  // The ssrc key can be zero or correspond to an SSRC.
+  // Make sure the default channel isn't configured more than once.
+  if (local_ssrc_key == 0 && send_channels_.find(0) != send_channels_.end()) {
+    return false;
+  }
+  // Make sure that the SSRC is not already in use.
+  uint32 dummy_key;
+  if (GetSendChannelKey(local_ssrc_key, &dummy_key)) {
+    return false;
+  }
+  int vie_capture = 0;
+  webrtc::ViEExternalCapture* external_capture = NULL;
+  // Register external capture.
+  if (engine()->vie()->capture()->AllocateExternalCaptureDevice(
+      vie_capture, external_capture) != 0) {
+    LOG_RTCERR0(AllocateExternalCaptureDevice);
+    return false;
+  }
+
+  // Connect external capture.
+  if (engine()->vie()->capture()->ConnectCaptureDevice(
+      vie_capture, channel_id) != 0) {
+    LOG_RTCERR2(ConnectCaptureDevice, vie_capture, channel_id);
+    return false;
+  }
+  talk_base::scoped_ptr<WebRtcVideoChannelSendInfo> send_channel(
+      new WebRtcVideoChannelSendInfo(channel_id, vie_capture,
+                                     external_capture,
+                                     engine()->cpu_monitor()));
+  send_channel->ApplyCpuOptions(options_);
+  send_channel->SignalCpuAdaptationUnable.connect(this,
+      &WebRtcVideoMediaChannel::OnCpuAdaptationUnable);
+
+  // Register encoder observer for outgoing framerate and bitrate.
+  if (engine()->vie()->codec()->RegisterEncoderObserver(
+      channel_id, *send_channel->encoder_observer()) != 0) {
+    // Log the channel id, not the observer pointer value.
+    LOG_RTCERR1(RegisterEncoderObserver, channel_id);
+    return false;
+  }
+
+  if (!SetHeaderExtension(&webrtc::ViERTP_RTCP::SetSendTimestampOffsetStatus,
+      channel_id, send_extensions_, kRtpTimestampOffsetHeaderExtension)) {
+    return false;
+  }
+
+  if (!SetHeaderExtension(&webrtc::ViERTP_RTCP::SetSendAbsoluteSendTimeStatus,
+      channel_id, send_extensions_, kRtpAbsoluteSendTimeHeaderExtension)) {
+    return false;
+  }
+
+  if (options_.video_leaky_bucket.GetWithDefaultIfUnset(false)) {
+    if (engine()->vie()->rtp()->SetTransmissionSmoothingStatus(channel_id,
+                                                               true) != 0) {
+      LOG_RTCERR2(SetTransmissionSmoothingStatus, channel_id, true);
+      return false;
+    }
+  }
+
+  int buffer_latency =
+      options_.buffered_mode_latency.GetWithDefaultIfUnset(
+          cricket::kBufferedModeDisabled);
+  if (buffer_latency != cricket::kBufferedModeDisabled) {
+    if (engine()->vie()->rtp()->SetSenderBufferingMode(
+        channel_id, buffer_latency) != 0) {
+      // Non-fatal: continue without buffered mode.
+      LOG_RTCERR2(SetSenderBufferingMode, channel_id, buffer_latency);
+    }
+  }
+  // The remb status direction correspond to the RTP stream (and not the RTCP
+  // stream). I.e. if send remb is enabled it means it is receiving remote
+  // rembs and should use them to estimate bandwidth. Receive remb mean that
+  // remb packets will be generated and that the channel should be included in
+  // it. If remb is enabled all channels are allowed to contribute to the remb
+  // but only receive channels will ever end up actually contributing. This
+  // keeps the logic simple.
+  if (engine_->vie()->rtp()->SetRembStatus(channel_id,
+                                           remb_enabled_,
+                                           remb_enabled_) != 0) {
+    LOG_RTCERR3(SetRembStatus, channel_id, remb_enabled_, remb_enabled_);
+    return false;
+  }
+  if (!SetNackFec(channel_id, send_red_type_, send_fec_type_, nack_enabled_)) {
+    // Logged in SetNackFec. Don't spam the logs.
+    return false;
+  }
+
+  send_channels_[local_ssrc_key] = send_channel.release();
+
+  return true;
+}
+
+// Configures loss protection for |channel_id|: hybrid NACK/FEC when both
+// RED and FEC payload types are negotiated (and not in conference mode),
+// otherwise plain NACK. |nack_enabled| toggles NACK in either mode.
+bool WebRtcVideoMediaChannel::SetNackFec(int channel_id,
+                                         int red_payload_type,
+                                         int fec_payload_type,
+                                         bool nack_enabled) {
+  bool enable = (red_payload_type != -1 && fec_payload_type != -1 &&
+      !InConferenceMode());
+  if (enable) {
+    if (engine_->vie()->rtp()->SetHybridNACKFECStatus(
+        channel_id, nack_enabled, red_payload_type, fec_payload_type) != 0) {
+      LOG_RTCERR4(SetHybridNACKFECStatus,
+                  channel_id, nack_enabled, red_payload_type, fec_payload_type);
+      return false;
+    }
+    LOG(LS_INFO) << "Hybrid NACK/FEC enabled for channel " << channel_id;
+  } else {
+    if (engine_->vie()->rtp()->SetNACKStatus(channel_id, nack_enabled) != 0) {
+      // Include the flag that was being set in the error log.
+      LOG_RTCERR2(SetNACKStatus, channel_id, nack_enabled);
+      return false;
+    }
+    // Report the actual state: the previous message claimed "enabled" even
+    // when NACK was being turned off.
+    LOG(LS_INFO) << "NACK " << (nack_enabled ? "enabled" : "disabled")
+                 << " for channel " << channel_id;
+  }
+  return true;
+}
+
+// Applies |codec| with the given bitrate bounds to every send channel.
+// On full success the cached codec/bitrate state is updated; if any channel
+// fails, all channels are rolled back to the last known-good settings and
+// false is returned.
+bool WebRtcVideoMediaChannel::SetSendCodec(const webrtc::VideoCodec& codec,
+                                           int min_bitrate,
+                                           int start_bitrate,
+                                           int max_bitrate) {
+  bool ret_val = true;
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    ret_val = SetSendCodec(send_channel, codec, min_bitrate, start_bitrate,
+                           max_bitrate) && ret_val;
+  }
+  if (ret_val) {
+    // All SetSendCodec calls were successful. Update the global state
+    // accordingly.
+    send_codec_.reset(new webrtc::VideoCodec(codec));
+    send_min_bitrate_ = min_bitrate;
+    send_start_bitrate_ = start_bitrate;
+    send_max_bitrate_ = max_bitrate;
+  } else {
+    // At least one SetSendCodec call failed, rollback.
+    for (SendChannelMap::iterator iter = send_channels_.begin();
+         iter != send_channels_.end(); ++iter) {
+      WebRtcVideoChannelSendInfo* send_channel = iter->second;
+      if (send_codec_) {
+        SetSendCodec(send_channel, *send_codec_.get(), send_min_bitrate_,
+                     send_start_bitrate_, send_max_bitrate_);
+      }
+    }
+  }
+  return ret_val;
+}
+
+// Applies |codec| with the given bitrates to a single send channel. For VP8
+// the default temporal-layer count, resilience-off and (optional) denoising
+// settings are filled in. An external encoder is registered when the
+// engine's encoder factory supports the codec type. A 0x0 target resolution
+// means "drop all captured frames" and skips the ViE SetSendCodec call.
+bool WebRtcVideoMediaChannel::SetSendCodec(
+    WebRtcVideoChannelSendInfo* send_channel,
+    const webrtc::VideoCodec& codec,
+    int min_bitrate,
+    int start_bitrate,
+    int max_bitrate) {
+  if (!send_channel) {
+    return false;
+  }
+  const int channel_id = send_channel->channel_id();
+  // Make a copy of the codec
+  webrtc::VideoCodec target_codec = codec;
+  target_codec.startBitrate = start_bitrate;
+  target_codec.minBitrate = min_bitrate;
+  target_codec.maxBitrate = max_bitrate;
+
+  // Set the default number of temporal layers for VP8.
+  if (webrtc::kVideoCodecVP8 == codec.codecType) {
+    target_codec.codecSpecific.VP8.numberOfTemporalLayers =
+        kDefaultNumberOfTemporalLayers;
+
+    // Turn off the VP8 error resilience
+    target_codec.codecSpecific.VP8.resilience = webrtc::kResilienceOff;
+
+    bool enable_denoising =
+        options_.video_noise_reduction.GetWithDefaultIfUnset(false);
+    target_codec.codecSpecific.VP8.denoisingOn = enable_denoising;
+  }
+
+  // Register external encoder if codec type is supported by encoder factory.
+  if (engine()->IsExternalEncoderCodecType(codec.codecType) &&
+      !send_channel->IsEncoderRegistered(target_codec.plType)) {
+    webrtc::VideoEncoder* encoder =
+        engine()->CreateExternalEncoder(codec.codecType);
+    if (encoder) {
+      if (engine()->vie()->ext_codec()->RegisterExternalSendCodec(
+          channel_id, target_codec.plType, encoder, false) == 0) {
+        send_channel->RegisterEncoder(target_codec.plType, encoder);
+      } else {
+        // Registration failed; release the encoder and fall back to the
+        // built-in one.
+        LOG_RTCERR2(RegisterExternalSendCodec, channel_id, target_codec.plName);
+        engine()->DestroyExternalEncoder(encoder);
+      }
+    }
+  }
+
+  // Resolution and framerate may vary for different send channels.
+  const VideoFormat& video_format = send_channel->video_format();
+  UpdateVideoCodec(video_format, &target_codec);
+
+  if (target_codec.width == 0 && target_codec.height == 0) {
+    const uint32 ssrc = send_channel->stream_params()->first_ssrc();
+    LOG(LS_INFO) << "0x0 resolution selected. Captured frames will be dropped "
+                 << "for ssrc: " << ssrc << ".";
+  } else {
+    MaybeChangeStartBitrate(channel_id, &target_codec);
+    if (0 != engine()->vie()->codec()->SetSendCodec(channel_id, target_codec)) {
+      LOG_RTCERR2(SetSendCodec, channel_id, target_codec.plName);
+      return false;
+    }
+
+  }
+  // Cache the frame interval; FlushBlackFrame uses it as the timestamp
+  // delta for synthesized black frames.
+  send_channel->set_interval(
+      cricket::VideoFormat::FpsToInterval(target_codec.maxFramerate));
+  return true;
+}
+
+
+// Maps a VP8 complexity setting to a human-readable label for logging.
+static std::string ToString(webrtc::VideoCodecComplexity complexity) {
+  if (complexity == webrtc::kComplexityNormal)
+    return "normal";
+  if (complexity == webrtc::kComplexityHigh)
+    return "high";
+  if (complexity == webrtc::kComplexityHigher)
+    return "higher";
+  if (complexity == webrtc::kComplexityMax)
+    return "max";
+  return "unknown";
+}
+
+// Maps a VP8 resilience mode to a human-readable label for logging.
+static std::string ToString(webrtc::VP8ResilienceMode resilience) {
+  if (resilience == webrtc::kResilienceOff)
+    return "off";
+  if (resilience == webrtc::kResilientStream)
+    return "stream";
+  if (resilience == webrtc::kResilientFrames)
+    return "frames";
+  return "unknown";
+}
+
+// Logs the send codec currently active on the default channel, prefixed by
+// |reason| (the trigger for the change). VP8 codecs get additional detail
+// lines for their codec-specific settings.
+void WebRtcVideoMediaChannel::LogSendCodecChange(const std::string& reason) {
+  webrtc::VideoCodec vie_codec;
+  if (engine()->vie()->codec()->GetSendCodec(vie_channel_, vie_codec) != 0) {
+    LOG_RTCERR1(GetSendCodec, vie_channel_);
+    return;
+  }
+
+  LOG(LS_INFO) << reason << " : selected video codec "
+               << vie_codec.plName << "/"
+               << vie_codec.width << "x" << vie_codec.height << "x"
+               << static_cast<int>(vie_codec.maxFramerate) << "fps"
+               << "@" << vie_codec.maxBitrate << "kbps"
+               << " (min=" << vie_codec.minBitrate << "kbps,"
+               << " start=" << vie_codec.startBitrate << "kbps)";
+  LOG(LS_INFO) << "Video max quantization: " << vie_codec.qpMax;
+  if (webrtc::kVideoCodecVP8 == vie_codec.codecType) {
+    LOG(LS_INFO) << "VP8 number of temporal layers: "
+                 << static_cast<int>(
+                     vie_codec.codecSpecific.VP8.numberOfTemporalLayers);
+    LOG(LS_INFO) << "VP8 options : "
+                 << "picture loss indication = "
+                 << vie_codec.codecSpecific.VP8.pictureLossIndicationOn
+                 << ", feedback mode = "
+                 << vie_codec.codecSpecific.VP8.feedbackModeOn
+                 << ", complexity = "
+                 << ToString(vie_codec.codecSpecific.VP8.complexity)
+                 << ", resilience = "
+                 << ToString(vie_codec.codecSpecific.VP8.resilience)
+                 << ", denoising = "
+                 << vie_codec.codecSpecific.VP8.denoisingOn
+                 << ", error concealment = "
+                 << vie_codec.codecSpecific.VP8.errorConcealmentOn
+                 << ", automatic resize = "
+                 << vie_codec.codecSpecific.VP8.automaticResizeOn
+                 << ", frame dropping = "
+                 << vie_codec.codecSpecific.VP8.frameDroppingOn
+                 << ", key frame interval = "
+                 << vie_codec.codecSpecific.VP8.keyFrameInterval;
+  }
+
+}
+
+// Applies the negotiated |receive_codecs_| to the channel wrapped by |info|,
+// registering an external decoder for each codec type supported by the
+// engine's decoder factory (RED and ULPFEC never get external decoders).
+// Starts ViE reception once at least one receive codec has been set.
+bool WebRtcVideoMediaChannel::SetReceiveCodecs(
+    WebRtcVideoChannelRecvInfo* info) {
+  // NOTE(review): |red_type| and |fec_type| are collected below but never
+  // read in this function.
+  int red_type = -1;
+  int fec_type = -1;
+  int channel_id = info->channel_id();
+  for (std::vector<webrtc::VideoCodec>::iterator it = receive_codecs_.begin();
+       it != receive_codecs_.end(); ++it) {
+    if (it->codecType == webrtc::kVideoCodecRED) {
+      red_type = it->plType;
+    } else if (it->codecType == webrtc::kVideoCodecULPFEC) {
+      fec_type = it->plType;
+    }
+    if (engine()->vie()->codec()->SetReceiveCodec(channel_id, *it) != 0) {
+      LOG_RTCERR2(SetReceiveCodec, channel_id, it->plName);
+      return false;
+    }
+    if (!info->IsDecoderRegistered(it->plType) &&
+        it->codecType != webrtc::kVideoCodecRED &&
+        it->codecType != webrtc::kVideoCodecULPFEC) {
+      webrtc::VideoDecoder* decoder =
+          engine()->CreateExternalDecoder(it->codecType);
+      if (decoder) {
+        if (engine()->vie()->ext_codec()->RegisterExternalReceiveCodec(
+            channel_id, it->plType, decoder) == 0) {
+          info->RegisterDecoder(it->plType, decoder);
+        } else {
+          // Registration failed; release the decoder and fall back to the
+          // built-in one.
+          LOG_RTCERR2(RegisterExternalReceiveCodec, channel_id, it->plName);
+          engine()->DestroyExternalDecoder(decoder);
+        }
+      }
+    }
+  }
+
+  // Start receiving packets if at least one receive codec has been set.
+  if (!receive_codecs_.empty()) {
+    if (engine()->vie()->base()->StartReceive(channel_id) != 0) {
+      LOG_RTCERR1(StartReceive, channel_id);
+      return false;
+    }
+  }
+  return true;
+}
+
+// Returns the ViE channel id receiving |ssrc|, or -1 if none. The first
+// receive SSRC maps onto the default channel |vie_channel_|.
+int WebRtcVideoMediaChannel::GetRecvChannelNum(uint32 ssrc) {
+  if (ssrc == first_receive_ssrc_) {
+    return vie_channel_;
+  }
+  RecvChannelMap::iterator found = recv_channels_.find(ssrc);
+  if (found == recv_channels_.end()) {
+    return -1;
+  }
+  return found->second->channel_id();
+}
+
+// If the new frame size is different from the send codec size we set on vie,
+// we need to reset the send codec on vie.
+// The new send codec size should not exceed send_codec_ which is controlled
+// only by the 'jec' logic.
+// |*reset| (optional) is set to true iff the codec was actually reset.
+bool WebRtcVideoMediaChannel::MaybeResetVieSendCodec(
+    WebRtcVideoChannelSendInfo* send_channel,
+    int new_width,
+    int new_height,
+    bool is_screencast,
+    bool* reset) {
+  if (reset) {
+    *reset = false;
+  }
+  ASSERT(send_codec_.get() != NULL);
+
+  webrtc::VideoCodec target_codec = *send_codec_.get();
+  const VideoFormat& video_format = send_channel->video_format();
+  UpdateVideoCodec(video_format, &target_codec);
+
+  // Vie send codec size should not exceed target_codec.
+  int target_width = new_width;
+  int target_height = new_height;
+  if (!is_screencast &&
+      (new_width > target_codec.width || new_height > target_codec.height)) {
+    target_width = target_codec.width;
+    target_height = target_codec.height;
+  }
+
+  // Get current vie codec.
+  webrtc::VideoCodec vie_codec;
+  const int channel_id = send_channel->channel_id();
+  if (engine()->vie()->codec()->GetSendCodec(channel_id, vie_codec) != 0) {
+    LOG_RTCERR1(GetSendCodec, channel_id);
+    return false;
+  }
+  const int cur_width = vie_codec.width;
+  const int cur_height = vie_codec.height;
+
+  // Only reset send codec when there is a size change. Additionally,
+  // automatic resize needs to be turned off when screencasting and on when
+  // not screencasting.
+  // Don't allow automatic resizing for screencasting.
+  bool automatic_resize = !is_screencast;
+  // Turn off VP8 frame dropping when screensharing as the current model does
+  // not work well at low fps.
+  bool vp8_frame_dropping = !is_screencast;
+  // Disable denoising for screencasting.
+  bool enable_denoising =
+      options_.video_noise_reduction.GetWithDefaultIfUnset(false);
+  bool denoising = !is_screencast && enable_denoising;
+  // NOTE(review): the VP8 codecSpecific fields are read/written below without
+  // checking vie_codec.codecType -- presumably the send codec is always VP8
+  // when this runs; confirm.
+  bool reset_send_codec =
+      target_width != cur_width || target_height != cur_height ||
+      automatic_resize != vie_codec.codecSpecific.VP8.automaticResizeOn ||
+      denoising != vie_codec.codecSpecific.VP8.denoisingOn ||
+      vp8_frame_dropping != vie_codec.codecSpecific.VP8.frameDroppingOn;
+
+  if (reset_send_codec) {
+    // Set the new codec on vie.
+    vie_codec.width = target_width;
+    vie_codec.height = target_height;
+    vie_codec.maxFramerate = target_codec.maxFramerate;
+    vie_codec.startBitrate = target_codec.startBitrate;
+    vie_codec.codecSpecific.VP8.automaticResizeOn = automatic_resize;
+    vie_codec.codecSpecific.VP8.denoisingOn = denoising;
+    vie_codec.codecSpecific.VP8.frameDroppingOn = vp8_frame_dropping;
+    // TODO(mflodman): Remove 'is_screencast' check when screen cast settings
+    // are treated correctly in WebRTC.
+    if (!is_screencast)
+      MaybeChangeStartBitrate(channel_id, &vie_codec);
+
+    if (engine()->vie()->codec()->SetSendCodec(channel_id, vie_codec) != 0) {
+      LOG_RTCERR1(SetSendCodec, channel_id);
+      return false;
+    }
+    if (reset) {
+      *reset = true;
+    }
+    LogSendCodecChange("Capture size changed");
+  }
+
+  return true;
+}
+
+// Clamps the codec's start bitrate into [minBitrate, maxBitrate] and, when
+// the encoder already has a target bitrate, raises the start bitrate to the
+// (max-capped) current target so the encoder does not restart low.
+void WebRtcVideoMediaChannel::MaybeChangeStartBitrate(
+    int channel_id, webrtc::VideoCodec* video_codec) {
+  if (video_codec->startBitrate < video_codec->minBitrate) {
+    video_codec->startBitrate = video_codec->minBitrate;
+  } else if (video_codec->startBitrate > video_codec->maxBitrate) {
+    video_codec->startBitrate = video_codec->maxBitrate;
+  }
+
+  // Use a previous target bitrate, if there is one.
+  unsigned int target_bitrate_bps = 0;
+  if (engine()->vie()->codec()->GetCodecTargetBitrate(
+      channel_id, &target_bitrate_bps) != 0) {
+    return;
+  }
+  // Convert to kbps.
+  unsigned int target_bitrate_kbps = target_bitrate_bps / 1000;
+  if (target_bitrate_kbps > video_codec->maxBitrate) {
+    target_bitrate_kbps = video_codec->maxBitrate;
+  }
+  if (target_bitrate_kbps > video_codec->startBitrate) {
+    video_codec->startBitrate = target_bitrate_kbps;
+  }
+}
+
+// talk_base::MessageHandler implementation. The only message posted to this
+// handler is the delayed black-frame flush queued by QueueBlackFrame(); the
+// payload is owned here and deleted after use.
+void WebRtcVideoMediaChannel::OnMessage(talk_base::Message* msg) {
+  FlushBlackFrameData* black_frame_data =
+      static_cast<FlushBlackFrameData*>(msg->pdata);
+  FlushBlackFrame(black_frame_data->ssrc, black_frame_data->timestamp);
+  delete black_frame_data;
+}
+
+// webrtc::Transport implementation: hands an outgoing RTP packet from ViE
+// to the cricket network stack. Returns |len| on success, -1 on failure.
+int WebRtcVideoMediaChannel::SendPacket(int channel, const void* data,
+                                        int len) {
+  talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+  return MediaChannel::SendPacket(&packet) ? len : -1;
+}
+
+// webrtc::Transport implementation: hands an outgoing RTCP packet from ViE
+// to the cricket network stack. Returns |len| on success, -1 on failure.
+int WebRtcVideoMediaChannel::SendRTCPPacket(int channel,
+                                            const void* data,
+                                            int len) {
+  talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+  return MediaChannel::SendRtcp(&packet) ? len : -1;
+}
+
+// Schedules a black-frame flush for |ssrc| two frame intervals from now
+// (handled in OnMessage/FlushBlackFrame). A zero |timestamp| disables the
+// mechanism.
+void WebRtcVideoMediaChannel::QueueBlackFrame(uint32 ssrc, int64 timestamp,
+                                              int framerate) {
+  if (timestamp) {
+    FlushBlackFrameData* black_frame_data = new FlushBlackFrameData(
+        ssrc,
+        timestamp);
+    // Two frame intervals, converted from FpsToInterval's nanosecond units
+    // to the milliseconds PostDelayed expects.
+    const int delay_ms = static_cast<int>(
+        2 * cricket::VideoFormat::FpsToInterval(framerate) *
+        talk_base::kNumMillisecsPerSec / talk_base::kNumNanosecsPerSec);
+    worker_thread()->PostDelayed(delay_ms, this, 0, black_frame_data);
+  }
+}
+
+// Delayed handler for QueueBlackFrame. If the stream's last captured frame
+// still carries |timestamp| (i.e. no newer frame arrived while the flush was
+// queued), one black frame at the send codec size is synthesized and pushed
+// through the normal SendFrame path.
+void WebRtcVideoMediaChannel::FlushBlackFrame(uint32 ssrc, int64 timestamp) {
+  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
+  if (!send_channel) {
+    return;
+  }
+  // NOTE(review): |black_frame_ptr| appears to be unused.
+  talk_base::scoped_ptr<const VideoFrame> black_frame_ptr;
+
+  const WebRtcLocalStreamInfo* channel_stream_info =
+      send_channel->local_stream_info();
+  int64 last_frame_time_stamp = channel_stream_info->time_stamp();
+  if (last_frame_time_stamp == timestamp) {
+    size_t last_frame_width = 0;
+    size_t last_frame_height = 0;
+    int64 last_frame_elapsed_time = 0;
+    channel_stream_info->GetLastFrameInfo(&last_frame_width, &last_frame_height,
+                                          &last_frame_elapsed_time);
+    if (!last_frame_width || !last_frame_height) {
+      // No frame dimensions recorded yet; nothing to synthesize.
+      return;
+    }
+    WebRtcVideoFrame black_frame;
+    // Black frame is not screencast.
+    const bool screencasting = false;
+    const int64 timestamp_delta = send_channel->interval();
+    if (!black_frame.InitToBlack(send_codec_->width, send_codec_->height, 1, 1,
+                                 last_frame_elapsed_time + timestamp_delta,
+                                 last_frame_time_stamp + timestamp_delta) ||
+        !SendFrame(send_channel, &black_frame, screencasting)) {
+      LOG(LS_ERROR) << "Failed to send black frame.";
+    }
+  }
+}
+
+// Slot connected to each send channel's SignalCpuAdaptationUnable (see
+// ConfigureSending): surfaces a media error when CPU adaptation can no
+// longer downgrade.
+void WebRtcVideoMediaChannel::OnCpuAdaptationUnable() {
+  // ssrc is hardcoded to 0. This message is based on a system wide issue,
+  // so finding which ssrc caused it doesn't matter.
+  SignalMediaError(0, VideoMediaChannel::ERROR_REC_CPU_MAX_CANT_DOWNGRADE);
+}
+
+// Forwards the network transmission on/off state to every ViE send channel.
+void WebRtcVideoMediaChannel::SetNetworkTransmissionState(
+    bool is_transmitting) {
+  LOG(LS_INFO) << "SetNetworkTransmissionState: " << is_transmitting;
+  for (SendChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    WebRtcVideoChannelSendInfo* send_channel = iter->second;
+    int channel_id = send_channel->channel_id();
+    engine_->vie()->network()->SetNetworkTransmissionState(channel_id,
+                                                           is_transmitting);
+  }
+}
+
+// Enables or disables an RTP header extension on |channel_id| through the
+// given ViERTP_RTCP member function. A NULL |extension| means "disable"
+// (id 0); otherwise the extension is enabled with its negotiated id.
+bool WebRtcVideoMediaChannel::SetHeaderExtension(ExtensionSetterFunction setter,
+    int channel_id, const RtpHeaderExtension* extension) {
+  bool enable = false;
+  int id = 0;
+  if (extension) {
+    enable = true;
+    id = extension->id;
+  }
+  if ((engine_->vie()->rtp()->*setter)(channel_id, enable, id) != 0) {
+    // |extension| may legitimately be NULL here (disable path); the old code
+    // dereferenced it unconditionally and crashed when a disable call failed.
+    LOG_RTCERR4(*setter, extension ? extension->uri : "(no extension)",
+                channel_id, enable, id);
+    return false;
+  }
+  return true;
+}
+
+// Looks up the (possibly absent) extension with |header_extension_uri| in
+// |extensions| and delegates to the pointer-based overload, which treats a
+// missing extension as "disable".
+bool WebRtcVideoMediaChannel::SetHeaderExtension(ExtensionSetterFunction setter,
+    int channel_id, const std::vector<RtpHeaderExtension>& extensions,
+    const char header_extension_uri[]) {
+  return SetHeaderExtension(
+      setter, channel_id,
+      FindHeaderExtension(extensions, header_extension_uri));
+}
+} // namespace cricket
+
+#endif // HAVE_WEBRTC_VIDEO
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.h
new file mode 100644
index 00000000000..f0293bb5d41
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine.h
@@ -0,0 +1,441 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTCVIDEOENGINE_H_
+#define TALK_MEDIA_WEBRTCVIDEOENGINE_H_
+
+#include <map>
+#include <vector>
+
+#include "talk/base/scoped_ptr.h"
+#include "talk/media/base/codec.h"
+#include "talk/media/base/videocommon.h"
+#include "talk/media/webrtc/webrtccommon.h"
+#include "talk/media/webrtc/webrtcexport.h"
+#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
+#include "talk/session/media/channel.h"
+#include "webrtc/video_engine/include/vie_base.h"
+
+#if !defined(LIBPEERCONNECTION_LIB) && \
+ !defined(LIBPEERCONNECTION_IMPLEMENTATION)
+#error "Bogus include."
+#endif
+
+namespace webrtc {
+class VideoCaptureModule;
+class VideoDecoder;
+class VideoEncoder;
+class VideoRender;
+class ViEExternalCapture;
+class ViERTP_RTCP;
+}
+
+namespace talk_base {
+class CpuMonitor;
+} // namespace talk_base
+
+namespace cricket {
+
+class VideoCapturer;
+class VideoFrame;
+class VideoProcessor;
+class VideoRenderer;
+class ViETraceWrapper;
+class ViEWrapper;
+class VoiceMediaChannel;
+class WebRtcDecoderObserver;
+class WebRtcEncoderObserver;
+class WebRtcLocalStreamInfo;
+class WebRtcRenderAdapter;
+class WebRtcVideoChannelRecvInfo;
+class WebRtcVideoChannelSendInfo;
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+class WebRtcVideoMediaChannel;
+class WebRtcVoiceEngine;
+
+struct CapturedFrame;
+struct Device;
+
+// Video engine implementation backed by the WebRTC VideoEngine (ViE). Owns
+// the ViE wrapper, trace wrapper, render module, codec and RTP header
+// extension lists and the CPU monitor; holds (but does not own) optional
+// external encoder/decoder factories; creates WebRtcVideoMediaChannels and
+// receives WebRTC trace output via webrtc::TraceCallback.
+class WebRtcVideoEngine : public sigslot::has_slots<>,
+                          public webrtc::TraceCallback,
+                          public WebRtcVideoEncoderFactory::Observer {
+ public:
+  // Creates the WebRtcVideoEngine with internal VideoCaptureModule.
+  WebRtcVideoEngine();
+  // For testing purposes. Allows the WebRtcVoiceEngine,
+  // ViEWrapper and CpuMonitor to be mocks.
+  // TODO(juberti): Remove the 3-arg ctor once fake tracing is implemented.
+  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+                    ViEWrapper* vie_wrapper,
+                    talk_base::CpuMonitor* cpu_monitor);
+  WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
+                    ViEWrapper* vie_wrapper,
+                    ViETraceWrapper* tracing,
+                    talk_base::CpuMonitor* cpu_monitor);
+  ~WebRtcVideoEngine();
+
+  // Basic video engine implementation.
+  bool Init(talk_base::Thread* worker_thread);
+  void Terminate();
+
+  int GetCapabilities();
+  bool SetOptions(int options);
+  bool SetDefaultEncoderConfig(const VideoEncoderConfig& config);
+
+  WebRtcVideoMediaChannel* CreateChannel(VoiceMediaChannel* voice_channel);
+
+  const std::vector<VideoCodec>& codecs() const;
+  const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
+  void SetLogging(int min_sev, const char* filter);
+
+  bool SetLocalRenderer(VideoRenderer* renderer);
+  // Re-broadcasts capturer state changes to listeners on this engine.
+  sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange;
+
+  // Set the VoiceEngine for A/V sync. This can only be called before Init.
+  bool SetVoiceEngine(WebRtcVoiceEngine* voice_engine);
+  // Set a WebRtcVideoDecoderFactory for external decoding. Video engine does
+  // not take the ownership of |decoder_factory|. The caller needs to make sure
+  // that |decoder_factory| outlives the video engine.
+  void SetExternalDecoderFactory(WebRtcVideoDecoderFactory* decoder_factory);
+  // Set a WebRtcVideoEncoderFactory for external encoding. Video engine does
+  // not take the ownership of |encoder_factory|. The caller needs to make sure
+  // that |encoder_factory| outlives the video engine.
+  void SetExternalEncoderFactory(WebRtcVideoEncoderFactory* encoder_factory);
+  // Enable the render module with timing control.
+  bool EnableTimedRender();
+
+  // Returns an external decoder for the given codec type. The return value
+  // can be NULL if decoder factory is not given or it does not support the
+  // codec type. The caller takes the ownership of the returned object.
+  webrtc::VideoDecoder* CreateExternalDecoder(webrtc::VideoCodecType type);
+  // Releases the decoder instance created by CreateExternalDecoder().
+  void DestroyExternalDecoder(webrtc::VideoDecoder* decoder);
+
+  // Returns an external encoder for the given codec type. The return value
+  // can be NULL if encoder factory is not given or it does not support the
+  // codec type. The caller takes the ownership of the returned object.
+  webrtc::VideoEncoder* CreateExternalEncoder(webrtc::VideoCodecType type);
+  // Releases the encoder instance created by CreateExternalEncoder().
+  void DestroyExternalEncoder(webrtc::VideoEncoder* encoder);
+
+  // Returns true if the codec type is supported by the external encoder.
+  bool IsExternalEncoderCodecType(webrtc::VideoCodecType type) const;
+
+  // Functions called by WebRtcVideoMediaChannel.
+  talk_base::Thread* worker_thread() { return worker_thread_; }
+  ViEWrapper* vie() { return vie_wrapper_.get(); }
+  const VideoFormat& default_codec_format() const {
+    return default_codec_format_;
+  }
+  int GetLastEngineError();
+  bool FindCodec(const VideoCodec& in);
+  bool CanSendCodec(const VideoCodec& in, const VideoCodec& current,
+                    VideoCodec* out);
+  void RegisterChannel(WebRtcVideoMediaChannel* channel);
+  void UnregisterChannel(WebRtcVideoMediaChannel* channel);
+  bool ConvertFromCricketVideoCodec(const VideoCodec& in_codec,
+                                    webrtc::VideoCodec* out_codec);
+  // Check whether the supplied trace should be ignored.
+  bool ShouldIgnoreTrace(const std::string& trace);
+  int GetNumOfChannels();
+
+  VideoFormat GetStartCaptureFormat() const { return default_codec_format_; }
+
+  talk_base::CpuMonitor* cpu_monitor() { return cpu_monitor_.get(); }
+
+ protected:
+  // When a video processor registers with the engine.
+  // SignalMediaFrame will be invoked for every video frame.
+  // See videoprocessor.h for param reference.
+  sigslot::signal3<uint32, VideoFrame*, bool*> SignalMediaFrame;
+
+ private:
+  typedef std::vector<WebRtcVideoMediaChannel*> VideoChannels;
+  // Name/payload-type/preference triple for the static codec table.
+  struct VideoCodecPref {
+    const char* name;
+    int payload_type;
+    int pref;
+  };
+
+  static const VideoCodecPref kVideoCodecPrefs[];
+  static const VideoFormatPod kVideoFormats[];
+  static const VideoFormatPod kDefaultVideoFormat;
+
+  void Construct(ViEWrapper* vie_wrapper,
+                 ViETraceWrapper* tracing,
+                 WebRtcVoiceEngine* voice_engine,
+                 talk_base::CpuMonitor* cpu_monitor);
+  bool SetDefaultCodec(const VideoCodec& codec);
+  bool RebuildCodecList(const VideoCodec& max_codec);
+  void SetTraceFilter(int filter);
+  void SetTraceOptions(const std::string& options);
+  bool InitVideoEngine();
+
+  // webrtc::TraceCallback implementation.
+  virtual void Print(webrtc::TraceLevel level, const char* trace, int length);
+
+  // WebRtcVideoEncoderFactory::Observer implementation.
+  virtual void OnCodecsAvailable();
+
+  talk_base::Thread* worker_thread_;
+  talk_base::scoped_ptr<ViEWrapper> vie_wrapper_;
+  bool vie_wrapper_base_initialized_;
+  talk_base::scoped_ptr<ViETraceWrapper> tracing_;
+  // Raw pointer; set via constructor or SetVoiceEngine(), used for A/V sync.
+  WebRtcVoiceEngine* voice_engine_;
+  talk_base::scoped_ptr<webrtc::VideoRender> render_module_;
+  WebRtcVideoEncoderFactory* encoder_factory_;  // Not owned (see setter).
+  WebRtcVideoDecoderFactory* decoder_factory_;  // Not owned (see setter).
+  std::vector<VideoCodec> video_codecs_;
+  std::vector<RtpHeaderExtension> rtp_header_extensions_;
+  VideoFormat default_codec_format_;
+
+  bool initialized_;
+  talk_base::CriticalSection channels_crit_;
+  VideoChannels channels_;
+
+  bool capture_started_;
+  int local_renderer_w_;
+  int local_renderer_h_;
+  VideoRenderer* local_renderer_;
+
+  // Critical section to protect the media processor register/unregister
+  // while processing a frame
+  talk_base::CriticalSection signal_media_critical_;
+
+  talk_base::scoped_ptr<talk_base::CpuMonitor> cpu_monitor_;
+};
+
+// VideoMediaChannel backed by a WebRTC ViE channel. Implements
+// webrtc::Transport so ViE's outgoing RTP/RTCP packets are forwarded to the
+// cricket NetworkInterface, and talk_base::MessageHandler for deferred work
+// posted to the worker thread.
+class WebRtcVideoMediaChannel : public talk_base::MessageHandler,
+ public VideoMediaChannel,
+ public webrtc::Transport {
+ public:
+ WebRtcVideoMediaChannel(WebRtcVideoEngine* engine,
+ VoiceMediaChannel* voice_channel);
+ ~WebRtcVideoMediaChannel();
+ bool Init();
+
+ // Simple accessors.
+ WebRtcVideoEngine* engine() { return engine_; }
+ VoiceMediaChannel* voice_channel() { return voice_channel_; }
+ int video_channel() const { return vie_channel_; }
+ bool sending() const { return sending_; }
+
+ // VideoMediaChannel implementation
+ virtual bool SetRecvCodecs(const std::vector<VideoCodec> &codecs);
+ virtual bool SetSendCodecs(const std::vector<VideoCodec> &codecs);
+ virtual bool GetSendCodec(VideoCodec* send_codec);
+ virtual bool SetSendStreamFormat(uint32 ssrc, const VideoFormat& format);
+ virtual bool SetRender(bool render);
+ virtual bool SetSend(bool send);
+
+ virtual bool AddSendStream(const StreamParams& sp);
+ virtual bool RemoveSendStream(uint32 ssrc);
+ virtual bool AddRecvStream(const StreamParams& sp);
+ virtual bool RemoveRecvStream(uint32 ssrc);
+ virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer);
+ virtual bool GetStats(VideoMediaInfo* info);
+ virtual bool SetCapturer(uint32 ssrc, VideoCapturer* capturer);
+ virtual bool SendIntraFrame();
+ virtual bool RequestIntraFrame();
+
+ virtual void OnPacketReceived(talk_base::Buffer* packet);
+ virtual void OnRtcpReceived(talk_base::Buffer* packet);
+ virtual void OnReadyToSend(bool ready);
+ virtual bool MuteStream(uint32 ssrc, bool on);
+ virtual bool SetRecvRtpHeaderExtensions(
+ const std::vector<RtpHeaderExtension>& extensions);
+ virtual bool SetSendRtpHeaderExtensions(
+ const std::vector<RtpHeaderExtension>& extensions);
+ virtual bool SetSendBandwidth(bool autobw, int bps);
+ virtual bool SetOptions(const VideoOptions &options);
+ virtual bool GetOptions(VideoOptions *options) const {
+ *options = options_;
+ return true;
+ }
+ virtual void SetInterface(NetworkInterface* iface);
+ virtual void UpdateAspectRatio(int ratio_w, int ratio_h);
+
+ // Public functions for use by tests and other specialized code.
+ // NOTE(review): always returns 0.
+ uint32 send_ssrc() const { return 0; }
+ bool GetRenderer(uint32 ssrc, VideoRenderer** renderer);
+ void SendFrame(VideoCapturer* capturer, const VideoFrame* frame);
+ bool SendFrame(WebRtcVideoChannelSendInfo* channel_info,
+ const VideoFrame* frame, bool is_screencast);
+
+ void AdaptAndSendFrame(VideoCapturer* capturer, const VideoFrame* frame);
+
+ // Thunk functions for use with HybridVideoEngine
+ void OnLocalFrame(VideoCapturer* capturer, const VideoFrame* frame) {
+ SendFrame(0u, frame, capturer->IsScreencast());
+ }
+ void OnLocalFrameFormat(VideoCapturer* capturer, const VideoFormat* format) {
+ }
+
+ virtual void OnMessage(talk_base::Message* msg);
+
+ protected:
+ int GetLastEngineError() { return engine()->GetLastEngineError(); }
+ // webrtc::Transport implementation: ViE hands us serialized RTP/RTCP
+ // packets for |channel| to put on the wire.
+ virtual int SendPacket(int channel, const void* data, int len);
+ virtual int SendRTCPPacket(int channel, const void* data, int len);
+
+ private:
+ typedef std::map<uint32, WebRtcVideoChannelRecvInfo*> RecvChannelMap;
+ typedef std::map<uint32, WebRtcVideoChannelSendInfo*> SendChannelMap;
+ // Pointer-to-member over ViERTP_RTCP used to toggle a single RTP header
+ // extension on a channel; see SetHeaderExtension() below.
+ typedef int (webrtc::ViERTP_RTCP::* ExtensionSetterFunction)(int, bool, int);
+
+ enum MediaDirection { MD_RECV, MD_SEND, MD_SENDRECV };
+
+ // Creates and initializes a ViE channel. When successful |channel_id| will
+ // contain the new channel's ID. If |receiving| is true |ssrc| is the
+ // remote ssrc. If |sending| is true the ssrc is local ssrc. If both
+ // |receiving| and |sending| are true the ssrc must be 0 and the channel will
+ // be created as a default channel. The ssrc must be different for receive
+ // channels and it must be different for send channels. If the same SSRC is
+ // being used for creating channel more than once, this function will fail
+ // returning false.
+ bool CreateChannel(uint32 ssrc_key, MediaDirection direction,
+ int* channel_id);
+ bool ConfigureChannel(int channel_id, MediaDirection direction,
+ uint32 ssrc_key);
+ bool ConfigureReceiving(int channel_id, uint32 remote_ssrc_key);
+ bool ConfigureSending(int channel_id, uint32 local_ssrc_key);
+ bool SetNackFec(int channel_id, int red_payload_type, int fec_payload_type,
+ bool nack_enabled);
+ bool SetSendCodec(const webrtc::VideoCodec& codec, int min_bitrate,
+ int start_bitrate, int max_bitrate);
+ bool SetSendCodec(WebRtcVideoChannelSendInfo* send_channel,
+ const webrtc::VideoCodec& codec, int min_bitrate,
+ int start_bitrate, int max_bitrate);
+ void LogSendCodecChange(const std::string& reason);
+ // Prepares the channel with channel id |info->channel_id()| to receive all
+ // codecs in |receive_codecs_| and start receive packets.
+ bool SetReceiveCodecs(WebRtcVideoChannelRecvInfo* info);
+ // Returns the channel number that receives the stream with SSRC |ssrc|.
+ int GetRecvChannelNum(uint32 ssrc);
+ // Given captured video frame size, checks if we need to reset vie send codec.
+ // |reset| is set to whether resetting has happened on vie or not.
+ // Returns false on error.
+ bool MaybeResetVieSendCodec(WebRtcVideoChannelSendInfo* send_channel,
+ int new_width, int new_height, bool is_screencast,
+ bool* reset);
+ // Checks the current bitrate estimate and modifies the start bitrate
+ // accordingly.
+ void MaybeChangeStartBitrate(int channel_id, webrtc::VideoCodec* video_codec);
+ // Helper function for starting the sending of media on all channels or
+ // |channel_id|. Note that these two functions do not change |sending_|.
+ bool StartSend();
+ bool StartSend(WebRtcVideoChannelSendInfo* send_channel);
+ // Helper function for stopping the sending of media on all channels or
+ // |channel_id|. Note that these two functions do not change |sending_|.
+ bool StopSend();
+ bool StopSend(WebRtcVideoChannelSendInfo* send_channel);
+ bool SendIntraFrame(int channel_id);
+
+ // Send with one local SSRC. Normal case.
+ bool IsOneSsrcStream(const StreamParams& sp);
+
+ bool HasReadySendChannels();
+
+ // Send channel key returns the key corresponding to the provided local SSRC
+ // in |key|. The return value is true upon success.
+ // If the local ssrc corresponds to that of the default channel the key is 0.
+ // For all other channels the returned key will be the same as the local ssrc.
+ bool GetSendChannelKey(uint32 local_ssrc, uint32* key);
+ WebRtcVideoChannelSendInfo* GetSendChannel(VideoCapturer* video_capturer);
+ WebRtcVideoChannelSendInfo* GetSendChannel(uint32 local_ssrc);
+ // Creates a new unique key that can be used for inserting a new send channel
+ // into |send_channels_|
+ bool CreateSendChannelKey(uint32 local_ssrc, uint32* key);
+
+ bool IsDefaultChannel(int channel_id) const {
+ return channel_id == vie_channel_;
+ }
+ uint32 GetDefaultChannelSsrc();
+
+ bool DeleteSendChannel(uint32 ssrc_key);
+
+ bool InConferenceMode() const {
+ return options_.conference_mode.GetWithDefaultIfUnset(false);
+ }
+ bool RemoveCapturer(uint32 ssrc);
+
+
+ talk_base::MessageQueue* worker_thread() { return engine_->worker_thread(); }
+ void QueueBlackFrame(uint32 ssrc, int64 timestamp, int framerate);
+ void FlushBlackFrame(uint32 ssrc, int64 timestamp);
+
+ void SetNetworkTransmissionState(bool is_transmitting);
+
+ // Toggles a single RTP header extension, or all extensions in |extensions|
+ // matching |header_extension_uri|, on |channel_id| via |setter|.
+ bool SetHeaderExtension(ExtensionSetterFunction setter, int channel_id,
+ const RtpHeaderExtension* extension);
+ bool SetHeaderExtension(ExtensionSetterFunction setter, int channel_id,
+ const std::vector<RtpHeaderExtension>& extensions,
+ const char header_extension_uri[]);
+
+ // Signal when cpu adaptation has no further scope to adapt.
+ void OnCpuAdaptationUnable();
+
+ // Global state.
+ WebRtcVideoEngine* engine_;
+ VoiceMediaChannel* voice_channel_;
+ int vie_channel_;
+ bool nack_enabled_;
+ // Receiver Estimated Max Bitrate
+ bool remb_enabled_;
+ VideoOptions options_;
+
+ // Global recv side state.
+ // Note the default channel (vie_channel_), i.e. the send channel
+ // corresponding to all the receive channels (this must be done for REMB to
+ // work properly), resides in both recv_channels_ and send_channels_ with the
+ // ssrc key 0.
+ RecvChannelMap recv_channels_; // Contains all receive channels.
+ std::vector<webrtc::VideoCodec> receive_codecs_;
+ bool render_started_;
+ uint32 first_receive_ssrc_;
+ std::vector<RtpHeaderExtension> receive_extensions_;
+
+ // Global send side state.
+ SendChannelMap send_channels_;
+ talk_base::scoped_ptr<webrtc::VideoCodec> send_codec_;
+ int send_red_type_;
+ int send_fec_type_;
+ int send_min_bitrate_;
+ int send_start_bitrate_;
+ int send_max_bitrate_;
+ bool sending_;
+ std::vector<RtpHeaderExtension> send_extensions_;
+
+ // The aspect ratio that the channel desires. 0 means there is no desired
+ // aspect ratio
+ int ratio_w_;
+ int ratio_h_;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCVIDEOENGINE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine_unittest.cc
new file mode 100644
index 00000000000..840fcdd0868
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoengine_unittest.cc
@@ -0,0 +1,1826 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/fakecpumonitor.h"
+#include "talk/base/gunit.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/fakemediaprocessor.h"
+#include "talk/media/base/fakenetworkinterface.h"
+#include "talk/media/base/fakevideorenderer.h"
+#include "talk/media/base/mediachannel.h"
+#include "talk/media/base/testutils.h"
+#include "talk/media/base/videoengine_unittest.h"
+#include "talk/media/webrtc/fakewebrtcvideocapturemodule.h"
+#include "talk/media/webrtc/fakewebrtcvideoengine.h"
+#include "talk/media/webrtc/fakewebrtcvoiceengine.h"
+#include "talk/media/webrtc/webrtcvideocapturer.h"
+#include "talk/media/webrtc/webrtcvideoengine.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+#include "talk/media/webrtc/webrtcvoiceengine.h"
+#include "talk/session/media/mediasession.h"
+#include "webrtc/system_wrappers/interface/trace.h"
+
+// Tests for the WebRtcVideoEngine/VideoChannel code.
+
+// VP8 codec definitions at several resolutions; the constructor args appear
+// to be (payload type, name, width, height, fps, preference) -- TODO confirm.
+static const cricket::VideoCodec kVP8Codec720p(100, "VP8", 1280, 720, 30, 0);
+static const cricket::VideoCodec kVP8Codec360p(100, "VP8", 640, 360, 30, 0);
+static const cricket::VideoCodec kVP8Codec270p(100, "VP8", 480, 270, 30, 0);
+static const cricket::VideoCodec kVP8Codec180p(100, "VP8", 320, 180, 30, 0);
+
+// Codec set the fake engine is constructed with (see WebRtcVideoEngineTestFake).
+static const cricket::VideoCodec kVP8Codec(100, "VP8", 640, 400, 30, 0);
+static const cricket::VideoCodec kRedCodec(101, "red", 0, 0, 0, 0);
+static const cricket::VideoCodec kUlpFecCodec(102, "ulpfec", 0, 0, 0, 0);
+static const cricket::VideoCodec* const kVideoCodecs[] = {
+ &kVP8Codec,
+ &kRedCodec,
+ &kUlpFecCodec
+};
+
+// Expected engine default bitrate limits (kbps), used as the default
+// arguments of VerifyVP8SendCodec().
+static const unsigned int kMinBandwidthKbps = 50;
+static const unsigned int kStartBandwidthKbps = 300;
+static const unsigned int kMaxBandwidthKbps = 2000;
+
+static const unsigned int kNumberOfTemporalLayers = 1;
+
+// ViEWrapper that routes every WebRTC sub-API to the same fake engine, so a
+// single FakeWebRtcVideoEngine instance stands in for all of ViE.
+class FakeViEWrapper : public cricket::ViEWrapper {
+ public:
+ explicit FakeViEWrapper(cricket::FakeWebRtcVideoEngine* engine)
+ : cricket::ViEWrapper(engine, // base
+ engine, // codec
+ engine, // capture
+ engine, // network
+ engine, // render
+ engine, // rtp
+ engine, // image
+ engine) { // external decoder
+ }
+};
+
+// Test fixture to test WebRtcVideoEngine with a fake webrtc::VideoEngine.
+// Useful for testing failure paths.
+class WebRtcVideoEngineTestFake :
+ public testing::Test,
+ public sigslot::has_slots<> {
+ public:
+ WebRtcVideoEngineTestFake()
+ : vie_(kVideoCodecs, ARRAY_SIZE(kVideoCodecs)),
+ cpu_monitor_(new talk_base::FakeCpuMonitor(
+ talk_base::Thread::Current())),
+ engine_(NULL, // cricket::WebRtcVoiceEngine
+ new FakeViEWrapper(&vie_), cpu_monitor_),
+ channel_(NULL),
+ voice_channel_(NULL),
+ last_error_(cricket::VideoMediaChannel::ERROR_NONE) {
+ }
+ // Initializes the engine and creates a media channel whose error signal is
+ // wired to OnMediaError(). Returns false if either step fails.
+ bool SetupEngine() {
+ bool result = engine_.Init(talk_base::Thread::Current());
+ if (result) {
+ channel_ = engine_.CreateChannel(voice_channel_);
+ // NOTE(review): connect() runs before the NULL check below; if
+ // CreateChannel() ever returned NULL this would dereference it.
+ channel_->SignalMediaError.connect(this,
+ &WebRtcVideoEngineTestFake::OnMediaError);
+ result = (channel_ != NULL);
+ }
+ return result;
+ }
+ // Records the most recent media error signaled by the channel.
+ void OnMediaError(uint32 ssrc, cricket::VideoMediaChannel::Error error) {
+ last_error_ = error;
+ }
+ // Builds a blank I420 frame of the given size and pushes it through the
+ // channel as if captured. Returns false if there is no channel or frame
+ // initialization fails.
+ bool SendI420Frame(int width, int height) {
+ if (NULL == channel_) {
+ return false;
+ }
+ cricket::WebRtcVideoFrame frame;
+ size_t size = width * height * 3 / 2; // I420
+ talk_base::scoped_array<uint8> pixel(new uint8[size]);
+ if (!frame.Init(cricket::FOURCC_I420,
+ width, height, width, height,
+ pixel.get(), size, 1, 1, 0, 0, 0)) {
+ return false;
+ }
+ cricket::FakeVideoCapturer capturer;
+ channel_->SendFrame(&capturer, &frame);
+ return true;
+ }
+ bool SendI420ScreencastFrame(int width, int height) {
+ return SendI420ScreencastFrameWithTimestamp(width, height, 0);
+ }
+ // Same as SendI420Frame() but with the capturer marked as a screencast and
+ // an explicit frame timestamp.
+ bool SendI420ScreencastFrameWithTimestamp(
+ int width, int height, int64 timestamp) {
+ if (NULL == channel_) {
+ return false;
+ }
+ cricket::WebRtcVideoFrame frame;
+ size_t size = width * height * 3 / 2; // I420
+ talk_base::scoped_array<uint8> pixel(new uint8[size]);
+ if (!frame.Init(cricket::FOURCC_I420,
+ width, height, width, height,
+ pixel.get(), size, 1, 1, 0, timestamp, 0)) {
+ return false;
+ }
+ cricket::FakeVideoCapturer capturer;
+ capturer.SetScreencast(true);
+ channel_->SendFrame(&capturer, &frame);
+ return true;
+ }
+ // Reads back the send codec for |channel_num| from the fake engine and
+ // verifies it is VP8 with the given dimensions, bitrates, fps and qpMax.
+ void VerifyVP8SendCodec(int channel_num,
+ unsigned int width,
+ unsigned int height,
+ unsigned int layers = 0,
+ unsigned int max_bitrate = kMaxBandwidthKbps,
+ unsigned int min_bitrate = kMinBandwidthKbps,
+ unsigned int start_bitrate = kStartBandwidthKbps,
+ unsigned int fps = 30,
+ unsigned int max_quantization = 0
+ ) {
+ webrtc::VideoCodec gcodec;
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+
+ // Video codec properties.
+ EXPECT_EQ(webrtc::kVideoCodecVP8, gcodec.codecType);
+ EXPECT_STREQ("VP8", gcodec.plName);
+ EXPECT_EQ(100, gcodec.plType);
+ EXPECT_EQ(width, gcodec.width);
+ EXPECT_EQ(height, gcodec.height);
+ EXPECT_EQ(talk_base::_min(start_bitrate, max_bitrate), gcodec.startBitrate);
+ EXPECT_EQ(max_bitrate, gcodec.maxBitrate);
+ EXPECT_EQ(min_bitrate, gcodec.minBitrate);
+ EXPECT_EQ(fps, gcodec.maxFramerate);
+ // VP8 specific.
+ EXPECT_FALSE(gcodec.codecSpecific.VP8.pictureLossIndicationOn);
+ EXPECT_FALSE(gcodec.codecSpecific.VP8.feedbackModeOn);
+ EXPECT_EQ(webrtc::kComplexityNormal, gcodec.codecSpecific.VP8.complexity);
+ EXPECT_EQ(webrtc::kResilienceOff, gcodec.codecSpecific.VP8.resilience);
+ EXPECT_EQ(max_quantization, gcodec.qpMax);
+ }
+ virtual void TearDown() {
+ delete channel_;
+ engine_.Terminate();
+ }
+
+ protected:
+ // Note: vie_ is declared before engine_, which holds a pointer into it.
+ cricket::FakeWebRtcVideoEngine vie_;
+ cricket::FakeWebRtcVideoDecoderFactory decoder_factory_;
+ cricket::FakeWebRtcVideoEncoderFactory encoder_factory_;
+ talk_base::FakeCpuMonitor* cpu_monitor_;
+ cricket::WebRtcVideoEngine engine_;
+ cricket::WebRtcVideoMediaChannel* channel_;
+ cricket::WebRtcVoiceMediaChannel* voice_channel_;
+ cricket::VideoMediaChannel::Error last_error_;
+};
+
+// Test fixtures to test WebRtcVideoEngine with a real webrtc::VideoEngine.
+class WebRtcVideoEngineTest
+ : public VideoEngineTest<cricket::WebRtcVideoEngine> {
+ protected:
+ typedef VideoEngineTest<cricket::WebRtcVideoEngine> Base;
+};
+// Channel-level tests against the real engine; VP8 is the default codec.
+class WebRtcVideoMediaChannelTest
+ : public VideoMediaChannelTest<
+ cricket::WebRtcVideoEngine, cricket::WebRtcVideoMediaChannel> {
+ protected:
+ typedef VideoMediaChannelTest<cricket::WebRtcVideoEngine,
+ cricket::WebRtcVideoMediaChannel> Base;
+ virtual cricket::VideoCodec DefaultCodec() { return kVP8Codec; }
+ virtual void SetUp() {
+ Base::SetUp();
+ }
+ virtual void TearDown() {
+ Base::TearDown();
+ }
+};
+
+/////////////////////////
+// Tests with fake ViE //
+/////////////////////////
+
+// Tests that our stub library "works": Init() brings up the fake ViE and
+// Terminate() can be called afterwards.
+TEST_F(WebRtcVideoEngineTestFake, StartupShutdown) {
+ EXPECT_FALSE(vie_.IsInited());
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ EXPECT_TRUE(vie_.IsInited());
+ engine_.Terminate();
+}
+
+// Tests that webrtc logs are logged when they should be.
+TEST_F(WebRtcVideoEngineTest, WebRtcShouldLog) {
+ const char webrtc_log[] = "WebRtcVideoEngineTest.WebRtcShouldLog";
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ engine_.SetLogging(talk_base::LS_INFO, "");
+ std::string str;
+ talk_base::StringStream stream(str);
+ talk_base::LogMessage::AddLogToStream(&stream, talk_base::LS_INFO);
+ EXPECT_EQ(talk_base::LS_INFO, talk_base::LogMessage::GetLogToStream(&stream));
+ // Inject a webrtc trace and expect it to appear in the captured log stream.
+ webrtc::Trace::Add(webrtc::kTraceStateInfo, webrtc::kTraceUndefined, 0,
+ webrtc_log);
+ EXPECT_TRUE_WAIT(std::string::npos != str.find(webrtc_log), 10);
+ talk_base::LogMessage::RemoveLogToStream(&stream);
+}
+
+// Tests that webrtc logs are not logged when they shouldn't be.
+TEST_F(WebRtcVideoEngineTest, WebRtcShouldNotLog) {
+ const char webrtc_log[] = "WebRtcVideoEngineTest.WebRtcShouldNotLog";
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ // WebRTC should never be logged lower than LS_INFO.
+ engine_.SetLogging(talk_base::LS_WARNING, "");
+ std::string str;
+ talk_base::StringStream stream(str);
+ // Make sure that WebRTC is not logged, even at lowest severity
+ talk_base::LogMessage::AddLogToStream(&stream, talk_base::LS_SENSITIVE);
+ EXPECT_EQ(talk_base::LS_SENSITIVE,
+ talk_base::LogMessage::GetLogToStream(&stream));
+ webrtc::Trace::Add(webrtc::kTraceStateInfo, webrtc::kTraceUndefined, 0,
+ webrtc_log);
+ talk_base::Thread::Current()->ProcessMessages(10);
+ EXPECT_EQ(std::string::npos, str.find(webrtc_log));
+ talk_base::LogMessage::RemoveLogToStream(&stream);
+}
+
+// Tests that we can create and destroy a channel, and that the engine's
+// channel count tracks it.
+TEST_F(WebRtcVideoEngineTestFake, CreateChannel) {
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ channel_ = engine_.CreateChannel(voice_channel_);
+ EXPECT_TRUE(channel_ != NULL);
+ EXPECT_EQ(1, engine_.GetNumOfChannels());
+ delete channel_;
+ channel_ = NULL;
+ EXPECT_EQ(0, engine_.GetNumOfChannels());
+}
+
+// Tests that we properly handle failures in CreateChannel.
+TEST_F(WebRtcVideoEngineTestFake, CreateChannelFail) {
+ // Simulate failure inside ViE channel creation.
+ vie_.set_fail_create_channel(true);
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ channel_ = engine_.CreateChannel(voice_channel_);
+ EXPECT_TRUE(channel_ == NULL);
+}
+
+// Tests that we properly handle failures in AllocateExternalCaptureDevice.
+TEST_F(WebRtcVideoEngineTestFake, AllocateExternalCaptureDeviceFail) {
+ // Simulate failure when allocating the external capture device.
+ vie_.set_fail_alloc_capturer(true);
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ channel_ = engine_.CreateChannel(voice_channel_);
+ EXPECT_TRUE(channel_ == NULL);
+}
+
+// Test that we apply our default codecs properly.
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecs) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height);
+ // Hybrid NACK/FEC should be enabled by default, plain NACK disabled.
+ EXPECT_TRUE(vie_.GetHybridNackFecStatus(channel_num));
+ EXPECT_FALSE(vie_.GetNackStatus(channel_num));
+ // TODO(juberti): Check RTCP, PLI, TMMBR.
+}
+
+// Tests that min/max bitrate codec params are applied to the send codec and
+// reflected back by GetSendCodec().
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsWithMinMaxBitrate) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMinBitrate] = "10";
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = "20";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 20, 10, 20);
+
+ cricket::VideoCodec codec;
+ EXPECT_TRUE(channel_->GetSendCodec(&codec));
+ EXPECT_EQ("10", codec.params[cricket::kCodecParamMinBitrate]);
+ EXPECT_EQ("20", codec.params[cricket::kCodecParamMaxBitrate]);
+}
+
+// Tests that SetSendCodecs fails when min bitrate exceeds max bitrate.
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsWithMinMaxBitrateInvalid) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMinBitrate] = "30";
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = "20";
+ EXPECT_FALSE(channel_->SetSendCodecs(codecs));
+}
+
+// Tests that when the configured min bitrate exceeds the default start
+// bitrate, the start bitrate is raised to the min.
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsWithLargeMinMaxBitrate) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMinBitrate] = "1000";
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = "2000";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 2000, 1000,
+ 1000);
+}
+
+// Tests that the max-quantization codec param is applied as qpMax and echoed
+// back by GetSendCodec().
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsWithMaxQuantization) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMaxQuantization] = "21";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 2000, 50, 300,
+ 30, 21);
+
+ cricket::VideoCodec codec;
+ EXPECT_TRUE(channel_->GetSendCodec(&codec));
+ EXPECT_EQ("21", codec.params[cricket::kCodecParamMaxQuantization]);
+}
+
+// Tests that SetOptions() does not clobber bitrate limits configured via
+// SetSendCodecs().
+TEST_F(WebRtcVideoEngineTestFake, SetOptionsWithMaxBitrate) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMinBitrate] = "10";
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = "20";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 20, 10, 20);
+
+ // Verify that max bitrate doesn't change after SetOptions().
+ cricket::VideoOptions options;
+ options.video_noise_reduction.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options));
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 20, 10, 20);
+
+ options.video_noise_reduction.Set(false);
+ options.conference_mode.Set(false);
+ EXPECT_TRUE(channel_->SetOptions(options));
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 20, 10, 20);
+}
+
+// Tests that toggling conference mode on and off resets the max bitrate to
+// the engine default while the min/start bitrates stay as configured.
+TEST_F(WebRtcVideoEngineTestFake, MaxBitrateResetWithConferenceMode) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs[0].params[cricket::kCodecParamMinBitrate] = "10";
+ codecs[0].params[cricket::kCodecParamMaxBitrate] = "20";
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0, 20, 10, 20);
+
+ cricket::VideoOptions options;
+ options.conference_mode.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options));
+ options.conference_mode.Set(false);
+ EXPECT_TRUE(channel_->SetOptions(options));
+ VerifyVP8SendCodec(
+ channel_num, kVP8Codec.width, kVP8Codec.height, 0,
+ kMaxBandwidthKbps, 10, 20);
+}
+
+// Verify the current send bitrate is used as start bitrate when reconfiguring
+// the send codec.
+TEST_F(WebRtcVideoEngineTestFake, StartSendBitrate) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(1)));
+ int send_channel = vie_.GetLastChannel();
+ cricket::VideoCodec codec(kVP8Codec);
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ // Engine default bitrate limits (kbps).
+ const unsigned int kVideoMaxSendBitrateKbps = 2000;
+ const unsigned int kVideoMinSendBitrateKbps = 50;
+ const unsigned int kVideoDefaultStartSendBitrateKbps = 300;
+ VerifyVP8SendCodec(send_channel, kVP8Codec.width, kVP8Codec.height, 0,
+ kVideoMaxSendBitrateKbps, kVideoMinSendBitrateKbps,
+ kVideoDefaultStartSendBitrateKbps);
+ EXPECT_EQ(0, vie_.StartSend(send_channel));
+
+ // Increase the send bitrate and verify it is used as start bitrate.
+ const unsigned int kVideoSendBitrateBps = 768000;
+ vie_.SetSendBitrates(send_channel, kVideoSendBitrateBps, 0, 0);
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ VerifyVP8SendCodec(send_channel, kVP8Codec.width, kVP8Codec.height, 0,
+ kVideoMaxSendBitrateKbps, kVideoMinSendBitrateKbps,
+ kVideoSendBitrateBps / 1000);
+
+ // Never set a start bitrate higher than the max bitrate.
+ vie_.SetSendBitrates(send_channel, kVideoMaxSendBitrateKbps + 500, 0, 0);
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ VerifyVP8SendCodec(send_channel, kVP8Codec.width, kVP8Codec.height, 0,
+ kVideoMaxSendBitrateKbps, kVideoMinSendBitrateKbps,
+ kVideoDefaultStartSendBitrateKbps);
+
+ // Use the default start bitrate if the send bitrate is lower.
+ vie_.SetSendBitrates(send_channel, kVideoDefaultStartSendBitrateKbps - 50, 0,
+ 0);
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ VerifyVP8SendCodec(send_channel, kVP8Codec.width, kVP8Codec.height, 0,
+ kVideoMaxSendBitrateKbps, kVideoMinSendBitrateKbps,
+ kVideoDefaultStartSendBitrateKbps);
+}
+
+
+// Test that we constrain send codecs to the default encoder config.
+TEST_F(WebRtcVideoEngineTestFake, ConstrainSendCodecs) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set max settings of 640x400x30.
+ EXPECT_TRUE(engine_.SetDefaultEncoderConfig(
+ cricket::VideoEncoderConfig(kVP8Codec)));
+
+ // Send codec format bigger than max setting.
+ cricket::VideoCodec codec(kVP8Codec);
+ codec.width = 1280;
+ codec.height = 800;
+ codec.framerate = 60;
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+
+ // Set send codec and verify codec has been constrained.
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height);
+}
+
+// Test that SetSendCodecs rejects bad format.
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsRejectBadFormat) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set width = 0 (invalid).
+ cricket::VideoCodec codec(kVP8Codec);
+ codec.width = 0;
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+
+ // Verify SetSendCodecs failed and send codec is not changed on engine.
+ EXPECT_FALSE(channel_->SetSendCodecs(codec_list));
+ webrtc::VideoCodec gcodec;
+ // Set plType to something other than the value to test against ensuring
+ // that failure will happen if it is not changed.
+ gcodec.plType = 1;
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(0, gcodec.plType);
+
+ // Set height = 0 (invalid).
+ codec_list[0].width = 640;
+ codec_list[0].height = 0;
+
+ // Verify SetSendCodecs failed and send codec is not changed on engine.
+ EXPECT_FALSE(channel_->SetSendCodecs(codec_list));
+ // Set plType to something other than the value to test against ensuring
+ // that failure will happen if it is not changed.
+ gcodec.plType = 1;
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(0, gcodec.plType);
+}
+
+// Test that SetSendCodecs rejects bad codec.
+TEST_F(WebRtcVideoEngineTestFake, SetSendCodecsRejectBadCodec) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set a codec name the engine does not support.
+ cricket::VideoCodec codec(kVP8Codec);
+ codec.name = "bad";
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+
+ // Verify SetSendCodecs failed and send codec is not changed on engine.
+ EXPECT_FALSE(channel_->SetSendCodecs(codec_list));
+ webrtc::VideoCodec gcodec;
+ // Set plType to something other than the value to test against ensuring
+ // that failure will happen if it is not changed.
+ gcodec.plType = 1;
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(0, gcodec.plType);
+}
+
+// Test that vie send codec is reset on new video frame size.
+TEST_F(WebRtcVideoEngineTestFake, ResetVieSendCodecOnNewFrameSize) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set send codec.
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(123)));
+ // Start sending so captured frames reach the send codec path.
+ EXPECT_TRUE(channel_->SetSend(true));
+
+ // Capture a smaller frame and verify vie send codec has been reset to
+ // the new size.
+ SendI420Frame(kVP8Codec.width / 2, kVP8Codec.height / 2);
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width / 2, kVP8Codec.height / 2);
+
+ // Capture a frame bigger than send_codec_ and verify vie send codec has been
+ // reset (and clipped) to send_codec_.
+ SendI420Frame(kVP8Codec.width * 2, kVP8Codec.height * 2);
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height);
+}
+
+// Test that we set our inbound codecs properly.
+TEST_F(WebRtcVideoEngineTestFake, SetRecvCodecs) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+ // The codec should be registered on the channel in its webrtc form.
+ webrtc::VideoCodec wcodec;
+ EXPECT_TRUE(engine_.ConvertFromCricketVideoCodec(kVP8Codec, &wcodec));
+ EXPECT_TRUE(vie_.ReceiveCodecRegistered(channel_num, wcodec));
+}
+
+// Test that channel connects and disconnects external capturer correctly.
+TEST_F(WebRtcVideoEngineTestFake, HasExternalCapturer) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ EXPECT_EQ(1, vie_.GetNumCapturers());
+ int capture_id = vie_.GetCaptureId(channel_num);
+ EXPECT_EQ(channel_num, vie_.GetCaptureChannelId(capture_id));
+
+ // Delete the channel should disconnect the capturer.
+ delete channel_;
+ channel_ = NULL;
+ EXPECT_EQ(0, vie_.GetNumCapturers());
+}
+
+// Test that channel adds and removes renderer correctly.
+TEST_F(WebRtcVideoEngineTestFake, HasRenderer) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ EXPECT_TRUE(vie_.GetHasRenderer(channel_num));
+ EXPECT_FALSE(vie_.GetRenderStarted(channel_num));
+}
+
+// Test that rtcp is enabled on the channel.
+TEST_F(WebRtcVideoEngineTestFake, RtcpEnabled) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_EQ(webrtc::kRtcpCompound_RFC4585, vie_.GetRtcpStatus(channel_num));
+}
+
+// Test that key frame request method is set on the channel.
+TEST_F(WebRtcVideoEngineTestFake, KeyFrameRequestEnabled) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_EQ(webrtc::kViEKeyFrameRequestPliRtcp,
+ vie_.GetKeyFrameRequestMethod(channel_num));
+}
+
+// Test that remb receive and send is enabled for the default channel in a 1:1
+// call.
+TEST_F(WebRtcVideoEngineTestFake, RembEnabled) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(vie_.GetRembStatusBwPartition(channel_num));
+ EXPECT_TRUE(channel_->SetSend(true));
+ EXPECT_TRUE(vie_.GetRembStatusBwPartition(channel_num));
+ EXPECT_TRUE(vie_.GetRembStatusContribute(channel_num));
+}
+
// When in conference mode, test that remb is enabled on a receive channel but
// not for the default channel and that it uses the default channel for sending
// remb packets.
TEST_F(WebRtcVideoEngineTestFake, RembEnabledOnReceiveChannels) {
  EXPECT_TRUE(SetupEngine());
  int default_channel = vie_.GetLastChannel();
  // Switch to conference mode before adding any streams.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_TRUE(channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(1)));
  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
  // Default (send) channel: both REMB roles enabled once codecs are set.
  EXPECT_TRUE(vie_.GetRembStatusBwPartition(default_channel));
  EXPECT_TRUE(vie_.GetRembStatusContribute(default_channel));
  EXPECT_TRUE(channel_->SetSend(true));
  // In conference mode AddRecvStream creates a dedicated receive channel.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
  int new_channel_num = vie_.GetLastChannel();
  EXPECT_NE(default_channel, new_channel_num);

  // The default channel keeps both REMB roles; the new receive channel only
  // contributes to REMB and does not take part in bandwidth partitioning.
  EXPECT_TRUE(vie_.GetRembStatusBwPartition(default_channel));
  EXPECT_TRUE(vie_.GetRembStatusContribute(default_channel));
  EXPECT_FALSE(vie_.GetRembStatusBwPartition(new_channel_num));
  EXPECT_TRUE(vie_.GetRembStatusContribute(new_channel_num));
}
+
// Test support for RTP timestamp offset header extension
// (urn:ietf:params:rtp-hdrext:toffset, RFC 5450).
TEST_F(WebRtcVideoEngineTestFake, RtpTimestampOffsetHeaderExtensions) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = vie_.GetLastChannel();
  // Conference mode so AddRecvStream below creates a separate channel.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));

  // Verify extensions are off by default (id 0 means "not set").
  EXPECT_EQ(0, vie_.GetSendRtpTimestampOffsetExtensionId(channel_num));
  EXPECT_EQ(0, vie_.GetReceiveRtpTimestampOffsetExtensionId(channel_num));

  // Enable RTP timestamp extension.
  const int id = 14;
  std::vector<cricket::RtpHeaderExtension> extensions;
  extensions.push_back(cricket::RtpHeaderExtension(
      "urn:ietf:params:rtp-hdrext:toffset", id));

  // Verify the send extension id.
  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
  EXPECT_EQ(id, vie_.GetSendRtpTimestampOffsetExtensionId(channel_num));

  // Remove the extension id by setting an empty extension list.
  std::vector<cricket::RtpHeaderExtension> empty_extensions;
  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(empty_extensions));
  EXPECT_EQ(0, vie_.GetSendRtpTimestampOffsetExtensionId(channel_num));

  // Verify receive extension id.
  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
  EXPECT_EQ(id, vie_.GetReceiveRtpTimestampOffsetExtensionId(channel_num));

  // Add a new receive stream and verify the extension is set.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
  int new_channel_num = vie_.GetLastChannel();
  EXPECT_NE(channel_num, new_channel_num);
  EXPECT_EQ(id, vie_.GetReceiveRtpTimestampOffsetExtensionId(new_channel_num));

  // Remove the extension id; removal must apply to both receive channels.
  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(empty_extensions));
  EXPECT_EQ(0, vie_.GetReceiveRtpTimestampOffsetExtensionId(channel_num));
  EXPECT_EQ(0, vie_.GetReceiveRtpTimestampOffsetExtensionId(new_channel_num));
}
+
// Test support for absolute send time header extension
// (http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time).
TEST_F(WebRtcVideoEngineTestFake, AbsoluteSendTimeHeaderExtensions) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = vie_.GetLastChannel();
  // Conference mode so AddRecvStream below creates a separate channel.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));

  // Verify extensions are off by default (id 0 means "not set").
  EXPECT_EQ(0, vie_.GetSendAbsoluteSendTimeExtensionId(channel_num));
  EXPECT_EQ(0, vie_.GetReceiveAbsoluteSendTimeExtensionId(channel_num));

  // Enable the absolute send time extension.
  const int id = 12;
  std::vector<cricket::RtpHeaderExtension> extensions;
  extensions.push_back(cricket::RtpHeaderExtension(
      "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time", id));

  // Verify the send extension id.
  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
  EXPECT_EQ(id, vie_.GetSendAbsoluteSendTimeExtensionId(channel_num));

  // Remove the extension id by setting an empty extension list.
  std::vector<cricket::RtpHeaderExtension> empty_extensions;
  EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(empty_extensions));
  EXPECT_EQ(0, vie_.GetSendAbsoluteSendTimeExtensionId(channel_num));

  // Verify receive extension id.
  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
  EXPECT_EQ(id, vie_.GetReceiveAbsoluteSendTimeExtensionId(channel_num));

  // Add a new receive stream and verify the extension is set.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
  int new_channel_num = vie_.GetLastChannel();
  EXPECT_NE(channel_num, new_channel_num);
  EXPECT_EQ(id, vie_.GetReceiveAbsoluteSendTimeExtensionId(new_channel_num));

  // Remove the extension id; removal must apply to both receive channels.
  EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(empty_extensions));
  EXPECT_EQ(0, vie_.GetReceiveAbsoluteSendTimeExtensionId(channel_num));
  EXPECT_EQ(0, vie_.GetReceiveAbsoluteSendTimeExtensionId(new_channel_num));
}
+
// Tests the video_leaky_bucket option, surfaced to ViE as transmission
// smoothing; it must apply to send channels only.
TEST_F(WebRtcVideoEngineTestFake, LeakyBucketTest) {
  EXPECT_TRUE(SetupEngine());

  // Verify this is off by default.
  EXPECT_TRUE(channel_->AddSendStream(cricket::StreamParams::CreateLegacy(1)));
  int first_send_channel = vie_.GetLastChannel();
  EXPECT_FALSE(vie_.GetTransmissionSmoothingStatus(first_send_channel));

  // Enable the experiment and verify it turns on for the existing channel.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  options.video_leaky_bucket.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_TRUE(vie_.GetTransmissionSmoothingStatus(first_send_channel));

  // Add a receive channel and verify leaky bucket isn't enabled on it.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
  int recv_channel_num = vie_.GetLastChannel();
  EXPECT_NE(first_send_channel, recv_channel_num);
  EXPECT_FALSE(vie_.GetTransmissionSmoothingStatus(recv_channel_num));

  // Add a new send stream and verify leaky bucket is enabled from start.
  EXPECT_TRUE(channel_->AddSendStream(cricket::StreamParams::CreateLegacy(3)));
  int second_send_channel = vie_.GetLastChannel();
  EXPECT_NE(first_send_channel, second_send_channel);
  EXPECT_TRUE(vie_.GetTransmissionSmoothingStatus(second_send_channel));
}
+
// Tests the buffered_mode_latency option: the default channel gets both
// sender and receiver target delay, receive-only channels get only the
// receiver delay, send-only channels get only the sender delay, and
// kBufferedModeDisabled clears the delay everywhere.
TEST_F(WebRtcVideoEngineTestFake, BufferedModeLatency) {
  EXPECT_TRUE(SetupEngine());

  // Verify this is off by default.
  EXPECT_TRUE(channel_->AddSendStream(cricket::StreamParams::CreateLegacy(1)));
  int first_send_channel = vie_.GetLastChannel();
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(first_send_channel));

  // Enable the experiment and verify. The default channel will have both
  // sender and receiver buffered mode enabled.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  options.buffered_mode_latency.Set(100);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_EQ(100, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(100, vie_.GetReceiverTargetDelay(first_send_channel));

  // Add a receive channel and verify sender buffered mode isn't enabled.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
  int recv_channel_num = vie_.GetLastChannel();
  EXPECT_NE(first_send_channel, recv_channel_num);
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(recv_channel_num));
  EXPECT_EQ(100, vie_.GetReceiverTargetDelay(recv_channel_num));

  // Add a new send stream and verify sender buffered mode is enabled.
  EXPECT_TRUE(channel_->AddSendStream(cricket::StreamParams::CreateLegacy(3)));
  int second_send_channel = vie_.GetLastChannel();
  EXPECT_NE(first_send_channel, second_send_channel);
  EXPECT_EQ(100, vie_.GetSenderTargetDelay(second_send_channel));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(second_send_channel));

  // Disable sender buffered mode and verify every channel is cleared.
  options.buffered_mode_latency.Set(cricket::kBufferedModeDisabled);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(first_send_channel));
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(second_send_channel));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(second_send_channel));
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(recv_channel_num));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(recv_channel_num));
}
+
// Tests that video options accumulate across SetOptions calls: setting one
// option later must not clear another option set earlier.
TEST_F(WebRtcVideoEngineTestFake, AdditiveVideoOptions) {
  EXPECT_TRUE(SetupEngine());

  EXPECT_TRUE(channel_->AddSendStream(cricket::StreamParams::CreateLegacy(1)));
  int first_send_channel = vie_.GetLastChannel();
  // Both target delays are off by default.
  EXPECT_EQ(0, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(0, vie_.GetReceiverTargetDelay(first_send_channel));

  // First SetOptions call: only buffered-mode latency.
  cricket::VideoOptions options1;
  options1.buffered_mode_latency.Set(100);
  EXPECT_TRUE(channel_->SetOptions(options1));
  EXPECT_EQ(100, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(100, vie_.GetReceiverTargetDelay(first_send_channel));
  EXPECT_FALSE(vie_.GetTransmissionSmoothingStatus(first_send_channel));

  // Second SetOptions call: only leaky bucket.
  cricket::VideoOptions options2;
  options2.video_leaky_bucket.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options2));
  EXPECT_TRUE(vie_.GetTransmissionSmoothingStatus(first_send_channel));
  // The buffered_mode_latency still takes effect.
  EXPECT_EQ(100, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(100, vie_.GetReceiverTargetDelay(first_send_channel));

  // Updating the first option again must not clear the second one.
  options1.buffered_mode_latency.Set(50);
  EXPECT_TRUE(channel_->SetOptions(options1));
  EXPECT_EQ(50, vie_.GetSenderTargetDelay(first_send_channel));
  EXPECT_EQ(50, vie_.GetReceiverTargetDelay(first_send_channel));
  // The video_leaky_bucket still takes effect.
  EXPECT_TRUE(vie_.GetTransmissionSmoothingStatus(first_send_channel));
}
+
+// Test that AddRecvStream doesn't create new channel for 1:1 call.
+TEST_F(WebRtcVideoEngineTestFake, AddRecvStream1On1) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_EQ(channel_num, vie_.GetLastChannel());
+}
+
+// Test that AddRecvStream doesn't change remb for 1:1 call.
+TEST_F(WebRtcVideoEngineTestFake, NoRembChangeAfterAddRecvStream) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(vie_.GetRembStatusBwPartition(channel_num));
+ EXPECT_TRUE(vie_.GetRembStatusContribute(channel_num));
+ EXPECT_TRUE(channel_->SetSend(true));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(vie_.GetRembStatusBwPartition(channel_num));
+ EXPECT_TRUE(vie_.GetRembStatusContribute(channel_num));
+}
+
// Verify default REMB setting and that it can be turned on and off.
// REMB state follows the feedback params carried on the send codecs.
TEST_F(WebRtcVideoEngineTestFake, RembOnOff) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = vie_.GetLastChannel();
  // Verify REMB sending is always off by default.
  EXPECT_FALSE(vie_.GetRembStatusBwPartition(channel_num));

  // Verify that REMB is turned on when setting default codecs since the
  // default codecs have REMB enabled.
  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
  EXPECT_TRUE(vie_.GetRembStatusBwPartition(channel_num));

  // Verify that REMB is turned off when codecs without REMB are set.
  std::vector<cricket::VideoCodec> codecs = engine_.codecs();
  // Clearing the codecs' FeedbackParams and setting send codecs should disable
  // REMB.
  for (std::vector<cricket::VideoCodec>::iterator iter = codecs.begin();
       iter != codecs.end(); ++iter) {
    // Intersecting with empty will clear the FeedbackParams.
    cricket::FeedbackParams empty_params;
    iter->feedback_params.Intersect(empty_params);
    EXPECT_TRUE(iter->feedback_params.params().empty());
  }
  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
  EXPECT_FALSE(vie_.GetRembStatusBwPartition(channel_num));
}
+
+// Test that nack is enabled on the channel if we don't offer red/fec.
+TEST_F(WebRtcVideoEngineTestFake, NackEnabled) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ codecs.resize(1); // toss out red and ulpfec
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(vie_.GetNackStatus(channel_num));
+}
+
+// Test that we enable hybrid NACK FEC mode.
+TEST_F(WebRtcVideoEngineTestFake, HybridNackFec) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetRecvCodecs(engine_.codecs()));
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(vie_.GetHybridNackFecStatus(channel_num));
+ EXPECT_FALSE(vie_.GetNackStatus(channel_num));
+}
+
+// Test that we enable hybrid NACK FEC mode when calling SetSendCodecs and
+// SetReceiveCodecs in reversed order.
+TEST_F(WebRtcVideoEngineTestFake, HybridNackFecReversedOrder) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(channel_->SetRecvCodecs(engine_.codecs()));
+ EXPECT_TRUE(vie_.GetHybridNackFecStatus(channel_num));
+ EXPECT_FALSE(vie_.GetNackStatus(channel_num));
+}
+
+// Test NACK vs Hybrid NACK/FEC interop call setup, i.e. only use NACK even if
+// red/fec is offered as receive codec.
+TEST_F(WebRtcVideoEngineTestFake, VideoProtectionInterop) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> recv_codecs(engine_.codecs());
+ std::vector<cricket::VideoCodec> send_codecs(engine_.codecs());
+ // Only add VP8 as send codec.
+ send_codecs.resize(1);
+ EXPECT_TRUE(channel_->SetRecvCodecs(recv_codecs));
+ EXPECT_TRUE(channel_->SetSendCodecs(send_codecs));
+ EXPECT_FALSE(vie_.GetHybridNackFecStatus(channel_num));
+ EXPECT_TRUE(vie_.GetNackStatus(channel_num));
+}
+
+// Test NACK vs Hybrid NACK/FEC interop call setup, i.e. only use NACK even if
+// red/fec is offered as receive codec. Call order reversed compared to
+// VideoProtectionInterop.
+TEST_F(WebRtcVideoEngineTestFake, VideoProtectionInteropReversed) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ std::vector<cricket::VideoCodec> recv_codecs(engine_.codecs());
+ std::vector<cricket::VideoCodec> send_codecs(engine_.codecs());
+ // Only add VP8 as send codec.
+ send_codecs.resize(1);
+ EXPECT_TRUE(channel_->SetSendCodecs(send_codecs));
+ EXPECT_TRUE(channel_->SetRecvCodecs(recv_codecs));
+ EXPECT_FALSE(vie_.GetHybridNackFecStatus(channel_num));
+ EXPECT_TRUE(vie_.GetNackStatus(channel_num));
+}
+
// Test that NACK, not hybrid mode, is enabled in conference mode.
// NOTE: despite the test's name, conference mode is expected to fall back
// to plain NACK on both the send and the receive channel.
TEST_F(WebRtcVideoEngineTestFake, HybridNackFecConference) {
  EXPECT_TRUE(SetupEngine());
  // Setup the send channel.
  int send_channel_num = vie_.GetLastChannel();
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_TRUE(channel_->SetRecvCodecs(engine_.codecs()));
  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
  EXPECT_FALSE(vie_.GetHybridNackFecStatus(send_channel_num));
  EXPECT_TRUE(vie_.GetNackStatus(send_channel_num));
  // Add a receive stream; in conference mode this creates a new channel,
  // which must also use plain NACK.
  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
  int receive_channel_num = vie_.GetLastChannel();
  EXPECT_FALSE(vie_.GetHybridNackFecStatus(receive_channel_num));
  EXPECT_TRUE(vie_.GetNackStatus(receive_channel_num));
}
+
+// Test that when AddRecvStream in conference mode, a new channel is created
+// for receiving. And the new channel's "original channel" is the send channel.
+TEST_F(WebRtcVideoEngineTestFake, AddRemoveRecvStreamConference) {
+ EXPECT_TRUE(SetupEngine());
+ // Setup the send channel.
+ int send_channel_num = vie_.GetLastChannel();
+ cricket::VideoOptions options;
+ options.conference_mode.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options));
+ // Add a receive stream.
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ int receive_channel_num = vie_.GetLastChannel();
+ EXPECT_EQ(send_channel_num, vie_.GetOriginalChannelId(receive_channel_num));
+ EXPECT_TRUE(channel_->RemoveRecvStream(1));
+ EXPECT_FALSE(vie_.IsChannel(receive_channel_num));
+}
+
+// Test that we can create a channel and start/stop rendering out on it.
+TEST_F(WebRtcVideoEngineTestFake, SetRender) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Verify we can start/stop/start/stop rendering.
+ EXPECT_TRUE(channel_->SetRender(true));
+ EXPECT_TRUE(vie_.GetRenderStarted(channel_num));
+ EXPECT_TRUE(channel_->SetRender(false));
+ EXPECT_FALSE(vie_.GetRenderStarted(channel_num));
+ EXPECT_TRUE(channel_->SetRender(true));
+ EXPECT_TRUE(vie_.GetRenderStarted(channel_num));
+ EXPECT_TRUE(channel_->SetRender(false));
+ EXPECT_FALSE(vie_.GetRenderStarted(channel_num));
+}
+
+// Test that we can create a channel and start/stop sending out on it.
+TEST_F(WebRtcVideoEngineTestFake, SetSend) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set send codecs on the channel.
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(123)));
+
+ // Verify we can start/stop/start/stop sending.
+ EXPECT_TRUE(channel_->SetSend(true));
+ EXPECT_TRUE(vie_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetSend(false));
+ EXPECT_FALSE(vie_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetSend(true));
+ EXPECT_TRUE(vie_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetSend(false));
+ EXPECT_FALSE(vie_.GetSend(channel_num));
+}
+
+// Test that we set bandwidth properly when using full auto bandwidth mode.
+TEST_F(WebRtcVideoEngineTestFake, SetBandwidthAuto) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(channel_->SetSendBandwidth(true, cricket::kAutoBandwidth));
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height);
+}
+
+// Test that we set bandwidth properly when using auto with upper bound.
+TEST_F(WebRtcVideoEngineTestFake, SetBandwidthAutoCapped) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(channel_->SetSendBandwidth(true, 768000));
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height, 0, 768U);
+}
+
+// Test that we set bandwidth properly when using a fixed bandwidth.
+TEST_F(WebRtcVideoEngineTestFake, SetBandwidthFixed) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+ EXPECT_TRUE(channel_->SetSendBandwidth(false, 768000));
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height, 0,
+ 768U, 768U, 768U);
+}
+
// Test that SetSendBandwidth is ignored in conference mode.
TEST_F(WebRtcVideoEngineTestFake, SetBandwidthInConference) {
  EXPECT_TRUE(SetupEngine());
  int channel_num = vie_.GetLastChannel();
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
  VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height);

  // Set send bandwidth; in conference mode this should be a no-op.
  EXPECT_TRUE(channel_->SetSendBandwidth(false, 768000));

  // Verify bitrate not changed: the default min/start/max values remain...
  webrtc::VideoCodec gcodec;
  EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
  EXPECT_EQ(kMinBandwidthKbps, gcodec.minBitrate);
  EXPECT_EQ(kStartBandwidthKbps, gcodec.startBitrate);
  EXPECT_EQ(kMaxBandwidthKbps, gcodec.maxBitrate);
  // ...and none of them took the requested 768 kbps value.
  EXPECT_NE(768U, gcodec.minBitrate);
  EXPECT_NE(768U, gcodec.startBitrate);
  EXPECT_NE(768U, gcodec.maxBitrate);
}
+
+// Test that sending screencast frames doesn't change bitrate.
+TEST_F(WebRtcVideoEngineTestFake, SetBandwidthScreencast) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Set send codec.
+ cricket::VideoCodec codec(kVP8Codec);
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(123)));
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ EXPECT_TRUE(channel_->SetSendBandwidth(false, 111000));
+ EXPECT_TRUE(channel_->SetSend(true));
+
+ SendI420ScreencastFrame(kVP8Codec.width, kVP8Codec.height);
+ VerifyVP8SendCodec(channel_num, kVP8Codec.width, kVP8Codec.height, 0,
+ 111, 111, 111);
+}
+
+
+// Test SetSendSsrc.
+TEST_F(WebRtcVideoEngineTestFake, SetSendSsrcAndCname) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(1234);
+ stream.cname = "cname";
+ channel_->AddSendStream(stream);
+
+ unsigned int ssrc = 0;
+ EXPECT_EQ(0, vie_.GetLocalSSRC(channel_num, ssrc));
+ EXPECT_EQ(1234U, ssrc);
+ EXPECT_EQ(1, vie_.GetNumSsrcs(channel_num));
+
+ char rtcp_cname[256];
+ EXPECT_EQ(0, vie_.GetRTCPCName(channel_num, rtcp_cname));
+ EXPECT_STREQ("cname", rtcp_cname);
+}
+
+
+// Test that the local SSRC is the same on sending and receiving channels if the
+// receive channel is created before the send channel.
+TEST_F(WebRtcVideoEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
+ EXPECT_TRUE(SetupEngine());
+
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ int receive_channel_num = vie_.GetLastChannel();
+ cricket::StreamParams stream = cricket::StreamParams::CreateLegacy(1234);
+ EXPECT_TRUE(channel_->AddSendStream(stream));
+ int send_channel_num = vie_.GetLastChannel();
+ unsigned int ssrc = 0;
+ EXPECT_EQ(0, vie_.GetLocalSSRC(send_channel_num, ssrc));
+ EXPECT_EQ(1234U, ssrc);
+ EXPECT_EQ(1, vie_.GetNumSsrcs(send_channel_num));
+ ssrc = 0;
+ EXPECT_EQ(0, vie_.GetLocalSSRC(receive_channel_num, ssrc));
+ EXPECT_EQ(1234U, ssrc);
+ EXPECT_EQ(1, vie_.GetNumSsrcs(receive_channel_num));
+}
+
+
// Test SetOptions with denoising flag. Denoising is applied through the
// VP8 codec settings, not through the capturer.
TEST_F(WebRtcVideoEngineTestFake, SetOptionsWithDenoising) {
  EXPECT_TRUE(SetupEngine());
  EXPECT_EQ(1, vie_.GetNumCapturers());
  int channel_num = vie_.GetLastChannel();
  int capture_id = vie_.GetCaptureId(channel_num);
  // Set send codecs on the channel.
  std::vector<cricket::VideoCodec> codecs;
  codecs.push_back(kVP8Codec);
  EXPECT_TRUE(channel_->SetSendCodecs(codecs));

  // Set options with OPT_VIDEO_NOISE_REDUCTION flag.
  cricket::VideoOptions options;
  options.video_noise_reduction.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));

  // Verify the send codec now has VP8 denoising enabled, while the
  // capturer's own denoising stays off.
  webrtc::VideoCodec send_codec;
  memset(&send_codec, 0, sizeof(send_codec));  // avoid uninitialized warning
  EXPECT_EQ(0, vie_.GetSendCodec(channel_num, send_codec));
  EXPECT_TRUE(send_codec.codecSpecific.VP8.denoisingOn);
  EXPECT_FALSE(vie_.GetCaptureDenoising(capture_id));

  // Set options back to zero.
  options.video_noise_reduction.Set(false);
  EXPECT_TRUE(channel_->SetOptions(options));

  // Verify the send codec has denoising turned off again; the capturer is
  // still untouched.
  EXPECT_EQ(0, vie_.GetSendCodec(channel_num, send_codec));
  EXPECT_FALSE(send_codec.codecSpecific.VP8.denoisingOn);
  EXPECT_FALSE(vie_.GetCaptureDenoising(capture_id));
}
+
+
// Verifies GetStats aggregates send bitrates from the send channel and
// sums receive bandwidth estimates across all active receive channels.
TEST_F(WebRtcVideoEngineTestFake, SendReceiveBitratesStats) {
  EXPECT_TRUE(SetupEngine());
  // Conference mode so each AddRecvStream creates its own channel.
  cricket::VideoOptions options;
  options.conference_mode.Set(true);
  EXPECT_TRUE(channel_->SetOptions(options));
  EXPECT_TRUE(channel_->AddSendStream(
      cricket::StreamParams::CreateLegacy(1)));
  int send_channel = vie_.GetLastChannel();
  cricket::VideoCodec codec(kVP8Codec720p);
  std::vector<cricket::VideoCodec> codec_list;
  codec_list.push_back(codec);
  EXPECT_TRUE(channel_->SetSendCodecs(codec_list));

  // Two distinct receive channels.
  EXPECT_TRUE(channel_->AddRecvStream(
      cricket::StreamParams::CreateLegacy(2)));
  int first_receive_channel = vie_.GetLastChannel();
  EXPECT_NE(send_channel, first_receive_channel);
  EXPECT_TRUE(channel_->AddRecvStream(
      cricket::StreamParams::CreateLegacy(3)));
  int second_receive_channel = vie_.GetLastChannel();
  EXPECT_NE(first_receive_channel, second_receive_channel);

  // Before anything starts, all bandwidth-estimation stats must be zero.
  cricket::VideoMediaInfo info;
  EXPECT_TRUE(channel_->GetStats(&info));
  ASSERT_EQ(1U, info.bw_estimations.size());
  ASSERT_EQ(0, info.bw_estimations[0].actual_enc_bitrate);
  ASSERT_EQ(0, info.bw_estimations[0].transmit_bitrate);
  ASSERT_EQ(0, info.bw_estimations[0].retransmit_bitrate);
  ASSERT_EQ(0, info.bw_estimations[0].available_send_bandwidth);
  ASSERT_EQ(0, info.bw_estimations[0].available_recv_bandwidth);
  ASSERT_EQ(0, info.bw_estimations[0].target_enc_bitrate);

  // Start sending and receiving on one of the channels and verify bitrates.
  EXPECT_EQ(0, vie_.StartSend(send_channel));
  int send_video_bitrate = 800;
  int send_fec_bitrate = 100;
  int send_nack_bitrate = 20;
  // Transmit bitrate is the sum of media, FEC and NACK bitrates.
  int send_total_bitrate = send_video_bitrate + send_fec_bitrate +
      send_nack_bitrate;
  int send_bandwidth = 950;
  vie_.SetSendBitrates(send_channel, send_video_bitrate, send_fec_bitrate,
                       send_nack_bitrate);
  vie_.SetSendBandwidthEstimate(send_channel, send_bandwidth);

  EXPECT_EQ(0, vie_.StartReceive(first_receive_channel));
  int first_channel_receive_bandwidth = 600;
  vie_.SetReceiveBandwidthEstimate(first_receive_channel,
                                   first_channel_receive_bandwidth);

  info.Clear();
  EXPECT_TRUE(channel_->GetStats(&info));
  ASSERT_EQ(1U, info.bw_estimations.size());
  ASSERT_EQ(send_video_bitrate, info.bw_estimations[0].actual_enc_bitrate);
  ASSERT_EQ(send_total_bitrate, info.bw_estimations[0].transmit_bitrate);
  ASSERT_EQ(send_nack_bitrate, info.bw_estimations[0].retransmit_bitrate);
  ASSERT_EQ(send_bandwidth, info.bw_estimations[0].available_send_bandwidth);
  ASSERT_EQ(first_channel_receive_bandwidth,
            info.bw_estimations[0].available_recv_bandwidth);
  ASSERT_EQ(send_video_bitrate, info.bw_estimations[0].target_enc_bitrate);

  // Start receiving on the second channel and verify received rate.
  // available_recv_bandwidth must be the sum over both receive channels.
  EXPECT_EQ(0, vie_.StartReceive(second_receive_channel));
  int second_channel_receive_bandwidth = 100;
  vie_.SetReceiveBandwidthEstimate(second_receive_channel,
                                   second_channel_receive_bandwidth);

  info.Clear();
  EXPECT_TRUE(channel_->GetStats(&info));
  ASSERT_EQ(1U, info.bw_estimations.size());
  ASSERT_EQ(send_video_bitrate, info.bw_estimations[0].actual_enc_bitrate);
  ASSERT_EQ(send_total_bitrate, info.bw_estimations[0].transmit_bitrate);
  ASSERT_EQ(send_nack_bitrate, info.bw_estimations[0].retransmit_bitrate);
  ASSERT_EQ(send_bandwidth, info.bw_estimations[0].available_send_bandwidth);
  ASSERT_EQ(first_channel_receive_bandwidth + second_channel_receive_bandwidth,
            info.bw_estimations[0].available_recv_bandwidth);
  ASSERT_EQ(send_video_bitrate, info.bw_estimations[0].target_enc_bitrate);
}
+
+TEST_F(WebRtcVideoEngineTestFake, TestSetAdaptInputToCpuUsage) {
+ EXPECT_TRUE(SetupEngine());
+ cricket::VideoOptions options_in, options_out;
+ bool cpu_adapt = false;
+ channel_->SetOptions(options_in);
+ EXPECT_TRUE(channel_->GetOptions(&options_out));
+ EXPECT_FALSE(options_out.adapt_input_to_cpu_usage.Get(&cpu_adapt));
+ // Set adapt input CPU usage option.
+ options_in.adapt_input_to_cpu_usage.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options_in));
+ EXPECT_TRUE(channel_->GetOptions(&options_out));
+ EXPECT_TRUE(options_out.adapt_input_to_cpu_usage.Get(&cpu_adapt));
+ EXPECT_TRUE(cpu_adapt);
+}
+
+TEST_F(WebRtcVideoEngineTestFake, TestSetCpuThreshold) {
+ EXPECT_TRUE(SetupEngine());
+ float low, high;
+ cricket::VideoOptions options_in, options_out;
+ // Verify that initial values are set.
+ EXPECT_TRUE(channel_->GetOptions(&options_out));
+ EXPECT_TRUE(options_out.system_low_adaptation_threshhold.Get(&low));
+ EXPECT_EQ(low, 0.65f);
+ EXPECT_TRUE(options_out.system_high_adaptation_threshhold.Get(&high));
+ EXPECT_EQ(high, 0.85f);
+ // Set new CPU threshold values.
+ options_in.system_low_adaptation_threshhold.Set(0.45f);
+ options_in.system_high_adaptation_threshhold.Set(0.95f);
+ EXPECT_TRUE(channel_->SetOptions(options_in));
+ EXPECT_TRUE(channel_->GetOptions(&options_out));
+ EXPECT_TRUE(options_out.system_low_adaptation_threshhold.Get(&low));
+ EXPECT_EQ(low, 0.45f);
+ EXPECT_TRUE(options_out.system_high_adaptation_threshhold.Get(&high));
+ EXPECT_EQ(high, 0.95f);
+}
+
+TEST_F(WebRtcVideoEngineTestFake, TestSetInvalidCpuThreshold) {
+ EXPECT_TRUE(SetupEngine());
+ float low, high;
+ cricket::VideoOptions options_in, options_out;
+ // Valid range is [0, 1].
+ options_in.system_low_adaptation_threshhold.Set(-1.5f);
+ options_in.system_high_adaptation_threshhold.Set(1.5f);
+ EXPECT_TRUE(channel_->SetOptions(options_in));
+ EXPECT_TRUE(channel_->GetOptions(&options_out));
+ EXPECT_TRUE(options_out.system_low_adaptation_threshhold.Get(&low));
+ EXPECT_EQ(low, 0.0f);
+ EXPECT_TRUE(options_out.system_high_adaptation_threshhold.Get(&high));
+ EXPECT_EQ(high, 1.0f);
+}
+
+
+/////////////////////////
+// Tests with real ViE //
+/////////////////////////
+
+// Tests that we can find codecs by name or id.
+TEST_F(WebRtcVideoEngineTest, FindCodec) {
+ // We should not need to init engine in order to get codecs.
+ const std::vector<cricket::VideoCodec>& c = engine_.codecs();
+ EXPECT_EQ(3U, c.size());
+
+ cricket::VideoCodec vp8(104, "VP8", 320, 200, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(vp8));
+
+ cricket::VideoCodec vp8_ci(104, "vp8", 320, 200, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(vp8));
+
+ cricket::VideoCodec vp8_diff_fr_diff_pref(104, "VP8", 320, 200, 50, 50);
+ EXPECT_TRUE(engine_.FindCodec(vp8_diff_fr_diff_pref));
+
+ cricket::VideoCodec vp8_diff_id(95, "VP8", 320, 200, 30, 0);
+ EXPECT_FALSE(engine_.FindCodec(vp8_diff_id));
+ vp8_diff_id.id = 97;
+ EXPECT_TRUE(engine_.FindCodec(vp8_diff_id));
+
+ cricket::VideoCodec vp8_diff_res(104, "VP8", 320, 111, 30, 0);
+ EXPECT_FALSE(engine_.FindCodec(vp8_diff_res));
+
+ // PeerConnection doesn't negotiate the resolution at this point.
+ // Test that FindCodec can handle the case when width/height is 0.
+ cricket::VideoCodec vp8_zero_res(104, "VP8", 0, 0, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(vp8_zero_res));
+
+ cricket::VideoCodec red(101, "RED", 0, 0, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(red));
+
+ cricket::VideoCodec red_ci(101, "red", 0, 0, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(red));
+
+ cricket::VideoCodec fec(102, "ULPFEC", 0, 0, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(fec));
+
+ cricket::VideoCodec fec_ci(102, "ulpfec", 0, 0, 30, 0);
+ EXPECT_TRUE(engine_.FindCodec(fec));
+}
+
+TEST_F(WebRtcVideoEngineTest, StartupShutdown) {
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ engine_.Terminate();
+}
+
+TEST_PRE_VIDEOENGINE_INIT(WebRtcVideoEngineTest, ConstrainNewCodec)
+TEST_POST_VIDEOENGINE_INIT(WebRtcVideoEngineTest, ConstrainNewCodec)
+
+TEST_PRE_VIDEOENGINE_INIT(WebRtcVideoEngineTest, ConstrainRunningCodec)
+TEST_POST_VIDEOENGINE_INIT(WebRtcVideoEngineTest, ConstrainRunningCodec)
+
+// TODO(juberti): Figure out why ViE is munging the COM refcount.
+#ifdef WIN32
+TEST_F(WebRtcVideoEngineTest, DISABLED_CheckCoInitialize) {
+ Base::CheckCoInitialize();
+}
+#endif
+
+TEST_F(WebRtcVideoEngineTest, CreateChannel) {
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ cricket::VideoMediaChannel* channel = engine_.CreateChannel(NULL);
+ EXPECT_TRUE(channel != NULL);
+ delete channel;
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SetRecvCodecs) {
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetRecvCodecsWrongPayloadType) {
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ codecs[0].id = 99;
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetRecvCodecsUnsupportedCodec) {
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ codecs.push_back(cricket::VideoCodec(101, "VP1", 640, 400, 30, 0));
+ EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SetSend) {
+ Base::SetSend();
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetSendWithoutCodecs) {
+ Base::SetSendWithoutCodecs();
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetSendSetsTransportBufferSizes) {
+ Base::SetSendSetsTransportBufferSizes();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SendAndReceiveVp8Vga) {
+ SendAndReceive(cricket::VideoCodec(100, "VP8", 640, 400, 30, 0));
+}
+TEST_F(WebRtcVideoMediaChannelTest, SendAndReceiveVp8Qvga) {
+ SendAndReceive(cricket::VideoCodec(100, "VP8", 320, 200, 30, 0));
+}
+TEST_F(WebRtcVideoMediaChannelTest, SendAndReceiveH264SvcQqvga) {
+ SendAndReceive(cricket::VideoCodec(100, "VP8", 160, 100, 30, 0));
+}
+TEST_F(WebRtcVideoMediaChannelTest, SendManyResizeOnce) {
+ SendManyResizeOnce();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SendVp8HdAndReceiveAdaptedVp8Vga) {
+ EXPECT_TRUE(channel_->SetCapturer(kSsrc, NULL));
+ channel_->UpdateAspectRatio(1280, 720);
+ video_capturer_.reset(new cricket::FakeVideoCapturer);
+ const std::vector<cricket::VideoFormat>* formats =
+ video_capturer_->GetSupportedFormats();
+ cricket::VideoFormat capture_format_hd = (*formats)[0];
+ EXPECT_EQ(cricket::CS_RUNNING, video_capturer_->Start(capture_format_hd));
+ EXPECT_TRUE(channel_->SetCapturer(kSsrc, video_capturer_.get()));
+
+ // Capture format HD -> adapt (OnOutputFormatRequest VGA) -> VGA.
+ cricket::VideoCodec codec(100, "VP8", 1280, 720, 30, 0);
+ EXPECT_TRUE(SetOneCodec(codec));
+ codec.width /= 2;
+ codec.height /= 2;
+ EXPECT_TRUE(channel_->SetSendStreamFormat(kSsrc, cricket::VideoFormat(
+ codec.width, codec.height,
+ cricket::VideoFormat::FpsToInterval(codec.framerate),
+ cricket::FOURCC_ANY)));
+ EXPECT_TRUE(SetSend(true));
+ EXPECT_TRUE(channel_->SetRender(true));
+ EXPECT_EQ(0, renderer_.num_rendered_frames());
+ EXPECT_TRUE(SendFrame());
+ EXPECT_FRAME_WAIT(1, codec.width, codec.height, kTimeout);
+}
+
+// TODO(juberti): Fix this test to tolerate missing stats.
+TEST_F(WebRtcVideoMediaChannelTest, DISABLED_GetStats) {
+ Base::GetStats();
+}
+
+// TODO(juberti): Fix this test to tolerate missing stats.
+TEST_F(WebRtcVideoMediaChannelTest, DISABLED_GetStatsMultipleRecvStreams) {
+ Base::GetStatsMultipleRecvStreams();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, GetStatsMultipleSendStreams) {
+ Base::GetStatsMultipleSendStreams();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SetSendBandwidth) {
+ Base::SetSendBandwidth();
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetSendSsrc) {
+ Base::SetSendSsrc();
+}
+TEST_F(WebRtcVideoMediaChannelTest, SetSendSsrcAfterSetCodecs) {
+ Base::SetSendSsrcAfterSetCodecs();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SetRenderer) {
+ Base::SetRenderer();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveRecvStreams) {
+ Base::AddRemoveRecvStreams();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveRecvStreamAndRender) {
+ Base::AddRemoveRecvStreamAndRender();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveRecvStreamsNoConference) {
+ Base::AddRemoveRecvStreamsNoConference();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveSendStreams) {
+ Base::AddRemoveSendStreams();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SimulateConference) {
+ Base::SimulateConference();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveCapturer) {
+ Base::AddRemoveCapturer();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, RemoveCapturerWithoutAdd) {
+ Base::RemoveCapturerWithoutAdd();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AddRemoveCapturerMultipleSources) {
+ Base::AddRemoveCapturerMultipleSources();
+}
+
+
+TEST_F(WebRtcVideoMediaChannelTest, SetOptionsSucceedsWhenSending) {
+ cricket::VideoOptions options;
+ options.conference_mode.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options));
+
+ // Verify SetOptions returns true on a different options.
+ cricket::VideoOptions options2;
+ options2.adapt_input_to_cpu_usage.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options2));
+
+ // Set send codecs on the channel and start sending.
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(true));
+
+ // Verify SetOptions returns true if channel is already sending.
+ cricket::VideoOptions options3;
+ options3.conference_mode.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options3));
+}
+
+// Tests empty StreamParams is rejected.
+TEST_F(WebRtcVideoMediaChannelTest, RejectEmptyStreamParams) {
+ Base::RejectEmptyStreamParams();
+}
+
+
+TEST_F(WebRtcVideoMediaChannelTest, AdaptResolution16x10) {
+ Base::AdaptResolution16x10();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, AdaptResolution4x3) {
+ Base::AdaptResolution4x3();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, MuteStream) {
+ Base::MuteStream();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, MultipleSendStreams) {
+ Base::MultipleSendStreams();
+}
+
+// TODO(juberti): Restore this test once we support sending 0 fps.
+TEST_F(WebRtcVideoMediaChannelTest, DISABLED_AdaptDropAllFrames) {
+ Base::AdaptDropAllFrames();
+}
+// TODO(juberti): Understand why we get decode errors on this test.
+TEST_F(WebRtcVideoMediaChannelTest, DISABLED_AdaptFramerate) {
+ Base::AdaptFramerate();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, SetSendStreamFormat0x0) {
+ Base::SetSendStreamFormat0x0();
+}
+
+// TODO(zhurunz): Fix the flakey test.
+TEST_F(WebRtcVideoMediaChannelTest, DISABLED_SetSendStreamFormat) {
+ Base::SetSendStreamFormat();
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsSendAndReceive) {
+ Base::TwoStreamsSendAndReceive(cricket::VideoCodec(100, "VP8", 640, 400, 30,
+ 0));
+}
+
+TEST_F(WebRtcVideoMediaChannelTest, TwoStreamsReUseFirstStream) {
+ Base::TwoStreamsReUseFirstStream(cricket::VideoCodec(100, "VP8", 640, 400, 30,
+ 0));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, ResetCodecOnScreencast) {
+ EXPECT_TRUE(SetupEngine());
+ cricket::VideoOptions options;
+ options.video_noise_reduction.Set(true);
+ EXPECT_TRUE(channel_->SetOptions(options));
+
+ // Set send codec.
+ cricket::VideoCodec codec(kVP8Codec);
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(123)));
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ EXPECT_TRUE(channel_->SetSend(true));
+ EXPECT_EQ(1, vie_.num_set_send_codecs());
+
+ webrtc::VideoCodec gcodec;
+ memset(&gcodec, 0, sizeof(gcodec));
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+ EXPECT_TRUE(gcodec.codecSpecific.VP8.denoisingOn);
+
+ // Send a screencast frame with the same size.
+ // Verify that denoising is turned off.
+ SendI420ScreencastFrame(kVP8Codec.width, kVP8Codec.height);
+ EXPECT_EQ(2, vie_.num_set_send_codecs());
+ EXPECT_EQ(0, vie_.GetSendCodec(channel_num, gcodec));
+ EXPECT_FALSE(gcodec.codecSpecific.VP8.denoisingOn);
+}
+
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterDecoderIfFactoryIsNotGiven) {
+ engine_.SetExternalDecoderFactory(NULL);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+ EXPECT_EQ(0, vie_.GetNumExternalDecoderRegistered(channel_num));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, RegisterDecoderIfFactoryIsGiven) {
+ decoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8);
+ engine_.SetExternalDecoderFactory(&decoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+ EXPECT_TRUE(vie_.ExternalDecoderRegistered(channel_num, 100));
+ EXPECT_EQ(1, vie_.GetNumExternalDecoderRegistered(channel_num));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterDecoderMultipleTimes) {
+ decoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8);
+ engine_.SetExternalDecoderFactory(&decoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+ EXPECT_TRUE(vie_.ExternalDecoderRegistered(channel_num, 100));
+ EXPECT_EQ(1, vie_.GetNumExternalDecoderRegistered(channel_num));
+ EXPECT_EQ(1, decoder_factory_.GetNumCreatedDecoders());
+
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+ EXPECT_EQ(1, vie_.GetNumExternalDecoderRegistered(channel_num));
+ EXPECT_EQ(1, decoder_factory_.GetNumCreatedDecoders());
+}
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterDecoderForNonVP8) {
+ decoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8);
+ engine_.SetExternalDecoderFactory(&decoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kRedCodec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+ EXPECT_EQ(0, vie_.GetNumExternalDecoderRegistered(channel_num));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterEncoderIfFactoryIsNotGiven) {
+ engine_.SetExternalEncoderFactory(NULL);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ EXPECT_EQ(0, vie_.GetNumExternalEncoderRegistered(channel_num));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, RegisterEncoderIfFactoryIsGiven) {
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8, "VP8");
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ EXPECT_TRUE(vie_.ExternalEncoderRegistered(channel_num, 100));
+ EXPECT_EQ(1, vie_.GetNumExternalEncoderRegistered(channel_num));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterEncoderMultipleTimes) {
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8, "VP8");
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ EXPECT_TRUE(vie_.ExternalEncoderRegistered(channel_num, 100));
+ EXPECT_EQ(1, vie_.GetNumExternalEncoderRegistered(channel_num));
+ EXPECT_EQ(1, encoder_factory_.GetNumCreatedEncoders());
+
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(1, vie_.GetNumExternalEncoderRegistered(channel_num));
+ EXPECT_EQ(1, encoder_factory_.GetNumCreatedEncoders());
+}
+
+TEST_F(WebRtcVideoEngineTestFake, RegisterEncoderWithMultipleSendStreams) {
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8, "VP8");
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_EQ(1, vie_.GetTotalNumExternalEncoderRegistered());
+
+ // When we add the first stream (1234), it reuses the default send channel,
+ // so it doesn't increase the registration count of external encoders.
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(1234)));
+ EXPECT_EQ(1, vie_.GetTotalNumExternalEncoderRegistered());
+
+ // When we add the second stream (2345), it creates a new channel and
+ // increments the registration count.
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(2345)));
+ EXPECT_EQ(2, vie_.GetTotalNumExternalEncoderRegistered());
+
+ // At this moment the total registration count is two, but only one encoder
+ // is registered per channel.
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_EQ(1, vie_.GetNumExternalEncoderRegistered(channel_num));
+
+ // Removing send streams decrements the registration count.
+ EXPECT_TRUE(channel_->RemoveSendStream(1234));
+ EXPECT_EQ(1, vie_.GetTotalNumExternalEncoderRegistered());
+
+ // When we remove the last send stream, it also destroys the last send
+ // channel and causes the registration count to drop to zero. It is a little
+ // weird, but not a bug.
+ EXPECT_TRUE(channel_->RemoveSendStream(2345));
+ EXPECT_EQ(0, vie_.GetTotalNumExternalEncoderRegistered());
+}
+
+TEST_F(WebRtcVideoEngineTestFake, DontRegisterEncoderForNonVP8) {
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecGeneric,
+ "GENERIC");
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ // Note: unlike the SetRecvCodecs, we must set a valid video codec for
+ // channel_->SetSendCodecs() to succeed.
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ EXPECT_EQ(0, vie_.GetNumExternalEncoderRegistered(channel_num));
+}
+
+// Test that NACK and REMB are enabled for external codec.
+TEST_F(WebRtcVideoEngineTestFake, FeedbackParamsForNonVP8) {
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecGeneric,
+ "GENERIC");
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ encoder_factory_.NotifyCodecsAvailable();
+ EXPECT_TRUE(SetupEngine());
+
+ std::vector<cricket::VideoCodec> codecs(engine_.codecs());
+ EXPECT_EQ("GENERIC", codecs[0].name);
+ EXPECT_TRUE(codecs[0].HasFeedbackParam(
+ cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+ cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codecs[0].HasFeedbackParam(
+ cricket::FeedbackParam(cricket::kRtcpFbParamRemb,
+ cricket::kParamValueEmpty)));
+ EXPECT_TRUE(codecs[0].HasFeedbackParam(
+ cricket::FeedbackParam(cricket::kRtcpFbParamCcm,
+ cricket::kRtcpFbCcmParamFir)));
+}
+
+TEST_F(WebRtcVideoEngineTestFake, UpdateEncoderCodecsAfterSetFactory) {
+ engine_.SetExternalEncoderFactory(&encoder_factory_);
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+
+ encoder_factory_.AddSupportedVideoCodecType(webrtc::kVideoCodecVP8, "VP8");
+ encoder_factory_.NotifyCodecsAvailable();
+ std::vector<cricket::VideoCodec> codecs;
+ codecs.push_back(kVP8Codec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ EXPECT_TRUE(vie_.ExternalEncoderRegistered(channel_num, 100));
+ EXPECT_EQ(1, vie_.GetNumExternalEncoderRegistered(channel_num));
+ EXPECT_EQ(1, encoder_factory_.GetNumCreatedEncoders());
+}
+
+// Tests that OnReadyToSend will be propagated into ViE.
+TEST_F(WebRtcVideoEngineTestFake, OnReadyToSend) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = vie_.GetLastChannel();
+ EXPECT_TRUE(vie_.GetIsTransmitting(channel_num));
+
+ channel_->OnReadyToSend(false);
+ EXPECT_FALSE(vie_.GetIsTransmitting(channel_num));
+
+ channel_->OnReadyToSend(true);
+ EXPECT_TRUE(vie_.GetIsTransmitting(channel_num));
+}
+
+#if 0
+TEST_F(WebRtcVideoEngineTestFake, CaptureFrameTimestampToNtpTimestamp) {
+ EXPECT_TRUE(SetupEngine());
+ int capture_id = vie_.GetCaptureId(vie_.GetLastChannel());
+
+ // Set send codec.
+ cricket::VideoCodec codec(kVP8Codec);
+ std::vector<cricket::VideoCodec> codec_list;
+ codec_list.push_back(codec);
+ EXPECT_TRUE(channel_->AddSendStream(
+ cricket::StreamParams::CreateLegacy(123)));
+ EXPECT_TRUE(channel_->SetSendCodecs(codec_list));
+ EXPECT_TRUE(channel_->SetSend(true));
+
+ int64 timestamp = time(NULL) * talk_base::kNumNanosecsPerSec;
+ SendI420ScreencastFrameWithTimestamp(
+ kVP8Codec.width, kVP8Codec.height, timestamp);
+ EXPECT_EQ(talk_base::UnixTimestampNanosecsToNtpMillisecs(timestamp),
+ vie_.GetCaptureLastTimestamp(capture_id));
+
+ SendI420ScreencastFrameWithTimestamp(kVP8Codec.width, kVP8Codec.height, 0);
+ EXPECT_EQ(0, vie_.GetCaptureLastTimestamp(capture_id));
+}
+#endif
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.cc
new file mode 100644
index 00000000000..584aac0c3f3
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.cc
@@ -0,0 +1,358 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/media/webrtc/webrtcvideoframe.h"
+
+#include "libyuv/convert.h"
+#include "libyuv/convert_from.h"
+#include "libyuv/planar_functions.h"
+#include "talk/base/logging.h"
+#include "talk/media/base/videocapturer.h"
+#include "talk/media/base/videocommon.h"
+
+namespace cricket {
+
+static const int kWatermarkWidth = 8;
+static const int kWatermarkHeight = 8;
+static const int kWatermarkOffsetFromLeft = 8;
+static const int kWatermarkOffsetFromBottom = 8;
+static const unsigned char kWatermarkMaxYValue = 64;
+
+FrameBuffer::FrameBuffer() : length_(0) {}
+
+FrameBuffer::FrameBuffer(size_t length) : length_(0) {
+ char* buffer = new char[length];
+ SetData(buffer, length);
+}
+
+FrameBuffer::~FrameBuffer() {
+ // Make sure that the video_frame_ doesn't delete the buffer as it may be
+ // shared between multiple WebRtcVideoFrame.
+ uint8_t* new_memory = NULL;
+ uint32_t new_length = 0;
+ uint32_t new_size = 0;
+ video_frame_.Swap(new_memory, new_length, new_size);
+}
+
+void FrameBuffer::SetData(char* data, size_t length) {
+ data_.reset(data);
+ length_ = length;
+ uint8_t* new_memory = reinterpret_cast<uint8_t*>(data);
+ uint32_t new_length = static_cast<int>(length);
+ uint32_t new_size = static_cast<int>(length);
+ video_frame_.Swap(new_memory, new_length, new_size);
+}
+
+void FrameBuffer::ReturnData(char** data, size_t* length) {
+ uint8_t* old_memory = NULL;
+ uint32_t old_length = 0;
+ uint32_t old_size = 0;
+ video_frame_.Swap(old_memory, old_length, old_size);
+ data_.release();
+ length_ = 0;
+ *length = old_length;
+ *data = reinterpret_cast<char*>(old_memory);
+}
+
+char* FrameBuffer::data() { return data_.get(); }
+
+size_t FrameBuffer::length() const { return length_; }
+
+webrtc::VideoFrame* FrameBuffer::frame() { return &video_frame_; }
+
+const webrtc::VideoFrame* FrameBuffer::frame() const { return &video_frame_; }
+
+WebRtcVideoFrame::WebRtcVideoFrame()
+ : video_buffer_(new RefCountedBuffer()), is_black_(false) {}
+
+WebRtcVideoFrame::~WebRtcVideoFrame() {}
+
+bool WebRtcVideoFrame::Init(
+ uint32 format, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp, int rotation) {
+ return Reset(format, w, h, dw, dh, sample, sample_size, pixel_width,
+ pixel_height, elapsed_time, time_stamp, rotation);
+}
+
+bool WebRtcVideoFrame::Init(const CapturedFrame* frame, int dw, int dh) {
+ return Reset(frame->fourcc, frame->width, frame->height, dw, dh,
+ static_cast<uint8*>(frame->data), frame->data_size,
+ frame->pixel_width, frame->pixel_height, frame->elapsed_time,
+ frame->time_stamp, frame->rotation);
+}
+
+bool WebRtcVideoFrame::InitToBlack(int w, int h, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) {
+ InitToEmptyBuffer(w, h, pixel_width, pixel_height, elapsed_time, time_stamp);
+ if (!is_black_) {
+ return SetToBlack();
+ }
+ return true;
+}
+
+void WebRtcVideoFrame::Attach(
+ uint8* buffer, size_t buffer_size, int w, int h, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time, int64 time_stamp, int rotation) {
+ talk_base::scoped_refptr<RefCountedBuffer> video_buffer(
+ new RefCountedBuffer());
+ video_buffer->SetData(reinterpret_cast<char*>(buffer), buffer_size);
+ Attach(video_buffer.get(), buffer_size, w, h, pixel_width, pixel_height,
+ elapsed_time, time_stamp, rotation);
+}
+
+void WebRtcVideoFrame::Detach(uint8** data, size_t* length) {
+ video_buffer_->ReturnData(reinterpret_cast<char**>(data), length);
+}
+
+size_t WebRtcVideoFrame::GetWidth() const { return frame()->Width(); }
+
+size_t WebRtcVideoFrame::GetHeight() const { return frame()->Height(); }
+
+const uint8* WebRtcVideoFrame::GetYPlane() const {
+ uint8_t* buffer = frame()->Buffer();
+ return buffer;
+}
+
+const uint8* WebRtcVideoFrame::GetUPlane() const {
+ uint8_t* buffer = frame()->Buffer();
+ if (buffer) {
+ buffer += (frame()->Width() * frame()->Height());
+ }
+ return buffer;
+}
+
+const uint8* WebRtcVideoFrame::GetVPlane() const {
+ uint8_t* buffer = frame()->Buffer();
+ if (buffer) {
+ int uv_size = static_cast<int>(GetChromaSize());
+ buffer += frame()->Width() * frame()->Height() + uv_size;
+ }
+ return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetYPlane() {
+ uint8_t* buffer = frame()->Buffer();
+ return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetUPlane() {
+ uint8_t* buffer = frame()->Buffer();
+ if (buffer) {
+ buffer += (frame()->Width() * frame()->Height());
+ }
+ return buffer;
+}
+
+uint8* WebRtcVideoFrame::GetVPlane() {
+ uint8_t* buffer = frame()->Buffer();
+ if (buffer) {
+ int uv_size = static_cast<int>(GetChromaSize());
+ buffer += frame()->Width() * frame()->Height() + uv_size;
+ }
+ return buffer;
+}
+
+VideoFrame* WebRtcVideoFrame::Copy() const {
+ const char* old_buffer = video_buffer_->data();
+ if (!old_buffer)
+ return NULL;
+ size_t new_buffer_size = video_buffer_->length();
+
+ WebRtcVideoFrame* ret_val = new WebRtcVideoFrame();
+ ret_val->Attach(video_buffer_.get(), new_buffer_size, frame()->Width(),
+ frame()->Height(), pixel_width_, pixel_height_, elapsed_time_,
+ time_stamp_, rotation_);
+ return ret_val;
+}
+
+bool WebRtcVideoFrame::MakeExclusive() {
+ const int length = static_cast<int>(video_buffer_->length());
+ RefCountedBuffer* exclusive_buffer = new RefCountedBuffer(length);
+ memcpy(exclusive_buffer->data(), video_buffer_->data(), length);
+ Attach(exclusive_buffer, length, frame()->Width(), frame()->Height(),
+ pixel_width_, pixel_height_, elapsed_time_, time_stamp_, rotation_);
+ return true;
+}
+
+size_t WebRtcVideoFrame::CopyToBuffer(uint8* buffer, size_t size) const {
+ if (!frame()->Buffer()) {
+ return 0;
+ }
+
+ size_t needed = frame()->Length();
+ if (needed <= size) {
+ memcpy(buffer, frame()->Buffer(), needed);
+ }
+ return needed;
+}
+
+// TODO(fbarchard): Refactor into base class and share with lmi
+size_t WebRtcVideoFrame::ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
+ size_t size, int stride_rgb) const {
+ if (!frame()->Buffer()) {
+ return 0;
+ }
+ size_t width = frame()->Width();
+ size_t height = frame()->Height();
+ size_t needed = (stride_rgb >= 0 ? stride_rgb : -stride_rgb) * height;
+ if (size < needed) {
+ LOG(LS_WARNING) << "RGB buffer is not large enough";
+ return needed;
+ }
+
+ if (libyuv::ConvertFromI420(GetYPlane(), GetYPitch(), GetUPlane(),
+ GetUPitch(), GetVPlane(), GetVPitch(), buffer,
+ stride_rgb,
+ static_cast<int>(width),
+ static_cast<int>(height),
+ to_fourcc)) {
+ LOG(LS_WARNING) << "RGB type not supported: " << to_fourcc;
+ return 0; // 0 indicates error
+ }
+ return needed;
+}
+
+void WebRtcVideoFrame::Attach(
+ RefCountedBuffer* video_buffer, size_t buffer_size, int w, int h,
+ size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp, int rotation) {
+ if (video_buffer_.get() == video_buffer) {
+ return;
+ }
+ is_black_ = false;
+ video_buffer_ = video_buffer;
+ frame()->SetWidth(w);
+ frame()->SetHeight(h);
+ pixel_width_ = pixel_width;
+ pixel_height_ = pixel_height;
+ elapsed_time_ = elapsed_time;
+ time_stamp_ = time_stamp;
+ rotation_ = rotation;
+}
+
+// Add a square watermark near the left-low corner. clamp Y.
+// Returns false on error.
+bool WebRtcVideoFrame::AddWatermark() {
+ size_t w = GetWidth();
+ size_t h = GetHeight();
+
+ if (w < kWatermarkWidth + kWatermarkOffsetFromLeft ||
+ h < kWatermarkHeight + kWatermarkOffsetFromBottom) {
+ return false;
+ }
+
+ uint8* buffer = GetYPlane();
+ for (size_t x = kWatermarkOffsetFromLeft;
+ x < kWatermarkOffsetFromLeft + kWatermarkWidth; ++x) {
+ for (size_t y = h - kWatermarkOffsetFromBottom - kWatermarkHeight;
+ y < h - kWatermarkOffsetFromBottom; ++y) {
+ buffer[y * w + x] =
+ talk_base::_min(buffer[y * w + x], kWatermarkMaxYValue);
+ }
+ }
+ return true;
+}
+
+bool WebRtcVideoFrame::Reset(
+ uint32 format, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp, int rotation) {
+ if (!Validate(format, w, h, sample, sample_size)) {
+ return false;
+ }
+ // Translate aliases to standard enums (e.g., IYUV -> I420).
+ format = CanonicalFourCC(format);
+
+ // Round display width and height down to multiple of 4, to avoid webrtc
+ // size calculation error on odd sizes.
+ // TODO(Ronghua): Remove this once the webrtc allocator is fixed.
+ dw = (dw > 4) ? (dw & ~3) : dw;
+ dh = (dh > 4) ? (dh & ~3) : dh;
+
+ // Set up a new buffer.
+ // TODO(fbarchard): Support lazy allocation.
+ int new_width = dw;
+ int new_height = dh;
+ if (rotation == 90 || rotation == 270) { // If rotated swap width, height.
+ new_width = dh;
+ new_height = dw;
+ }
+
+ size_t desired_size = SizeOf(new_width, new_height);
+ talk_base::scoped_refptr<RefCountedBuffer> video_buffer(
+ new RefCountedBuffer(desired_size));
+ // Since the libyuv::ConvertToI420 will handle the rotation, so the
+ // new frame's rotation should always be 0.
+ Attach(video_buffer.get(), desired_size, new_width, new_height, pixel_width,
+ pixel_height, elapsed_time, time_stamp, 0);
+
+ int horiz_crop = ((w - dw) / 2) & ~1;
+ // ARGB on Windows has negative height.
+ // The sample's layout in memory is normal, so just correct crop.
+ int vert_crop = ((abs(h) - dh) / 2) & ~1;
+ // Conversion functions expect negative height to flip the image.
+ int idh = (h < 0) ? -dh : dh;
+ uint8* y = GetYPlane();
+ int y_stride = GetYPitch();
+ uint8* u = GetUPlane();
+ int u_stride = GetUPitch();
+ uint8* v = GetVPlane();
+ int v_stride = GetVPitch();
+ int r = libyuv::ConvertToI420(
+ sample, sample_size, y, y_stride, u, u_stride, v, v_stride, horiz_crop,
+ vert_crop, w, h, dw, idh, static_cast<libyuv::RotationMode>(rotation),
+ format);
+ if (r) {
+ LOG(LS_ERROR) << "Error parsing format: " << GetFourccName(format)
+ << " return code : " << r;
+ return false;
+ }
+ return true;
+}
+
+VideoFrame* WebRtcVideoFrame::CreateEmptyFrame(
+ int w, int h, size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) const {
+ WebRtcVideoFrame* frame = new WebRtcVideoFrame();
+ frame->InitToEmptyBuffer(w, h, pixel_width, pixel_height, elapsed_time,
+ time_stamp);
+ return frame;
+}
+
+void WebRtcVideoFrame::InitToEmptyBuffer(int w, int h, size_t pixel_width,
+ size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp) {
+ size_t buffer_size = VideoFrame::SizeOf(w, h);
+ talk_base::scoped_refptr<RefCountedBuffer> video_buffer(
+ new RefCountedBuffer(buffer_size));
+ Attach(video_buffer.get(), buffer_size, w, h, pixel_width, pixel_height,
+ elapsed_time, time_stamp, 0);
+}
+
+} // namespace cricket
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h
new file mode 100644
index 00000000000..18475a69775
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe.h
@@ -0,0 +1,150 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTCVIDEOFRAME_H_
+#define TALK_MEDIA_WEBRTCVIDEOFRAME_H_
+
+#include "talk/base/buffer.h"
+#include "talk/base/refcount.h"
+#include "talk/base/scoped_ref_ptr.h"
+#include "talk/media/base/videoframe.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
+
+namespace cricket {
+
+struct CapturedFrame;
+
+// Class that takes ownership of the frame passed to it.
+class FrameBuffer {
+ public:
+ FrameBuffer();
+ explicit FrameBuffer(size_t length);
+ ~FrameBuffer();
+
+ void SetData(char* data, size_t length);
+ void ReturnData(char** data, size_t* length);
+ char* data();
+ size_t length() const;
+
+ webrtc::VideoFrame* frame();
+ const webrtc::VideoFrame* frame() const;
+
+ private:
+ talk_base::scoped_array<char> data_;
+ size_t length_;
+ webrtc::VideoFrame video_frame_;
+};
+
+class WebRtcVideoFrame : public VideoFrame {
+ public:
+ typedef talk_base::RefCountedObject<FrameBuffer> RefCountedBuffer;
+
+ WebRtcVideoFrame();
+ ~WebRtcVideoFrame();
+
+ // Creates a frame from a raw sample with FourCC "format" and size "w" x "h".
+ // "h" can be negative indicating a vertically flipped image.
+ // "dh" is destination height if cropping is desired and is always positive.
+ // Returns "true" if successful.
+ bool Init(uint32 format, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp, int rotation);
+
+ bool Init(const CapturedFrame* frame, int dw, int dh);
+
+ bool InitToBlack(int w, int h, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp);
+
+ void Attach(uint8* buffer, size_t buffer_size, int w, int h,
+ size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp, int rotation);
+
+ void Detach(uint8** data, size_t* length);
+ bool AddWatermark();
+ webrtc::VideoFrame* frame() { return video_buffer_->frame(); }
+ webrtc::VideoFrame* frame() const { return video_buffer_->frame(); }
+
+ // From base class VideoFrame.
+ virtual bool Reset(uint32 format, int w, int h, int dw, int dh, uint8* sample,
+ size_t sample_size, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time, int64 time_stamp,
+ int rotation);
+
+ virtual size_t GetWidth() const;
+ virtual size_t GetHeight() const;
+ virtual const uint8* GetYPlane() const;
+ virtual const uint8* GetUPlane() const;
+ virtual const uint8* GetVPlane() const;
+ virtual uint8* GetYPlane();
+ virtual uint8* GetUPlane();
+ virtual uint8* GetVPlane();
+ virtual int32 GetYPitch() const { return frame()->Width(); }
+ virtual int32 GetUPitch() const { return (frame()->Width() + 1) / 2; }
+ virtual int32 GetVPitch() const { return (frame()->Width() + 1) / 2; }
+ virtual void* GetNativeHandle() const { return NULL; }
+
+ virtual size_t GetPixelWidth() const { return pixel_width_; }
+ virtual size_t GetPixelHeight() const { return pixel_height_; }
+ virtual int64 GetElapsedTime() const { return elapsed_time_; }
+ virtual int64 GetTimeStamp() const { return time_stamp_; }
+ virtual void SetElapsedTime(int64 elapsed_time) {
+ elapsed_time_ = elapsed_time;
+ }
+ virtual void SetTimeStamp(int64 time_stamp) { time_stamp_ = time_stamp; }
+
+ virtual int GetRotation() const { return rotation_; }
+
+ virtual VideoFrame* Copy() const;
+ virtual bool MakeExclusive();
+ virtual size_t CopyToBuffer(uint8* buffer, size_t size) const;
+ virtual size_t ConvertToRgbBuffer(uint32 to_fourcc, uint8* buffer,
+ size_t size, int stride_rgb) const;
+
+ private:
+ void Attach(RefCountedBuffer* video_buffer, size_t buffer_size, int w, int h,
+ size_t pixel_width, size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp, int rotation);
+
+ virtual VideoFrame* CreateEmptyFrame(int w, int h, size_t pixel_width,
+ size_t pixel_height, int64 elapsed_time,
+ int64 time_stamp) const;
+ void InitToEmptyBuffer(int w, int h, size_t pixel_width, size_t pixel_height,
+ int64 elapsed_time, int64 time_stamp);
+
+ talk_base::scoped_refptr<RefCountedBuffer> video_buffer_;
+ bool is_black_;
+ size_t pixel_width_;
+ size_t pixel_height_;
+ int64 elapsed_time_;
+ int64 time_stamp_;
+ int rotation_;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCVIDEOFRAME_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe_unittest.cc
new file mode 100644
index 00000000000..2f0decb2898
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvideoframe_unittest.cc
@@ -0,0 +1,313 @@
+/*
+ * libjingle
+ * Copyright 2011 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "talk/base/flags.h"
+#include "talk/media/base/videoframe_unittest.h"
+#include "talk/media/webrtc/webrtcvideoframe.h"
+
+extern int FLAG_yuvconverter_repeat; // From lmivideoframe_unittest.cc.
+
+class WebRtcVideoFrameTest : public VideoFrameTest<cricket::WebRtcVideoFrame> {
+ public:
+ WebRtcVideoFrameTest() {
+ repeat_ = FLAG_yuvconverter_repeat;
+ }
+
+ void TestInit(int cropped_width, int cropped_height) {
+ const int frame_width = 1920;
+ const int frame_height = 1080;
+
+ // Build the CapturedFrame.
+ cricket::CapturedFrame captured_frame;
+ captured_frame.fourcc = cricket::FOURCC_I420;
+ captured_frame.pixel_width = 1;
+ captured_frame.pixel_height = 1;
+ captured_frame.elapsed_time = 1234;
+ captured_frame.time_stamp = 5678;
+ captured_frame.rotation = 0;
+ captured_frame.width = frame_width;
+ captured_frame.height = frame_height;
+ captured_frame.data_size = (frame_width * frame_height) +
+ ((frame_width + 1) / 2) * ((frame_height + 1) / 2) * 2;
+ talk_base::scoped_array<uint8> captured_frame_buffer(
+ new uint8[captured_frame.data_size]);
+ captured_frame.data = captured_frame_buffer.get();
+
+ // Create the new frame from the CapturedFrame.
+ cricket::WebRtcVideoFrame frame;
+ EXPECT_TRUE(frame.Init(&captured_frame, cropped_width, cropped_height));
+
+ // Verify the new frame.
+ EXPECT_EQ(1u, frame.GetPixelWidth());
+ EXPECT_EQ(1u, frame.GetPixelHeight());
+ EXPECT_EQ(1234, frame.GetElapsedTime());
+ EXPECT_EQ(5678, frame.GetTimeStamp());
+ EXPECT_EQ(0, frame.GetRotation());
+ // The size of the new frame should have been cropped to multiple of 4.
+ EXPECT_EQ(static_cast<size_t>(cropped_width & ~3), frame.GetWidth());
+ EXPECT_EQ(static_cast<size_t>(cropped_height & ~3), frame.GetHeight());
+ }
+};
+
+#define TEST_WEBRTCVIDEOFRAME(X) TEST_F(WebRtcVideoFrameTest, X) { \
+ VideoFrameTest<cricket::WebRtcVideoFrame>::X(); \
+}
+
+TEST_WEBRTCVIDEOFRAME(ConstructI420)
+TEST_WEBRTCVIDEOFRAME(ConstructI422)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2Unaligned)
+TEST_WEBRTCVIDEOFRAME(ConstructYuy2Wide)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12)
+TEST_WEBRTCVIDEOFRAME(ConstructUyvy)
+TEST_WEBRTCVIDEOFRAME(ConstructM420)
+TEST_WEBRTCVIDEOFRAME(ConstructQ420)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12)
+TEST_WEBRTCVIDEOFRAME(ConstructABGR)
+TEST_WEBRTCVIDEOFRAME(ConstructARGB)
+TEST_WEBRTCVIDEOFRAME(ConstructARGBWide)
+TEST_WEBRTCVIDEOFRAME(ConstructBGRA)
+TEST_WEBRTCVIDEOFRAME(Construct24BG)
+TEST_WEBRTCVIDEOFRAME(ConstructRaw)
+TEST_WEBRTCVIDEOFRAME(ConstructRGB565)
+TEST_WEBRTCVIDEOFRAME(ConstructARGB1555)
+TEST_WEBRTCVIDEOFRAME(ConstructARGB4444)
+
+TEST_WEBRTCVIDEOFRAME(ConstructI420Mirror)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructI420Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructYV12Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructNV12Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructNV21Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructUYVYRotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate0)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate90)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate180)
+TEST_WEBRTCVIDEOFRAME(ConstructYUY2Rotate270)
+TEST_WEBRTCVIDEOFRAME(ConstructI4201Pixel)
+TEST_WEBRTCVIDEOFRAME(ConstructI4205Pixel)
+// TODO(juberti): WebRtcVideoFrame does not support horizontal crop.
+// Re-evaluate once it supports 3 independent planes, since we might want to
+// just Init normally and then crop by adjusting pointers.
+// TEST_WEBRTCVIDEOFRAME(ConstructI420CropHorizontal)
+TEST_WEBRTCVIDEOFRAME(ConstructI420CropVertical)
+// TODO(juberti): WebRtcVideoFrame is not currently refcounted.
+// TEST_WEBRTCVIDEOFRAME(ConstructCopy)
+// TEST_WEBRTCVIDEOFRAME(ConstructCopyIsRef)
+TEST_WEBRTCVIDEOFRAME(ConstructBlack)
+// TODO(fbarchard): Implement Jpeg
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI420)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI422)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI444)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI411)
+// TEST_WEBRTCVIDEOFRAME(ConstructMjpgI400)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI420)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI422)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI444)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI411)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI400)
+TEST_WEBRTCVIDEOFRAME(ValidateI420)
+TEST_WEBRTCVIDEOFRAME(ValidateI420SmallSize)
+TEST_WEBRTCVIDEOFRAME(ValidateI420LargeSize)
+TEST_WEBRTCVIDEOFRAME(ValidateI420HugeSize)
+// TEST_WEBRTCVIDEOFRAME(ValidateMjpgI420InvalidSize)
+// TEST_WEBRTCVIDEOFRAME(ValidateI420InvalidSize)
+
+// TODO(fbarchard): WebRtcVideoFrame does not support odd sizes.
+// Re-evaluate once WebRTC switches to libyuv
+// TEST_WEBRTCVIDEOFRAME(ConstructYuy2AllSizes)
+// TEST_WEBRTCVIDEOFRAME(ConstructARGBAllSizes)
+TEST_WEBRTCVIDEOFRAME(Reset)
+TEST_WEBRTCVIDEOFRAME(ConvertToABGRBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToABGRBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToABGRBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB1555Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB1555BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB1555BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB4444Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB4444BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGB4444BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGBBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGBBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToARGBBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToBGRABuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToBGRABufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToBGRABufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToRAWBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToRAWBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToRAWBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB24Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB24BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB24BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB565Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB565BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToRGB565BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerBGGRBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerBGGRBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerBGGRBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGRBGBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGRBGBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGRBGBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGBRGBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGBRGBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerGBRGBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerRGGBBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerRGGBBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToBayerRGGBBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToI400Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToI400BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToI400BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToYUY2Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToYUY2BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToYUY2BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertToUYVYBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertToUYVYBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertToUYVYBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromABGRBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromABGRBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromABGRBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB1555Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB1555BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB1555BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB4444Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB4444BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGB4444BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGBBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGBBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromARGBBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBGRABuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBGRABufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBGRABufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRAWBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRAWBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRAWBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB24Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB24BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB24BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB565Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB565BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromRGB565BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerBGGRBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerBGGRBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerBGGRBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGRBGBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGRBGBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGRBGBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGBRGBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGBRGBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerGBRGBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerRGGBBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerRGGBBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromBayerRGGBBufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromI400Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromI400BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromI400BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromYUY2Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromYUY2BufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromYUY2BufferInverted)
+TEST_WEBRTCVIDEOFRAME(ConvertFromUYVYBuffer)
+TEST_WEBRTCVIDEOFRAME(ConvertFromUYVYBufferStride)
+TEST_WEBRTCVIDEOFRAME(ConvertFromUYVYBufferInverted)
+// TEST_WEBRTCVIDEOFRAME(ConvertToI422Buffer)
+TEST_WEBRTCVIDEOFRAME(ConvertARGBToBayerGRBG)
+TEST_WEBRTCVIDEOFRAME(ConvertARGBToBayerGBRG)
+TEST_WEBRTCVIDEOFRAME(ConvertARGBToBayerBGGR)
+TEST_WEBRTCVIDEOFRAME(ConvertARGBToBayerRGGB)
+TEST_WEBRTCVIDEOFRAME(CopyToBuffer)
+TEST_WEBRTCVIDEOFRAME(CopyToFrame)
+TEST_WEBRTCVIDEOFRAME(Write)
+TEST_WEBRTCVIDEOFRAME(CopyToBuffer1Pixel)
+// TEST_WEBRTCVIDEOFRAME(ConstructARGBBlackWhitePixel)
+
+TEST_WEBRTCVIDEOFRAME(StretchToFrame)
+TEST_WEBRTCVIDEOFRAME(Copy)
+TEST_WEBRTCVIDEOFRAME(CopyIsRef)
+TEST_WEBRTCVIDEOFRAME(MakeExclusive)
+
+// These functions test implementation-specific details.
+TEST_F(WebRtcVideoFrameTest, AttachAndRelease) {
+ cricket::WebRtcVideoFrame frame1, frame2;
+ ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
+ const int64 time_stamp = 0x7FFFFFFFFFFFFFF0LL;
+ frame1.SetTimeStamp(time_stamp);
+ EXPECT_EQ(time_stamp, frame1.GetTimeStamp());
+ frame2.Attach(frame1.frame()->Buffer(), frame1.frame()->Size(),
+ kWidth, kHeight, 1, 1,
+ frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
+ EXPECT_TRUE(IsEqual(frame1, frame2, 0));
+ uint8* buffer;
+ size_t size;
+ frame2.Detach(&buffer, &size);
+ EXPECT_EQ(frame1.frame()->Buffer(), buffer);
+ EXPECT_EQ(frame1.frame()->Size(), size);
+ EXPECT_TRUE(IsNull(frame2));
+ EXPECT_TRUE(IsSize(frame1, kWidth, kHeight));
+}
+
+TEST_F(WebRtcVideoFrameTest, Transfer) {
+ cricket::WebRtcVideoFrame frame1, frame2;
+ ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
+ uint8* buffer;
+ size_t size;
+ frame1.Detach(&buffer, &size);
+ frame2.Attach(buffer, size, kWidth, kHeight, 1, 1,
+ frame1.GetElapsedTime(), frame1.GetTimeStamp(), 0);
+ EXPECT_TRUE(IsNull(frame1));
+ EXPECT_TRUE(IsSize(frame2, kWidth, kHeight));
+}
+
+// Tests the Init function with different cropped size.
+TEST_F(WebRtcVideoFrameTest, InitEvenSize) {
+ TestInit(640, 360);
+}
+
+TEST_F(WebRtcVideoFrameTest, InitOddWidth) {
+ TestInit(601, 480);
+}
+
+TEST_F(WebRtcVideoFrameTest, InitOddHeight) {
+ TestInit(360, 765);
+}
+
+TEST_F(WebRtcVideoFrameTest, InitOddWidthHeight) {
+ TestInit(355, 1021);
+}
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvie.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvie.h
new file mode 100644
index 00000000000..9550962e5d1
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvie.h
@@ -0,0 +1,151 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_MEDIA_WEBRTCVIE_H_
+#define TALK_MEDIA_WEBRTCVIE_H_
+
+#include "talk/base/common.h"
+#include "talk/media/webrtc/webrtccommon.h"
+#include "webrtc/common_types.h"
+#include "webrtc/modules/interface/module_common_types.h"
+#include "webrtc/modules/video_capture/include/video_capture.h"
+#include "webrtc/modules/video_coding/codecs/interface/video_codec_interface.h"
+#include "webrtc/modules/video_render/include/video_render.h"
+#include "webrtc/video_engine/include/vie_base.h"
+#include "webrtc/video_engine/include/vie_capture.h"
+#include "webrtc/video_engine/include/vie_codec.h"
+#include "webrtc/video_engine/include/vie_errors.h"
+#include "webrtc/video_engine/include/vie_external_codec.h"
+#include "webrtc/video_engine/include/vie_image_process.h"
+#include "webrtc/video_engine/include/vie_network.h"
+#include "webrtc/video_engine/include/vie_render.h"
+#include "webrtc/video_engine/include/vie_rtp_rtcp.h"
+
+namespace cricket {
+
+// all tracing macros should go to a common file
+
+// automatically handles lifetime of VideoEngine
+class scoped_vie_engine {
+ public:
+ explicit scoped_vie_engine(webrtc::VideoEngine* e) : ptr(e) {}
+ // VERIFY, to ensure that there are no leaks at shutdown
+ ~scoped_vie_engine() {
+ if (ptr) {
+ webrtc::VideoEngine::Delete(ptr);
+ }
+ }
+ webrtc::VideoEngine* get() const { return ptr; }
+ private:
+ webrtc::VideoEngine* ptr;
+};
+
+// scoped_ptr class to handle obtaining and releasing VideoEngine
+// interface pointers
+template<class T> class scoped_vie_ptr {
+ public:
+ explicit scoped_vie_ptr(const scoped_vie_engine& e)
+ : ptr(T::GetInterface(e.get())) {}
+ explicit scoped_vie_ptr(T* p) : ptr(p) {}
+ ~scoped_vie_ptr() { if (ptr) ptr->Release(); }
+ T* operator->() const { return ptr; }
+ T* get() const { return ptr; }
+ private:
+ T* ptr;
+};
+
+// Utility class for aggregating the various WebRTC interface.
+// Fake implementations can also be injected for testing.
+class ViEWrapper {
+ public:
+ ViEWrapper()
+ : engine_(webrtc::VideoEngine::Create()),
+ base_(engine_), codec_(engine_), capture_(engine_),
+ network_(engine_), render_(engine_), rtp_(engine_),
+ image_(engine_), ext_codec_(engine_) {
+ }
+
+ ViEWrapper(webrtc::ViEBase* base, webrtc::ViECodec* codec,
+ webrtc::ViECapture* capture, webrtc::ViENetwork* network,
+ webrtc::ViERender* render, webrtc::ViERTP_RTCP* rtp,
+ webrtc::ViEImageProcess* image,
+ webrtc::ViEExternalCodec* ext_codec)
+ : engine_(NULL),
+ base_(base),
+ codec_(codec),
+ capture_(capture),
+ network_(network),
+ render_(render),
+ rtp_(rtp),
+ image_(image),
+ ext_codec_(ext_codec) {
+ }
+
+ virtual ~ViEWrapper() {}
+ webrtc::VideoEngine* engine() { return engine_.get(); }
+ webrtc::ViEBase* base() { return base_.get(); }
+ webrtc::ViECodec* codec() { return codec_.get(); }
+ webrtc::ViECapture* capture() { return capture_.get(); }
+ webrtc::ViENetwork* network() { return network_.get(); }
+ webrtc::ViERender* render() { return render_.get(); }
+ webrtc::ViERTP_RTCP* rtp() { return rtp_.get(); }
+ webrtc::ViEImageProcess* image() { return image_.get(); }
+ webrtc::ViEExternalCodec* ext_codec() { return ext_codec_.get(); }
+ int error() { return base_->LastError(); }
+
+ private:
+ scoped_vie_engine engine_;
+ scoped_vie_ptr<webrtc::ViEBase> base_;
+ scoped_vie_ptr<webrtc::ViECodec> codec_;
+ scoped_vie_ptr<webrtc::ViECapture> capture_;
+ scoped_vie_ptr<webrtc::ViENetwork> network_;
+ scoped_vie_ptr<webrtc::ViERender> render_;
+ scoped_vie_ptr<webrtc::ViERTP_RTCP> rtp_;
+ scoped_vie_ptr<webrtc::ViEImageProcess> image_;
+ scoped_vie_ptr<webrtc::ViEExternalCodec> ext_codec_;
+};
+
+// Adds indirection to static WebRtc functions, allowing them to be mocked.
+class ViETraceWrapper {
+ public:
+ virtual ~ViETraceWrapper() {}
+
+ virtual int SetTraceFilter(const unsigned int filter) {
+ return webrtc::VideoEngine::SetTraceFilter(filter);
+ }
+ virtual int SetTraceFile(const char* fileNameUTF8) {
+ return webrtc::VideoEngine::SetTraceFile(fileNameUTF8);
+ }
+ virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
+ return webrtc::VideoEngine::SetTraceCallback(callback);
+ }
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCVIE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoe.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoe.h
new file mode 100644
index 00000000000..bc8358d9b4a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoe.h
@@ -0,0 +1,179 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef TALK_MEDIA_WEBRTCVOE_H_
+#define TALK_MEDIA_WEBRTCVOE_H_
+
+#include "talk/base/common.h"
+#include "talk/media/webrtc/webrtccommon.h"
+
+#include "webrtc/common_types.h"
+#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_base.h"
+#include "webrtc/voice_engine/include/voe_codec.h"
+#include "webrtc/voice_engine/include/voe_dtmf.h"
+#include "webrtc/voice_engine/include/voe_errors.h"
+#include "webrtc/voice_engine/include/voe_external_media.h"
+#include "webrtc/voice_engine/include/voe_file.h"
+#include "webrtc/voice_engine/include/voe_hardware.h"
+#include "webrtc/voice_engine/include/voe_neteq_stats.h"
+#include "webrtc/voice_engine/include/voe_network.h"
+#include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
+#include "webrtc/voice_engine/include/voe_video_sync.h"
+#include "webrtc/voice_engine/include/voe_volume_control.h"
+
+namespace cricket {
+// automatically handles lifetime of WebRtc VoiceEngine
+class scoped_voe_engine {
+ public:
+ explicit scoped_voe_engine(webrtc::VoiceEngine* e) : ptr(e) {}
+ // VERIFY, to ensure that there are no leaks at shutdown
+ ~scoped_voe_engine() { if (ptr) VERIFY(webrtc::VoiceEngine::Delete(ptr)); }
+ // Releases the current pointer.
+ void reset() {
+ if (ptr) {
+ VERIFY(webrtc::VoiceEngine::Delete(ptr));
+ ptr = NULL;
+ }
+ }
+ webrtc::VoiceEngine* get() const { return ptr; }
+ private:
+ webrtc::VoiceEngine* ptr;
+};
+
+// scoped_ptr class to handle obtaining and releasing WebRTC interface pointers
+template<class T>
+class scoped_voe_ptr {
+ public:
+ explicit scoped_voe_ptr(const scoped_voe_engine& e)
+ : ptr(T::GetInterface(e.get())) {}
+ explicit scoped_voe_ptr(T* p) : ptr(p) {}
+ ~scoped_voe_ptr() { if (ptr) ptr->Release(); }
+ T* operator->() const { return ptr; }
+ T* get() const { return ptr; }
+
+ // Releases the current pointer.
+ void reset() {
+ if (ptr) {
+ ptr->Release();
+ ptr = NULL;
+ }
+ }
+
+ private:
+ T* ptr;
+};
+
+// Utility class for aggregating the various WebRTC interface.
+// Fake implementations can also be injected for testing.
+class VoEWrapper {
+ public:
+ VoEWrapper()
+ : engine_(webrtc::VoiceEngine::Create()), processing_(engine_),
+ base_(engine_), codec_(engine_), dtmf_(engine_), file_(engine_),
+ hw_(engine_), media_(engine_), neteq_(engine_), network_(engine_),
+ rtp_(engine_), sync_(engine_), volume_(engine_) {
+ }
+ VoEWrapper(webrtc::VoEAudioProcessing* processing,
+ webrtc::VoEBase* base,
+ webrtc::VoECodec* codec,
+ webrtc::VoEDtmf* dtmf,
+ webrtc::VoEFile* file,
+ webrtc::VoEHardware* hw,
+ webrtc::VoEExternalMedia* media,
+ webrtc::VoENetEqStats* neteq,
+ webrtc::VoENetwork* network,
+ webrtc::VoERTP_RTCP* rtp,
+ webrtc::VoEVideoSync* sync,
+ webrtc::VoEVolumeControl* volume)
+ : engine_(NULL),
+ processing_(processing),
+ base_(base),
+ codec_(codec),
+ dtmf_(dtmf),
+ file_(file),
+ hw_(hw),
+ media_(media),
+ neteq_(neteq),
+ network_(network),
+ rtp_(rtp),
+ sync_(sync),
+ volume_(volume) {
+ }
+ ~VoEWrapper() {}
+ webrtc::VoiceEngine* engine() const { return engine_.get(); }
+ webrtc::VoEAudioProcessing* processing() const { return processing_.get(); }
+ webrtc::VoEBase* base() const { return base_.get(); }
+ webrtc::VoECodec* codec() const { return codec_.get(); }
+ webrtc::VoEDtmf* dtmf() const { return dtmf_.get(); }
+ webrtc::VoEFile* file() const { return file_.get(); }
+ webrtc::VoEHardware* hw() const { return hw_.get(); }
+ webrtc::VoEExternalMedia* media() const { return media_.get(); }
+ webrtc::VoENetEqStats* neteq() const { return neteq_.get(); }
+ webrtc::VoENetwork* network() const { return network_.get(); }
+ webrtc::VoERTP_RTCP* rtp() const { return rtp_.get(); }
+ webrtc::VoEVideoSync* sync() const { return sync_.get(); }
+ webrtc::VoEVolumeControl* volume() const { return volume_.get(); }
+ int error() { return base_->LastError(); }
+
+ private:
+ scoped_voe_engine engine_;
+ scoped_voe_ptr<webrtc::VoEAudioProcessing> processing_;
+ scoped_voe_ptr<webrtc::VoEBase> base_;
+ scoped_voe_ptr<webrtc::VoECodec> codec_;
+ scoped_voe_ptr<webrtc::VoEDtmf> dtmf_;
+ scoped_voe_ptr<webrtc::VoEFile> file_;
+ scoped_voe_ptr<webrtc::VoEHardware> hw_;
+ scoped_voe_ptr<webrtc::VoEExternalMedia> media_;
+ scoped_voe_ptr<webrtc::VoENetEqStats> neteq_;
+ scoped_voe_ptr<webrtc::VoENetwork> network_;
+ scoped_voe_ptr<webrtc::VoERTP_RTCP> rtp_;
+ scoped_voe_ptr<webrtc::VoEVideoSync> sync_;
+ scoped_voe_ptr<webrtc::VoEVolumeControl> volume_;
+};
+
+// Adds indirection to static WebRtc functions, allowing them to be mocked.
+class VoETraceWrapper {
+ public:
+ virtual ~VoETraceWrapper() {}
+
+ virtual int SetTraceFilter(const unsigned int filter) {
+ return webrtc::VoiceEngine::SetTraceFilter(filter);
+ }
+ virtual int SetTraceFile(const char* fileNameUTF8) {
+ return webrtc::VoiceEngine::SetTraceFile(fileNameUTF8);
+ }
+ virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
+ return webrtc::VoiceEngine::SetTraceCallback(callback);
+ }
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCVOE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.cc
new file mode 100644
index 00000000000..855a9e4236a
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.cc
@@ -0,0 +1,3079 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#ifdef HAVE_WEBRTC_VOICE
+
+#include "talk/media/webrtc/webrtcvoiceengine.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+#include "talk/base/base64.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/common.h"
+#include "talk/base/helpers.h"
+#include "talk/base/logging.h"
+#include "talk/base/stringencode.h"
+#include "talk/base/stringutils.h"
+#include "talk/media/base/audiorenderer.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/streamparams.h"
+#include "talk/media/base/voiceprocessor.h"
+#include "talk/media/webrtc/webrtcvoe.h"
+#include "webrtc/modules/audio_processing/include/audio_processing.h"
+
+#ifdef WIN32
+#include <objbase.h> // NOLINT
+#endif
+
+namespace cricket {
+
+// Preference entry for an audio codec: identifies a codec by name, clock
+// rate and channel count, assigns the RTP payload type we advertise, and
+// records whether the codec accepts an arbitrary requested bitrate.
+struct CodecPref {
+  const char* name;  // Case-insensitive codec name, e.g. "OPUS".
+  int clockrate;  // Clock rate in Hz.
+  int channels;  // 1 = mono, 2 = stereo.
+  int payload_type;  // RTP payload type to advertise.
+  bool is_multi_rate;  // True if an explicit bitrate may be requested.
+};
+
+// Supported codecs; the position in this table determines the local
+// preference order used when sorting the codec list (earlier = preferred).
+static const CodecPref kCodecPrefs[] = {
+  { "OPUS", 48000, 2, 111, true },
+  { "ISAC", 16000, 1, 103, true },
+  { "ISAC", 32000, 1, 104, true },
+  { "CELT", 32000, 1, 109, true },
+  { "CELT", 32000, 2, 110, true },
+  { "G722", 16000, 1, 9, false },
+  { "ILBC", 8000, 1, 102, false },
+  { "PCMU", 8000, 1, 0, false },
+  { "PCMA", 8000, 1, 8, false },
+  { "CN", 48000, 1, 107, false },
+  { "CN", 32000, 1, 106, false },
+  { "CN", 16000, 1, 105, false },
+  { "CN", 8000, 1, 13, false },
+  { "red", 8000, 1, 127, false },
+  { "telephone-event", 8000, 1, 126, false },
+};
+
+// For Linux/Mac, using the default device is done by specifying index 0 for
+// VoE 4.0 and not -1 (which was the case for VoE 3.5).
+//
+// On Windows Vista and newer, Microsoft introduced the concept of "Default
+// Communications Device". This means that there are two types of default
+// devices (old Wave Audio style default and Default Communications Device).
+//
+// On Windows systems which only support Wave Audio style default, uses either
+// -1 or 0 to select the default device.
+//
+// On Windows systems which support both "Default Communication Device" and
+// old Wave Audio style default, use -1 for Default Communications Device and
+// -2 for Wave Audio style default, which is what we want to use for clips.
+// It's not clear yet whether the -2 index is handled properly on other OSes.
+
+#ifdef WIN32
+static const int kDefaultAudioDeviceId = -1;
+// Only defined on WIN32: soundclips use the Wave Audio style default device.
+static const int kDefaultSoundclipDeviceId = -2;
+#else
+static const int kDefaultAudioDeviceId = 0;
+#endif
+
+// extension header for audio levels, as defined in
+// http://tools.ietf.org/html/draft-ietf-avtext-client-to-mixer-audio-level-03
+static const char kRtpAudioLevelHeaderExtension[] =
+    "urn:ietf:params:rtp-hdrext:ssrc-audio-level";
+static const int kRtpAudioLevelHeaderExtensionId = 1;
+
+static const char kIsacCodecName[] = "ISAC";
+static const char kL16CodecName[] = "L16";
+// Codec parameters for Opus.
+static const int kOpusMonoBitrate = 32000;
+// Parameter used for NACK.
+// This value is equivalent to 5 seconds of audio data at 20 ms per packet.
+static const int kNackMaxPackets = 250;
+static const int kOpusStereoBitrate = 64000;
+// draft-spittka-payload-rtp-opus-03
+// Opus bitrate should be in the range between 6000 and 510000.
+static const int kOpusMinBitrate = 6000;
+static const int kOpusMaxBitrate = 510000;
+
+#if defined(CHROMEOS)
+// Ensure we open the file in a writeable path on ChromeOS. This workaround
+// can be removed when it's possible to specify a filename for audio option
+// based AEC dumps.
+//
+// TODO(grunell): Use a string in the options instead of hardcoding it here
+// and let the embedder choose the filename (crbug.com/264223).
+//
+// NOTE(ajm): Don't use this hardcoded /tmp path on non-ChromeOS platforms.
+static const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump";
+#else
+static const char kAecDumpByAudioOptionFilename[] = "audio.aecdump";
+#endif
+
+// Dumps an AudioCodec in RFC 2327-ish format.
+static std::string ToString(const AudioCodec& codec) {
+  std::stringstream ss;
+  ss << codec.name << "/" << codec.clockrate << "/" << codec.channels
+     << " (" << codec.id << ")";
+  return ss.str();
+}
+// Same, but for a raw VoiceEngine codec description.
+static std::string ToString(const webrtc::CodecInst& codec) {
+  std::stringstream ss;
+  ss << codec.plname << "/" << codec.plfreq << "/" << codec.channels
+     << " (" << codec.pltype << ")";
+  return ss.str();
+}
+
+// Logs |text| line by line at severity |sev|. Note that strtok tokenizes
+// |text| in place (it writes NULs over the delimiters) and is not
+// thread-safe.
+static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
+  const char* delim = "\r\n";
+  for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
+    LOG_V(sev) << tok;
+  }
+}
+
+// Severity is an integer because it is assumed to come from the command line.
+// Maps a talk_base logging severity onto a WebRtc trace-filter bitmask.
+// The cases intentionally fall through: a more verbose severity accumulates
+// the filter bits of every less verbose severity below it.
+static int SeverityToFilter(int severity) {
+  int filter = webrtc::kTraceNone;
+  switch (severity) {
+    case talk_base::LS_VERBOSE:
+      filter |= webrtc::kTraceAll;
+    case talk_base::LS_INFO:
+      filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
+    case talk_base::LS_WARNING:
+      filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
+    case talk_base::LS_ERROR:
+      filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
+  }
+  return filter;
+}
+
+// Returns whether |codec| supports arbitrary bitrates, according to the
+// kCodecPrefs table (matched case-insensitively by name plus clock rate).
+// Codecs not present in the table are treated as fixed-rate.
+static bool IsCodecMultiRate(const webrtc::CodecInst& codec) {
+  for (size_t i = 0; i < ARRAY_SIZE(kCodecPrefs); ++i) {
+    if (_stricmp(kCodecPrefs[i].name, codec.plname) == 0 &&
+        kCodecPrefs[i].clockrate == codec.plfreq) {
+      return kCodecPrefs[i].is_multi_rate;
+    }
+  }
+  return false;
+}
+
+// Looks for a codec in |codecs| matching |codec|; on success copies the
+// match into |found_codec| (if non-NULL) and returns true.
+static bool FindCodec(const std::vector<AudioCodec>& codecs,
+                      const AudioCodec& codec,
+                      AudioCodec* found_codec) {
+  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
+       it != codecs.end(); ++it) {
+    if (it->Matches(codec)) {
+      if (found_codec != NULL) {
+        *found_codec = *it;
+      }
+      return true;
+    }
+  }
+  return false;
+}
+// True if |codec| carries the RTCP feedback "nack" parameter.
+static bool IsNackEnabled(const AudioCodec& codec) {
+  return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
+                                              kParamValueEmpty));
+}
+
+
+// SoundclipMedia implementation that plays sound clips through a dedicated
+// channel on the soundclip VoiceEngine instance (engine->voe_sc()).
+class WebRtcSoundclipMedia : public SoundclipMedia {
+ public:
+  explicit WebRtcSoundclipMedia(WebRtcVoiceEngine *engine)
+      : engine_(engine), webrtc_channel_(-1) {
+    engine_->RegisterSoundclip(this);
+  }
+
+  virtual ~WebRtcSoundclipMedia() {
+    engine_->UnregisterSoundclip(this);
+    if (webrtc_channel_ != -1) {
+      // We shouldn't have to call Disable() here. DeleteChannel() should call
+      // StopPlayout() while deleting the channel. We should fix the bug
+      // inside WebRTC and remove the Disable() call below. This work is
+      // tracked by bug http://b/issue?id=5382855.
+      PlaySound(NULL, 0, 0);
+      Disable();
+      if (engine_->voe_sc()->base()->DeleteChannel(webrtc_channel_)
+          == -1) {
+        LOG_RTCERR1(DeleteChannel, webrtc_channel_);
+      }
+    }
+  }
+
+  // Creates the VoiceEngine channel used for clip playout.
+  bool Init() {
+    webrtc_channel_ = engine_->voe_sc()->base()->CreateChannel();
+    if (webrtc_channel_ == -1) {
+      LOG_RTCERR0(CreateChannel);
+      return false;
+    }
+    return true;
+  }
+
+  // Starts playout on the soundclip channel.
+  bool Enable() {
+    if (engine_->voe_sc()->base()->StartPlayout(webrtc_channel_) == -1) {
+      LOG_RTCERR1(StartPlayout, webrtc_channel_);
+      return false;
+    }
+    return true;
+  }
+
+  // Stops playout on the soundclip channel.
+  bool Disable() {
+    if (engine_->voe_sc()->base()->StopPlayout(webrtc_channel_) == -1) {
+      LOG_RTCERR1(StopPlayout, webrtc_channel_);
+      return false;
+    }
+    return true;
+  }
+
+  // Plays the |len| bytes at |buf|; a NULL |buf| just stops the current
+  // clip. SF_LOOP in |flags| makes the clip repeat.
+  virtual bool PlaySound(const char *buf, int len, int flags) {
+    // The voe file api is not available in chrome.
+    if (!engine_->voe_sc()->file()) {
+      return false;
+    }
+    // Must stop playing the current sound (if any), because we are about to
+    // modify the stream.
+    if (engine_->voe_sc()->file()->StopPlayingFileLocally(webrtc_channel_)
+        == -1) {
+      LOG_RTCERR1(StopPlayingFileLocally, webrtc_channel_);
+      return false;
+    }
+
+    if (buf) {
+      stream_.reset(new WebRtcSoundclipStream(buf, len));
+      stream_->set_loop((flags & SF_LOOP) != 0);
+      stream_->Rewind();
+
+      // Play it.
+      if (engine_->voe_sc()->file()->StartPlayingFileLocally(
+          webrtc_channel_, stream_.get()) == -1) {
+        LOG_RTCERR2(StartPlayingFileLocally, webrtc_channel_, stream_.get());
+        LOG(LS_ERROR) << "Unable to start soundclip";
+        return false;
+      }
+    } else {
+      stream_.reset();
+    }
+    return true;
+  }
+
+  int GetLastEngineError() const { return engine_->voe_sc()->error(); }
+
+ private:
+  WebRtcVoiceEngine *engine_;  // Not owned.
+  int webrtc_channel_;  // Soundclip channel id, or -1 if not yet created.
+  talk_base::scoped_ptr<WebRtcSoundclipStream> stream_;
+};
+
+// Default constructor: creates real VoiceEngine wrappers and tracing.
+WebRtcVoiceEngine::WebRtcVoiceEngine()
+    : voe_wrapper_(new VoEWrapper()),
+      voe_wrapper_sc_(new VoEWrapper()),
+      tracing_(new VoETraceWrapper()),
+      adm_(NULL),
+      adm_sc_(NULL),
+      log_filter_(SeverityToFilter(kDefaultLogSeverity)),
+      is_dumping_aec_(false),
+      desired_local_monitor_enable_(false),
+      tx_processor_ssrc_(0),
+      rx_processor_ssrc_(0) {
+  Construct();
+}
+
+// Injection constructor, used by tests to supply fake/mock wrappers.
+WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
+                                     VoEWrapper* voe_wrapper_sc,
+                                     VoETraceWrapper* tracing)
+    : voe_wrapper_(voe_wrapper),
+      voe_wrapper_sc_(voe_wrapper_sc),
+      tracing_(tracing),
+      adm_(NULL),
+      adm_sc_(NULL),
+      log_filter_(SeverityToFilter(kDefaultLogSeverity)),
+      is_dumping_aec_(false),
+      desired_local_monitor_enable_(false),
+      tx_processor_ssrc_(0),
+      rx_processor_ssrc_(0) {
+  Construct();
+}
+
+// Shared constructor body: hooks up tracing and the engine observer,
+// builds the codec list, and registers supported RTP header extensions.
+void WebRtcVoiceEngine::Construct() {
+  SetTraceFilter(log_filter_);
+  initialized_ = false;
+  LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine";
+  SetTraceOptions("");
+  if (tracing_->SetTraceCallback(this) == -1) {
+    LOG_RTCERR0(SetTraceCallback);
+  }
+  if (voe_wrapper_->base()->RegisterVoiceEngineObserver(*this) == -1) {
+    LOG_RTCERR0(RegisterVoiceEngineObserver);
+  }
+  // Clear the default agc state.
+  memset(&default_agc_config_, 0, sizeof(default_agc_config_));
+
+  // Load our audio codec list.
+  ConstructCodecs();
+
+  // Load our RTP Header extensions.
+  rtp_header_extensions_.push_back(
+      RtpHeaderExtension(kRtpAudioLevelHeaderExtension,
+                         kRtpAudioLevelHeaderExtensionId));
+}
+
+// True if |codec| is Opus (case-insensitive name match).
+static bool IsOpus(const AudioCodec& codec) {
+  return (_stricmp(codec.name.c_str(), kOpusCodecName) == 0);
+}
+
+// True if |codec| is ISAC (case-insensitive name match).
+static bool IsIsac(const AudioCodec& codec) {
+  return (_stricmp(codec.name.c_str(), kIsacCodecName) == 0);
+}
+
+// True if params["stereo"] == "1"
+static bool IsOpusStereoEnabled(const AudioCodec& codec) {
+  CodecParameterMap::const_iterator param =
+      codec.params.find(kCodecParamStereo);
+  if (param == codec.params.end()) {
+    return false;
+  }
+  return param->second == kParamValueTrue;
+}
+
+// True if |bitrate| lies within the Opus range [kOpusMinBitrate,
+// kOpusMaxBitrate] bps.
+static bool IsValidOpusBitrate(int bitrate) {
+  return (bitrate >= kOpusMinBitrate && bitrate <= kOpusMaxBitrate);
+}
+
+// Returns 0 if params[kCodecParamMaxAverageBitrate] is not defined or invalid.
+// Returns the value of params[kCodecParamMaxAverageBitrate] otherwise.
+static int GetOpusBitrateFromParams(const AudioCodec& codec) {
+  int bitrate = 0;
+  if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) {
+    return 0;
+  }
+  if (!IsValidOpusBitrate(bitrate)) {
+    LOG(LS_WARNING) << "Codec parameter \"maxaveragebitrate\" has an "
+                    << "invalid value: " << bitrate;
+    return 0;
+  }
+  return bitrate;
+}
+
+// Queries VoiceEngine for its codec list and keeps every codec present in
+// kCodecPrefs, applying our payload types and preference order; unknown
+// codecs are logged and dropped.
+void WebRtcVoiceEngine::ConstructCodecs() {
+  LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+  int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
+  for (int i = 0; i < ncodecs; ++i) {
+    webrtc::CodecInst voe_codec;
+    if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
+      // Skip uncompressed formats.
+      if (_stricmp(voe_codec.plname, kL16CodecName) == 0) {
+        continue;
+      }
+
+      // Find the matching entry in our preference table, if any.
+      const CodecPref* pref = NULL;
+      for (size_t j = 0; j < ARRAY_SIZE(kCodecPrefs); ++j) {
+        if (_stricmp(kCodecPrefs[j].name, voe_codec.plname) == 0 &&
+            kCodecPrefs[j].clockrate == voe_codec.plfreq &&
+            kCodecPrefs[j].channels == voe_codec.channels) {
+          pref = &kCodecPrefs[j];
+          break;
+        }
+      }
+
+      if (pref) {
+        // Use the payload type that we've configured in our pref table;
+        // use the offset in our pref table to determine the sort order.
+        AudioCodec codec(pref->payload_type, voe_codec.plname, voe_codec.plfreq,
+                         voe_codec.rate, voe_codec.channels,
+                         ARRAY_SIZE(kCodecPrefs) - (pref - kCodecPrefs));
+        LOG(LS_INFO) << ToString(codec);
+        if (IsIsac(codec)) {
+          // Indicate auto-bandwidth in signaling.
+          codec.bitrate = 0;
+        }
+        if (IsOpus(codec)) {
+          // Only add fmtp parameters that differ from the spec.
+          if (kPreferredMinPTime != kOpusDefaultMinPTime) {
+            codec.params[kCodecParamMinPTime] =
+                talk_base::ToString(kPreferredMinPTime);
+          }
+          if (kPreferredMaxPTime != kOpusDefaultMaxPTime) {
+            codec.params[kCodecParamMaxPTime] =
+                talk_base::ToString(kPreferredMaxPTime);
+          }
+          // TODO(hellner): Add ptime, sprop-stereo, stereo and useinbandfec
+          // when they can be set to values other than the default.
+        }
+        codecs_.push_back(codec);
+      } else {
+        LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec);
+      }
+    }
+  }
+  // Make sure they are in local preference order.
+  std::sort(codecs_.begin(), codecs_.end(), &AudioCodec::Preferable);
+}
+
+WebRtcVoiceEngine::~WebRtcVoiceEngine() {
+  LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine";
+  if (voe_wrapper_->base()->DeRegisterVoiceEngineObserver() == -1) {
+    LOG_RTCERR0(DeRegisterVoiceEngineObserver);
+  }
+  // Destroy each wrapper before releasing the external audio device module
+  // it was initialized with.
+  if (adm_) {
+    voe_wrapper_.reset();
+    adm_->Release();
+    adm_ = NULL;
+  }
+  if (adm_sc_) {
+    voe_wrapper_sc_.reset();
+    adm_sc_->Release();
+    adm_sc_ = NULL;
+  }
+
+  // Test to see if the media processor was deregistered properly
+  ASSERT(SignalRxMediaFrame.is_empty());
+  ASSERT(SignalTxMediaFrame.is_empty());
+
+  tracing_->SetTraceCallback(NULL);
+}
+
+// Initializes both VoiceEngine instances; on failure the engines are torn
+// down again via Terminate(). Note: |worker_thread| is not used here.
+bool WebRtcVoiceEngine::Init(talk_base::Thread* worker_thread) {
+  LOG(LS_INFO) << "WebRtcVoiceEngine::Init";
+  bool res = InitInternal();
+  if (res) {
+    LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!";
+  } else {
+    LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed";
+    Terminate();
+  }
+  return res;
+}
+
+// Performs the actual engine initialization: inits the main engine with
+// |adm_|, saves the default AGC config, applies default options, then inits
+// the soundclip engine with |adm_sc_|.
+bool WebRtcVoiceEngine::InitInternal() {
+  // Temporarily turn logging level up for the Init call
+  int old_filter = log_filter_;
+  int extended_filter = log_filter_ | SeverityToFilter(talk_base::LS_INFO);
+  SetTraceFilter(extended_filter);
+  SetTraceOptions("");
+
+  // Init WebRtc VoiceEngine.
+  if (voe_wrapper_->base()->Init(adm_) == -1) {
+    LOG_RTCERR0_EX(Init, voe_wrapper_->error());
+    SetTraceFilter(old_filter);
+    return false;
+  }
+
+  SetTraceFilter(old_filter);
+  SetTraceOptions(log_options_);
+
+  // Log the VoiceEngine version info
+  char buffer[1024] = "";
+  voe_wrapper_->base()->GetVersion(buffer);
+  LOG(LS_INFO) << "WebRtc VoiceEngine Version:";
+  LogMultiline(talk_base::LS_INFO, buffer);
+
+  // Save the default AGC configuration settings. This must happen before
+  // calling SetOptions or the default will be overwritten.
+  if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) {
+    LOG_RTCERR0(GetAGCConfig);
+    return false;
+  }
+
+  if (!SetOptions(MediaEngineInterface::DEFAULT_AUDIO_OPTIONS)) {
+    return false;
+  }
+
+  // Print our codec list again for the call diagnostic log
+  LOG(LS_INFO) << "WebRtc VoiceEngine codecs:";
+  for (std::vector<AudioCodec>::const_iterator it = codecs_.begin();
+      it != codecs_.end(); ++it) {
+    LOG(LS_INFO) << ToString(*it);
+  }
+
+#if defined(LINUX) && !defined(HAVE_LIBPULSE)
+  voe_wrapper_sc_->hw()->SetAudioDeviceLayer(webrtc::kAudioLinuxAlsa);
+#endif
+
+  // Initialize the VoiceEngine instance that we'll use to play out sound clips.
+  if (voe_wrapper_sc_->base()->Init(adm_sc_) == -1) {
+    LOG_RTCERR0_EX(Init, voe_wrapper_sc_->error());
+    return false;
+  }
+
+  // On Windows, tell it to use the default sound (not communication) devices.
+  // First check whether there is a valid sound device for playback.
+  // TODO(juberti): Clean this up when we support setting the soundclip device.
+#ifdef WIN32
+  // The SetPlayoutDevice may not be implemented in the case of external ADM.
+  // TODO(ronghuawu): We should only check the adm_sc_ here, but current
+  // PeerConnection interface never set the adm_sc_, so need to check both
+  // in order to determine if the external adm is used.
+  if (!adm_ && !adm_sc_) {
+    int num_of_devices = 0;
+    if (voe_wrapper_sc_->hw()->GetNumOfPlayoutDevices(num_of_devices) != -1 &&
+        num_of_devices > 0) {
+      if (voe_wrapper_sc_->hw()->SetPlayoutDevice(kDefaultSoundclipDeviceId)
+          == -1) {
+        LOG_RTCERR1_EX(SetPlayoutDevice, kDefaultSoundclipDeviceId,
+                       voe_wrapper_sc_->error());
+        return false;
+      }
+    } else {
+      LOG(LS_WARNING) << "No valid sound playout device found.";
+    }
+  }
+#endif
+
+  // Disable the DTMF playout when a tone is sent.
+  // PlayDtmfTone will be used if local playout is needed.
+  // A failure here is logged but treated as non-fatal.
+  if (voe_wrapper_->dtmf()->SetDtmfFeedbackStatus(false) == -1) {
+    LOG_RTCERR1(SetDtmfFeedbackStatus, false);
+  }
+
+  initialized_ = true;
+  return true;
+}
+
+// Shuts down both engine instances and stops any in-progress AEC dump.
+void WebRtcVoiceEngine::Terminate() {
+  LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate";
+  initialized_ = false;
+
+  StopAecDump();
+
+  voe_wrapper_sc_->base()->Terminate();
+  voe_wrapper_->base()->Terminate();
+  desired_local_monitor_enable_ = false;
+}
+
+int WebRtcVoiceEngine::GetCapabilities() {
+  return AUDIO_SEND | AUDIO_RECV;
+}
+
+// Returns a new media channel, or NULL if the underlying VoE channel could
+// not be created.
+VoiceMediaChannel *WebRtcVoiceEngine::CreateChannel() {
+  WebRtcVoiceMediaChannel* ch = new WebRtcVoiceMediaChannel(this);
+  if (!ch->valid()) {
+    delete ch;
+    ch = NULL;
+  }
+  return ch;
+}
+
+// Returns a new soundclip player, or NULL if it could not be initialized
+// and enabled.
+SoundclipMedia *WebRtcVoiceEngine::CreateSoundclip() {
+  WebRtcSoundclipMedia *soundclip = new WebRtcSoundclipMedia(this);
+  if (!soundclip->Init() || !soundclip->Enable()) {
+    delete soundclip;
+    return NULL;
+  }
+  return soundclip;
+}
+
+// TODO(zhurunz): Add a comprehensive unittests for SetOptions().
+// Translates the legacy MediaEngineInterface flag bits into an AudioOptions
+// struct (filling in defaults for the flagless options) and applies it.
+bool WebRtcVoiceEngine::SetOptions(int flags) {
+  AudioOptions options;
+
+  // Convert flags to AudioOptions.
+  options.echo_cancellation.Set(
+      ((flags & MediaEngineInterface::ECHO_CANCELLATION) != 0));
+  options.auto_gain_control.Set(
+      ((flags & MediaEngineInterface::AUTO_GAIN_CONTROL) != 0));
+  options.noise_suppression.Set(
+      ((flags & MediaEngineInterface::NOISE_SUPPRESSION) != 0));
+  options.highpass_filter.Set(
+      ((flags & MediaEngineInterface::HIGHPASS_FILTER) != 0));
+  options.stereo_swapping.Set(
+      ((flags & MediaEngineInterface::STEREO_FLIPPING) != 0));
+
+  // Set defaults for flagless options here. Make sure they are all set so that
+  // ApplyOptions applies all of them when we clear overrides.
+  options.typing_detection.Set(true);
+  options.conference_mode.Set(false);
+  options.adjust_agc_delta.Set(0);
+  options.experimental_agc.Set(false);
+  options.experimental_aec.Set(false);
+  options.aec_dump.Set(false);
+
+  return SetAudioOptions(options);
+}
+
+// Applies |options| and remembers them as the engine-wide baseline.
+bool WebRtcVoiceEngine::SetAudioOptions(const AudioOptions& options) {
+  if (!ApplyOptions(options)) {
+    return false;
+  }
+  options_ = options;
+  return true;
+}
+
+// Applies |overrides| on top of the baseline and remembers them so that
+// ClearOptionOverrides() can undo them later.
+bool WebRtcVoiceEngine::SetOptionOverrides(const AudioOptions& overrides) {
+  LOG(LS_INFO) << "Setting option overrides: " << overrides.ToString();
+  if (!ApplyOptions(overrides)) {
+    return false;
+  }
+  option_overrides_ = overrides;
+  return true;
+}
+
+// Re-applies the baseline options, dropping any active overrides.
+bool WebRtcVoiceEngine::ClearOptionOverrides() {
+  LOG(LS_INFO) << "Clearing option overrides.";
+  AudioOptions options = options_;
+  // Only call ApplyOptions if |option_overrides_| contains overridden options.
+  // ApplyOptions affects NS, AGC and other options that are shared between
+  // all WebRtcVoiceEngineChannels.
+  if (option_overrides_ == AudioOptions()) {
+    return true;
+  }
+
+  if (!ApplyOptions(options)) {
+    return false;
+  }
+  option_overrides_ = AudioOptions();
+  return true;
+}
+
+// AudioOptions defaults are set in InitInternal (for options with corresponding
+// MediaEngineInterface flags) and in SetOptions(int) for flagless options.
+// Pushes each option that carries a value down to the VoE audio processing
+// module, after platform-specific adjustments. Returns false on the first
+// hard failure; typing-detection failures are logged but tolerated.
+bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) {
+  AudioOptions options = options_in;  // The options are modified below.
+  // kEcConference is AEC with high suppression.
+  webrtc::EcModes ec_mode = webrtc::kEcConference;
+  webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone;
+  webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog;
+  webrtc::NsModes ns_mode = webrtc::kNsHighSuppression;
+  bool aecm_comfort_noise = false;
+
+#if defined(IOS)
+  // On iOS, VPIO provides built-in EC and AGC.
+  options.echo_cancellation.Set(false);
+  options.auto_gain_control.Set(false);
+#elif defined(ANDROID)
+  ec_mode = webrtc::kEcAecm;
+#endif
+
+#if defined(IOS) || defined(ANDROID)
+  // Set the AGC mode for iOS as well despite disabling it above, to avoid
+  // unsupported configuration errors from webrtc.
+  agc_mode = webrtc::kAgcFixedDigital;
+  options.typing_detection.Set(false);
+  options.experimental_agc.Set(false);
+  options.experimental_aec.Set(false);
+#endif
+
+
+  LOG(LS_INFO) << "Applying audio options: " << options.ToString();
+
+  webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing();
+
+  bool echo_cancellation;
+  if (options.echo_cancellation.Get(&echo_cancellation)) {
+    if (voep->SetEcStatus(echo_cancellation, ec_mode) == -1) {
+      LOG_RTCERR2(SetEcStatus, echo_cancellation, ec_mode);
+      return false;
+    }
+#if !defined(ANDROID)
+    // TODO(ajm): Remove the error return on Android from webrtc.
+    if (voep->SetEcMetricsStatus(echo_cancellation) == -1) {
+      LOG_RTCERR1(SetEcMetricsStatus, echo_cancellation);
+      return false;
+    }
+#endif
+    if (ec_mode == webrtc::kEcAecm) {
+      if (voep->SetAecmMode(aecm_mode, aecm_comfort_noise) != 0) {
+        LOG_RTCERR2(SetAecmMode, aecm_mode, aecm_comfort_noise);
+        return false;
+      }
+    }
+  }
+
+  bool auto_gain_control;
+  if (options.auto_gain_control.Get(&auto_gain_control)) {
+    if (voep->SetAgcStatus(auto_gain_control, agc_mode) == -1) {
+      LOG_RTCERR2(SetAgcStatus, auto_gain_control, agc_mode);
+      return false;
+    }
+  }
+
+  bool noise_suppression;
+  if (options.noise_suppression.Get(&noise_suppression)) {
+    if (voep->SetNsStatus(noise_suppression, ns_mode) == -1) {
+      LOG_RTCERR2(SetNsStatus, noise_suppression, ns_mode);
+      return false;
+    }
+  }
+
+  bool highpass_filter;
+  if (options.highpass_filter.Get(&highpass_filter)) {
+    if (voep->EnableHighPassFilter(highpass_filter) == -1) {
+      LOG_RTCERR1(SetHighpassFilterStatus, highpass_filter);
+      return false;
+    }
+  }
+
+  bool stereo_swapping;
+  if (options.stereo_swapping.Get(&stereo_swapping)) {
+    // The change is verified by reading the state back.
+    voep->EnableStereoChannelSwapping(stereo_swapping);
+    if (voep->IsStereoChannelSwappingEnabled() != stereo_swapping) {
+      LOG_RTCERR1(EnableStereoChannelSwapping, stereo_swapping);
+      return false;
+    }
+  }
+
+  bool typing_detection;
+  if (options.typing_detection.Get(&typing_detection)) {
+    if (voep->SetTypingDetectionStatus(typing_detection) == -1) {
+      // In case of error, log the info and continue
+      LOG_RTCERR1(SetTypingDetectionStatus, typing_detection);
+    }
+  }
+
+  int adjust_agc_delta;
+  if (options.adjust_agc_delta.Get(&adjust_agc_delta)) {
+    if (!AdjustAgcLevel(adjust_agc_delta)) {
+      return false;
+    }
+  }
+
+  bool aec_dump;
+  if (options.aec_dump.Get(&aec_dump)) {
+    if (aec_dump)
+      StartAecDump(kAecDumpByAudioOptionFilename);
+    else
+      StopAecDump();
+  }
+
+
+  return true;
+}
+
+// Applies a fixed delay offset (in ms) to audio processing and verifies the
+// value was accepted by reading it back.
+bool WebRtcVoiceEngine::SetDelayOffset(int offset) {
+  voe_wrapper_->processing()->SetDelayOffsetMs(offset);
+  if (voe_wrapper_->processing()->DelayOffsetMs() != offset) {
+    LOG_RTCERR1(SetDelayOffsetMs, offset);
+    return false;
+  }
+
+  return true;
+}
+
+// Snapshot of a channel's playout/send state.
+// NOTE(review): no uses are visible in this chunk; presumably intended for
+// the soundclip pause/resume code mentioned in SetDevices' TODO — confirm.
+struct ResumeEntry {
+  ResumeEntry(WebRtcVoiceMediaChannel *c, bool p, SendFlags s)
+      : channel(c),
+        playout(p),
+        send(s) {
+  }
+
+  WebRtcVoiceMediaChannel *channel;
+  bool playout;
+  SendFlags send;
+};
+
+// TODO(juberti): Refactor this so that the core logic can be used to set the
+// soundclip device. At that time, reinstate the soundclip pause/resume code.
+// Switches the capture and playout devices, pausing the local monitor and
+// all channels' playout/send around the change. NULL device pointers select
+// the platform default device. Compiled to a no-op on iOS/Android.
+bool WebRtcVoiceEngine::SetDevices(const Device* in_device,
+                                   const Device* out_device) {
+#if !defined(IOS) && !defined(ANDROID)
+  int in_id = in_device ? talk_base::FromString<int>(in_device->id) :
+      kDefaultAudioDeviceId;
+  int out_id = out_device ? talk_base::FromString<int>(out_device->id) :
+      kDefaultAudioDeviceId;
+  // The device manager uses -1 as the default device, which was the case for
+  // VoE 3.5. VoE 4.0, however, uses 0 as the default in Linux and Mac.
+#ifndef WIN32
+  if (-1 == in_id) {
+    in_id = kDefaultAudioDeviceId;
+  }
+  if (-1 == out_id) {
+    out_id = kDefaultAudioDeviceId;
+  }
+#endif
+
+  std::string in_name = (in_id != kDefaultAudioDeviceId) ?
+      in_device->name : "Default device";
+  std::string out_name = (out_id != kDefaultAudioDeviceId) ?
+      out_device->name : "Default device";
+  LOG(LS_INFO) << "Setting microphone to (id=" << in_id << ", name=" << in_name
+               << ") and speaker to (id=" << out_id << ", name=" << out_name
+               << ")";
+
+  // If we're running the local monitor, we need to stop it first.
+  bool ret = true;
+  if (!PauseLocalMonitor()) {
+    LOG(LS_WARNING) << "Failed to pause local monitor";
+    ret = false;
+  }
+
+  // Must also pause all audio playback and capture.
+  for (ChannelList::const_iterator i = channels_.begin();
+       i != channels_.end(); ++i) {
+    WebRtcVoiceMediaChannel *channel = *i;
+    if (!channel->PausePlayout()) {
+      LOG(LS_WARNING) << "Failed to pause playout";
+      ret = false;
+    }
+    if (!channel->PauseSend()) {
+      LOG(LS_WARNING) << "Failed to pause send";
+      ret = false;
+    }
+  }
+
+  // Find the recording device id in VoiceEngine and set recording device.
+  if (!FindWebRtcAudioDeviceId(true, in_name, in_id, &in_id)) {
+    ret = false;
+  }
+  if (ret) {
+    // NOTE(review): |in_device| may be NULL here (default device), in which
+    // case the error log below would dereference NULL — confirm.
+    if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) {
+      LOG_RTCERR2(SetRecordingDevice, in_device->name, in_id);
+      ret = false;
+    }
+  }
+
+  // Find the playout device id in VoiceEngine and set playout device.
+  if (!FindWebRtcAudioDeviceId(false, out_name, out_id, &out_id)) {
+    LOG(LS_WARNING) << "Failed to find VoiceEngine device id for " << out_name;
+    ret = false;
+  }
+  if (ret) {
+    if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) {
+      LOG_RTCERR2(SetPlayoutDevice, out_device->name, out_id);
+      ret = false;
+    }
+  }
+
+  // Resume all audio playback and capture.
+  for (ChannelList::const_iterator i = channels_.begin();
+       i != channels_.end(); ++i) {
+    WebRtcVoiceMediaChannel *channel = *i;
+    if (!channel->ResumePlayout()) {
+      LOG(LS_WARNING) << "Failed to resume playout";
+      ret = false;
+    }
+    if (!channel->ResumeSend()) {
+      LOG(LS_WARNING) << "Failed to resume send";
+      ret = false;
+    }
+  }
+
+  // Resume local monitor.
+  if (!ResumeLocalMonitor()) {
+    LOG(LS_WARNING) << "Failed to resume local monitor";
+    ret = false;
+  }
+
+  if (ret) {
+    LOG(LS_INFO) << "Set microphone to (id=" << in_id <<" name=" << in_name
+                 << ") and speaker to (id="<< out_id << " name=" << out_name
+                 << ")";
+  }
+
+  return ret;
+#else
+  return true;
+#endif  // !IOS && !ANDROID
+}
+
+// Maps a device-manager device (|dev_name| + |dev_id|) to the VoiceEngine
+// device index for either recording (|is_input| true) or playout. Returns
+// true and sets |rtc_id| on success.
+bool WebRtcVoiceEngine::FindWebRtcAudioDeviceId(
+    bool is_input, const std::string& dev_name, int dev_id, int* rtc_id) {
+  // In Linux, VoiceEngine uses the same device dev_id as the device manager.
+#ifdef LINUX
+  *rtc_id = dev_id;
+  return true;
+#else
+  // In Windows and Mac, we need to find the VoiceEngine device id by name
+  // unless the input dev_id is the default device id.
+  if (kDefaultAudioDeviceId == dev_id) {
+    *rtc_id = dev_id;
+    return true;
+  }
+
+  // Get the number of VoiceEngine audio devices.
+  int count = 0;
+  if (is_input) {
+    if (-1 == voe_wrapper_->hw()->GetNumOfRecordingDevices(count)) {
+      LOG_RTCERR0(GetNumOfRecordingDevices);
+      return false;
+    }
+  } else {
+    if (-1 == voe_wrapper_->hw()->GetNumOfPlayoutDevices(count)) {
+      LOG_RTCERR0(GetNumOfPlayoutDevices);
+      return false;
+    }
+  }
+
+  // Match by name prefix: the device manager name may carry a suffix.
+  for (int i = 0; i < count; ++i) {
+    char name[128];
+    char guid[128];
+    if (is_input) {
+      voe_wrapper_->hw()->GetRecordingDeviceName(i, name, guid);
+      LOG(LS_VERBOSE) << "VoiceEngine microphone " << i << ": " << name;
+    } else {
+      voe_wrapper_->hw()->GetPlayoutDeviceName(i, name, guid);
+      LOG(LS_VERBOSE) << "VoiceEngine speaker " << i << ": " << name;
+    }
+
+    std::string webrtc_name(name);
+    if (dev_name.compare(0, webrtc_name.size(), webrtc_name) == 0) {
+      *rtc_id = i;
+      return true;
+    }
+  }
+  LOG(LS_WARNING) << "VoiceEngine cannot find device: " << dev_name;
+  return false;
+#endif
+}
+
+// Reads the speaker volume into |level|.
+bool WebRtcVoiceEngine::GetOutputVolume(int* level) {
+  unsigned int ulevel;
+  if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) {
+    LOG_RTCERR1(GetSpeakerVolume, level);
+    return false;
+  }
+  *level = ulevel;
+  return true;
+}
+
+// Sets the speaker volume; |level| must be within [0, 255].
+bool WebRtcVoiceEngine::SetOutputVolume(int level) {
+  ASSERT(level >= 0 && level <= 255);
+  if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) {
+    LOG_RTCERR1(SetSpeakerVolume, level);
+    return false;
+  }
+  return true;
+}
+
+// Returns the current microphone input level, or -1 on error.
+int WebRtcVoiceEngine::GetInputLevel() {
+  unsigned int ulevel;
+  return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ?
+      static_cast<int>(ulevel) : -1;
+}
+
+// Records the desired local-monitor state, then applies it.
+bool WebRtcVoiceEngine::SetLocalMonitor(bool enable) {
+  desired_local_monitor_enable_ = enable;
+  return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+// Starts or stops recording the microphone into |monitor_|.
+bool WebRtcVoiceEngine::ChangeLocalMonitor(bool enable) {
+  // The voe file api is not available in chrome.
+  if (!voe_wrapper_->file()) {
+    return false;
+  }
+  if (enable && !monitor_) {
+    monitor_.reset(new WebRtcMonitorStream);
+    if (voe_wrapper_->file()->StartRecordingMicrophone(monitor_.get()) == -1) {
+      LOG_RTCERR1(StartRecordingMicrophone, monitor_.get());
+      // Must call Stop() because there are some cases where Start will report
+      // failure but still change the state, and if we leave VE in the on state
+      // then it could crash later when trying to invoke methods on our monitor.
+      voe_wrapper_->file()->StopRecordingMicrophone();
+      monitor_.reset();
+      return false;
+    }
+  } else if (!enable && monitor_) {
+    voe_wrapper_->file()->StopRecordingMicrophone();
+    monitor_.reset();
+  }
+  return true;
+}
+
+// Temporarily stops the monitor without changing the desired state.
+bool WebRtcVoiceEngine::PauseLocalMonitor() {
+  return ChangeLocalMonitor(false);
+}
+
+// Restores the monitor to the last state requested via SetLocalMonitor().
+bool WebRtcVoiceEngine::ResumeLocalMonitor() {
+  return ChangeLocalMonitor(desired_local_monitor_enable_);
+}
+
+const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() {
+  return codecs_;
+}
+
+// True if |in| matches a codec that VoiceEngine supports.
+bool WebRtcVoiceEngine::FindCodec(const AudioCodec& in) {
+  return FindWebRtcCodec(in, NULL);
+}
+
+// Get the VoiceEngine codec that matches |in|, with the supplied settings.
+// Iterates the VoE codec table; on a match, |out| (if non-NULL) receives the
+// VoE codec with its payload type and bitrate fixed up to honor |in|.
+// Returns true iff a matching codec was found.
+bool WebRtcVoiceEngine::FindWebRtcCodec(const AudioCodec& in,
+                                        webrtc::CodecInst* out) {
+  int ncodecs = voe_wrapper_->codec()->NumOfCodecs();
+  for (int i = 0; i < ncodecs; ++i) {
+    webrtc::CodecInst voe_codec;
+    if (voe_wrapper_->codec()->GetCodec(i, voe_codec) != -1) {
+      AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq,
+                       voe_codec.rate, voe_codec.channels, 0);
+      bool multi_rate = IsCodecMultiRate(voe_codec);
+      // Allow arbitrary rates for ISAC to be specified.
+      if (multi_rate) {
+        // Set codec.bitrate to 0 so the check for codec.Matches() passes.
+        codec.bitrate = 0;
+      }
+      if (codec.Matches(in)) {
+        if (out) {
+          // Fixup the payload type.
+          voe_codec.pltype = in.id;
+
+          // Set bitrate if specified.
+          if (multi_rate && in.bitrate != 0) {
+            voe_codec.rate = in.bitrate;
+          }
+
+          // Apply codec-specific settings.
+          if (IsIsac(codec)) {
+            // If ISAC and an explicit bitrate is not specified,
+            // enable auto bandwidth adjustment.
+            voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1;
+          }
+          *out = voe_codec;
+        }
+        return true;
+      }
+    }
+  }
+  return false;
+}
+// Accessor for the RTP header extensions this engine advertises.
+const std::vector<RtpHeaderExtension>&
+WebRtcVoiceEngine::rtp_header_extensions() const {
+  return rtp_header_extensions_;
+}
+
+// Updates the trace severity filter and the free-form trace option string.
+// |filter| is only applied immediately when the engine is initialized;
+// otherwise it is stashed in |log_options_| for later.
+void WebRtcVoiceEngine::SetLogging(int min_sev, const char* filter) {
+  // if min_sev == -1, we keep the current log level.
+  if (min_sev >= 0) {
+    SetTraceFilter(SeverityToFilter(min_sev));
+  }
+  log_options_ = filter;
+  SetTraceOptions(initialized_ ? log_options_ : "");
+}
+
+// Returns the most recent error code reported by the wrapped VoiceEngine.
+int WebRtcVoiceEngine::GetLastEngineError() {
+  return voe_wrapper_->error();
+}
+
+// Records the webrtc trace filter mask and pushes it to the trace module.
+void WebRtcVoiceEngine::SetTraceFilter(int filter) {
+  log_filter_ = filter;
+  tracing_->SetTraceFilter(log_filter_);
+}
+
+// We suppport three different logging settings for VoiceEngine:
+// 1. Observer callback that goes into talk diagnostic logfile.
+//    Use --logfile and --loglevel
+//
+// 2. Encrypted VoiceEngine log for debugging VoiceEngine.
+//    Use --voice_loglevel --voice_logfilter "tracefile file_name"
+//
+// 3. EC log and dump for debugging QualityEngine.
+//    Use --voice_loglevel --voice_logfilter "recordEC file_name"
+//
+// For more details see: "https://sites.google.com/a/google.com/wavelet/Home/
+// Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters"
+//
+// |options| is a space-separated token list; quoted tokens are kept whole.
+// Recognized directives: "tracefile <name>" and "recordEC [<name>]" (the
+// latter stops the AEC dump when no file name follows).
+void WebRtcVoiceEngine::SetTraceOptions(const std::string& options) {
+  // Set encrypted trace file.
+  std::vector<std::string> opts;
+  talk_base::tokenize(options, ' ', '"', '"', &opts);
+  std::vector<std::string>::iterator tracefile =
+      std::find(opts.begin(), opts.end(), "tracefile");
+  if (tracefile != opts.end() && ++tracefile != opts.end()) {
+    // Write encrypted debug output (at same loglevel) to file
+    // EncryptedTraceFile no longer supported.
+    if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
+      LOG_RTCERR1(SetTraceFile, *tracefile);
+    }
+  }
+
+  // Set AEC dump file
+  std::vector<std::string>::iterator recordEC =
+      std::find(opts.begin(), opts.end(), "recordEC");
+  if (recordEC != opts.end()) {
+    ++recordEC;
+    if (recordEC != opts.end())
+      StartAecDump(recordEC->c_str());
+    else
+      StopAecDump();
+  }
+}
+
+// Ignore spammy trace messages, mostly from the stats API when we haven't
+// gotten RTCP info yet from the remote side.
+// Returns true if |trace| contains any of the known-noisy substrings below.
+// NOTE: these strings (typos included, e.g. "isnot playing") must match the
+// exact text emitted by webrtc -- do not "fix" them here.
+bool WebRtcVoiceEngine::ShouldIgnoreTrace(const std::string& trace) {
+  static const char* kTracesToIgnore[] = {
+    "\tfailed to GetReportBlockInformation",
+    "GetRecCodec() failed to get received codec",
+    "GetReceivedRtcpStatistics: Could not get received RTP statistics",
+    "GetRemoteRTCPData() failed to measure statistics due to lack of received RTP and/or RTCP packets",  // NOLINT
+    "GetRemoteRTCPData() failed to retrieve sender info for remote side",
+    "GetRTPStatistics() failed to measure RTT since no RTP packets have been received yet",  // NOLINT
+    "GetRTPStatistics() failed to read RTP statistics from the RTP/RTCP module",
+    "GetRTPStatistics() failed to retrieve RTT from the RTP/RTCP module",
+    "SenderInfoReceived No received SR",
+    "StatisticsRTP() no statistics available",
+    "TransmitMixer::TypingDetection() VE_TYPING_NOISE_WARNING message has been posted",  // NOLINT
+    "TransmitMixer::TypingDetection() pending noise-saturation warning exists",  // NOLINT
+    "GetRecPayloadType() failed to retrieve RX payload type (error=10026)",  // NOLINT
+    "StopPlayingFileAsMicrophone() isnot playing (error=8088)",
+    NULL
+  };
+  // Substring match against every entry; the table is NULL-terminated.
+  for (const char* const* p = kTracesToIgnore; *p; ++p) {
+    if (trace.find(*p) != std::string::npos) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// Trace sink for webrtc: maps webrtc trace levels to talk_base logging
+// severities and forwards the message body to the talk log.
+void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace,
+                              int length) {
+  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
+  if (level == webrtc::kTraceError || level == webrtc::kTraceCritical)
+    sev = talk_base::LS_ERROR;
+  else if (level == webrtc::kTraceWarning)
+    sev = talk_base::LS_WARNING;
+  else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo)
+    sev = talk_base::LS_INFO;
+  else if (level == webrtc::kTraceTerseInfo)
+    sev = talk_base::LS_INFO;
+
+  // Skip past boilerplate prefix text
+  if (length < 72) {
+    // Shorter than the expected fixed-width header; log the raw text.
+    std::string msg(trace, length);
+    LOG(LS_ERROR) << "Malformed webrtc log message: ";
+    LOG_V(sev) << msg;
+  } else {
+    // Strip the 71-character header and one trailing byte.
+    // NOTE(review): assumes webrtc traces carry a fixed 71-char prefix plus a
+    // trailing terminator -- confirm against the webrtc trace format.
+    std::string msg(trace + 71, length - 72);
+    if (!ShouldIgnoreTrace(msg)) {
+      LOG_V(sev) << "webrtc: " << msg;
+    }
+  }
+}
+
+// VoE error-observer callback: routes |err_code| to the media channel that
+// owns VoE channel |channel_num|, identified by its ssrc.  Holds
+// |channels_cs_| for the duration of the lookup and dispatch.
+void WebRtcVoiceEngine::CallbackOnError(int channel_num, int err_code) {
+  talk_base::CritScope lock(&channels_cs_);
+  WebRtcVoiceMediaChannel* channel = NULL;
+  uint32 ssrc = 0;
+  LOG(LS_WARNING) << "VoiceEngine error " << err_code << " reported on channel "
+                  << channel_num << ".";
+  if (FindChannelAndSsrc(channel_num, &channel, &ssrc)) {
+    ASSERT(channel != NULL);
+    channel->OnError(ssrc, err_code);
+  } else {
+    LOG(LS_ERROR) << "VoiceEngine channel " << channel_num
+                  << " could not be found in channel list when error reported.";
+  }
+}
+
+// Finds the media channel (and its ssrc) that owns VoE channel
+// |channel_num|.  Outputs are reset first; on failure they stay NULL/0.
+// Caller is expected to hold |channels_cs_| (see CallbackOnError).
+bool WebRtcVoiceEngine::FindChannelAndSsrc(
+    int channel_num, WebRtcVoiceMediaChannel** channel, uint32* ssrc) const {
+  ASSERT(channel != NULL && ssrc != NULL);
+
+  *channel = NULL;
+  *ssrc = 0;
+  // Find corresponding channel and ssrc
+  for (ChannelList::const_iterator it = channels_.begin();
+       it != channels_.end(); ++it) {
+    ASSERT(*it != NULL);
+    if ((*it)->FindSsrc(channel_num, ssrc)) {
+      *channel = *it;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// This method will search through the WebRtcVoiceMediaChannels and
+// obtain the voice engine's channel number.
+// |direction| selects which mapping to consult: receive channels for MPD_RX,
+// send channels for MPD_TX (receive is checked first when both bits are set).
+bool WebRtcVoiceEngine::FindChannelNumFromSsrc(
+    uint32 ssrc, MediaProcessorDirection direction, int* channel_num) {
+  ASSERT(channel_num != NULL);
+  ASSERT(direction == MPD_RX || direction == MPD_TX);
+
+  *channel_num = -1;
+  // Find corresponding channel for ssrc.
+  for (ChannelList::const_iterator it = channels_.begin();
+       it != channels_.end(); ++it) {
+    ASSERT(*it != NULL);
+    if (direction & MPD_RX) {
+      *channel_num = (*it)->GetReceiveChannelNum(ssrc);
+    }
+    if (*channel_num == -1 && (direction & MPD_TX)) {
+      *channel_num = (*it)->GetSendChannelNum(ssrc);
+    }
+    if (*channel_num != -1) {
+      return true;
+    }
+  }
+  // NOTE(review): the message says "FindChannelFromSsrc" while the method is
+  // FindChannelNumFromSsrc -- log text left as-is.
+  LOG(LS_WARNING) << "FindChannelFromSsrc. No Channel Found for Ssrc: " << ssrc;
+  return false;
+}
+
+// Adds |channel| to the engine's channel list (used for error dispatch and
+// ssrc lookups).  Guarded by |channels_cs_|.
+void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  channels_.push_back(channel);
+}
+
+// Removes |channel| from the engine's channel list; a no-op when the
+// channel was never registered.  Guarded by |channels_cs_|.
+void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel *channel) {
+  talk_base::CritScope lock(&channels_cs_);
+  ChannelList::iterator pos =
+      std::find(channels_.begin(), channels_.end(), channel);
+  if (pos == channels_.end()) {
+    return;
+  }
+  channels_.erase(pos);
+}
+
+// Adds |soundclip| to the soundclip list.  NOTE(review): unlike the channel
+// list, this list is not guarded by a critical section -- presumably only
+// touched from one thread; confirm before adding cross-thread callers.
+void WebRtcVoiceEngine::RegisterSoundclip(WebRtcSoundclipMedia *soundclip) {
+  soundclips_.push_back(soundclip);
+}
+
+// Removes |soundclip| from the soundclip list; a no-op when absent.
+void WebRtcVoiceEngine::UnregisterSoundclip(WebRtcSoundclipMedia *soundclip) {
+  SoundclipList::iterator pos =
+      std::find(soundclips_.begin(), soundclips_.end(), soundclip);
+  if (pos == soundclips_.end()) {
+    return;
+  }
+  soundclips_.erase(pos);
+}
+
+// Adjusts the default AGC target level by the specified delta.
+// A positive |delta| decreases targetLeveldBOv.  Returns false when VoE
+// rejects the new config.
+// NB: If we start messing with other config fields, we'll want
+// to save the current webrtc::AgcConfig as well.
+bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) {
+  webrtc::AgcConfig config = default_agc_config_;
+  config.targetLeveldBOv -= delta;
+
+  LOG(LS_INFO) << "Adjusting AGC level from default -"
+               << default_agc_config_.targetLeveldBOv << "dB to -"
+               << config.targetLeveldBOv << "dB";
+
+  if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) {
+    LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv);
+    return false;
+  }
+  return true;
+}
+
+// Replaces the audio device modules used for voice (|adm|) and soundclips
+// (|adm_sc|).  Each old module is Release()d and each new one AddRef()d, so
+// callers keep their own reference.  Passing NULL clears a slot.  Must be
+// called before Init(); fails afterwards.
+bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm,
+                                             webrtc::AudioDeviceModule* adm_sc) {
+  if (initialized_) {
+    LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init.";
+    return false;
+  }
+  if (adm_) {
+    adm_->Release();
+    adm_ = NULL;
+  }
+  if (adm) {
+    adm_ = adm;
+    adm_->AddRef();
+  }
+
+  if (adm_sc_) {
+    adm_sc_->Release();
+    adm_sc_ = NULL;
+  }
+  if (adm_sc) {
+    adm_sc_ = adm_sc;
+    adm_sc_->AddRef();
+  }
+  return true;
+}
+
+// Connects |voice_processor| to the media-frame signal for |ssrc| in the
+// given direction.  The first processor attached per direction also
+// registers this engine as VoE's external media processor for the channel;
+// subsequent processors merely join the existing signal.
+bool WebRtcVoiceEngine::RegisterProcessor(
+    uint32 ssrc,
+    VoiceProcessor* voice_processor,
+    MediaProcessorDirection direction) {
+  bool register_with_webrtc = false;
+  int channel_id = -1;
+  bool success = false;
+  uint32* processor_ssrc = NULL;
+  bool found_channel = FindChannelNumFromSsrc(ssrc, direction, &channel_id);
+  if (voice_processor == NULL || !found_channel) {
+    LOG(LS_WARNING) << "Media Processing Registration Failed. ssrc: " << ssrc
+                    << " foundChannel: " << found_channel;
+    return false;
+  }
+
+  webrtc::ProcessingTypes processing_type;
+  {
+    // Signal connections are guarded; VoE registration happens outside the
+    // lock below.
+    talk_base::CritScope cs(&signal_media_critical_);
+    if (direction == MPD_RX) {
+      processing_type = webrtc::kPlaybackAllChannelsMixed;
+      if (SignalRxMediaFrame.is_empty()) {
+        register_with_webrtc = true;
+        processor_ssrc = &rx_processor_ssrc_;
+      }
+      SignalRxMediaFrame.connect(voice_processor,
+                                 &VoiceProcessor::OnFrame);
+    } else {
+      processing_type = webrtc::kRecordingPerChannel;
+      if (SignalTxMediaFrame.is_empty()) {
+        register_with_webrtc = true;
+        processor_ssrc = &tx_processor_ssrc_;
+      }
+      SignalTxMediaFrame.connect(voice_processor,
+                                 &VoiceProcessor::OnFrame);
+    }
+  }
+  if (register_with_webrtc) {
+    // TODO(janahan): when registering consider instantiating a
+    // a VoeMediaProcess object and not make the engine extend the interface.
+    if (voe()->media() && voe()->media()->
+        RegisterExternalMediaProcessing(channel_id,
+                                        processing_type,
+                                        *this) != -1) {
+      LOG(LS_INFO) << "Media Processing Registration Succeeded. channel:"
+                   << channel_id;
+      *processor_ssrc = ssrc;
+      success = true;
+    } else {
+      LOG_RTCERR2(RegisterExternalMediaProcessing,
+                  channel_id,
+                  processing_type);
+      success = false;
+    }
+  } else {
+    // If we don't have to register with the engine, we just needed to
+    // connect a new processor, set success to true;
+    success = true;
+  }
+  return success;
+}
+
+// Helper for UnregisterProcessor(): disconnects |voice_processor| from the
+// signal for |channel_direction| (only when |processor_direction| covers
+// it).  When the last processor detaches, external media processing is
+// deregistered from VoE for the channel mapped to |ssrc|.
+bool WebRtcVoiceEngine::UnregisterProcessorChannel(
+    MediaProcessorDirection channel_direction,
+    uint32 ssrc,
+    VoiceProcessor* voice_processor,
+    MediaProcessorDirection processor_direction) {
+  bool success = true;
+  FrameSignal* signal;
+  webrtc::ProcessingTypes processing_type;
+  uint32* processor_ssrc = NULL;
+  if (channel_direction == MPD_RX) {
+    signal = &SignalRxMediaFrame;
+    processing_type = webrtc::kPlaybackAllChannelsMixed;
+    processor_ssrc = &rx_processor_ssrc_;
+  } else {
+    signal = &SignalTxMediaFrame;
+    processing_type = webrtc::kRecordingPerChannel;
+    processor_ssrc = &tx_processor_ssrc_;
+  }
+
+  int deregister_id = -1;
+  {
+    // Disconnect under the lock; the VoE call below is made outside it.
+    talk_base::CritScope cs(&signal_media_critical_);
+    if ((processor_direction & channel_direction) != 0 && !signal->is_empty()) {
+      signal->disconnect(voice_processor);
+      int channel_id = -1;
+      bool found_channel = FindChannelNumFromSsrc(ssrc,
+                                                  channel_direction,
+                                                  &channel_id);
+      if (signal->is_empty() && found_channel) {
+        deregister_id = channel_id;
+      }
+    }
+  }
+  if (deregister_id != -1) {
+    if (voe()->media() &&
+        voe()->media()->DeRegisterExternalMediaProcessing(deregister_id,
+        processing_type) != -1) {
+      *processor_ssrc = 0;
+      LOG(LS_INFO) << "Media Processing DeRegistration Succeeded. channel:"
+                   << deregister_id;
+    } else {
+      LOG_RTCERR2(DeRegisterExternalMediaProcessing,
+                  deregister_id,
+                  processing_type);
+      success = false;
+    }
+  }
+  return success;
+}
+
+// Detaches |voice_processor| from both the RX and TX frame signals for
+// |ssrc|; each per-direction call is a no-op for directions the processor
+// was not attached to.  Returns false if either direction fails or if
+// |voice_processor| is NULL.
+bool WebRtcVoiceEngine::UnregisterProcessor(
+    uint32 ssrc,
+    VoiceProcessor* voice_processor,
+    MediaProcessorDirection direction) {
+  if (voice_processor == NULL) {
+    LOG(LS_WARNING) << "Media Processing Deregistration Failed. ssrc: "
+                    << ssrc;
+    return false;
+  }
+  // Both calls must run unconditionally, so collect results first.
+  const bool rx_ok =
+      UnregisterProcessorChannel(MPD_RX, ssrc, voice_processor, direction);
+  const bool tx_ok =
+      UnregisterProcessorChannel(MPD_TX, ssrc, voice_processor, direction);
+  return rx_ok && tx_ok;
+}
+
+// Implementing method from WebRtc VoEMediaProcess interface
+// Do not lock mux_channel_cs_ in this callback.
+// Dispatches a 10ms audio frame to the RX or TX processor signal based on
+// |type|; any other processing type is logged and dropped.
+void WebRtcVoiceEngine::Process(int channel,
+                                webrtc::ProcessingTypes type,
+                                int16_t audio10ms[],
+                                int length,
+                                int sampling_freq,
+                                bool is_stereo) {
+  talk_base::CritScope cs(&signal_media_critical_);
+  AudioFrame frame(audio10ms, length, sampling_freq, is_stereo);
+  if (type == webrtc::kPlaybackAllChannelsMixed) {
+    SignalRxMediaFrame(rx_processor_ssrc_, MPD_RX, &frame);
+  } else if (type == webrtc::kRecordingPerChannel) {
+    SignalTxMediaFrame(tx_processor_ssrc_, MPD_TX, &frame);
+  } else {
+    LOG(LS_WARNING) << "Media Processing invoked unexpectedly."
+                    << " channel: " << channel << " type: " << type
+                    << " tx_ssrc: " << tx_processor_ssrc_
+                    << " rx_ssrc: " << rx_processor_ssrc_;
+  }
+}
+
+// Starts writing the AEC debug recording to |filename|; a no-op while a
+// dump is already in progress.  Failure is logged but leaves the flag off.
+void WebRtcVoiceEngine::StartAecDump(const std::string& filename) {
+  if (!is_dumping_aec_) {
+    // Start dumping AEC when we are not dumping.
+    if (voe_wrapper_->processing()->StartDebugRecording(
+        filename.c_str()) != webrtc::AudioProcessing::kNoError) {
+      LOG_RTCERR0(StartDebugRecording);
+    } else {
+      is_dumping_aec_ = true;
+    }
+  }
+}
+
+// Stops an in-progress AEC debug recording; a no-op when not dumping.
+// The flag is cleared even if the VoE stop call fails (failure is logged).
+void WebRtcVoiceEngine::StopAecDump() {
+  if (is_dumping_aec_) {
+    // Stop dumping AEC when we are dumping.
+    if (voe_wrapper_->processing()->StopDebugRecording() !=
+        webrtc::AudioProcessing::kNoError) {
+      LOG_RTCERR0(StopDebugRecording);
+    }
+    is_dumping_aec_ = false;
+  }
+}
+
+// This struct relies on the generated copy constructor and assignment operator
+// since it is used in an stl::map.
+// Pairs a VoE channel id with its (non-owning) AudioRenderer.
+struct WebRtcVoiceMediaChannel::WebRtcVoiceChannelInfo {
+  WebRtcVoiceChannelInfo() : channel(-1), renderer(NULL) {}
+  WebRtcVoiceChannelInfo(int ch, AudioRenderer* r)
+      : channel(ch),
+        renderer(r) {}
+  ~WebRtcVoiceChannelInfo() {}
+
+  // VoE channel id; -1 means unassigned.
+  int channel;
+  // Renderer attached to this channel, or NULL.  Not owned.
+  AudioRenderer* renderer;
+};
+
+// WebRtcVoiceMediaChannel
+// Creates the default VoE channel, registers this media channel with the
+// engine, and configures the default channel for sending (external
+// transport, RTCP, reset recv codecs).
+WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine)
+    : WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine>(
+          engine,
+          engine->voe()->base()->CreateChannel()),
+      options_(),
+      dtmf_allowed_(false),
+      desired_playout_(false),
+      nack_enabled_(false),
+      playout_(false),
+      desired_send_(SEND_NOTHING),
+      send_(SEND_NOTHING),
+      default_receive_ssrc_(0) {
+  engine->RegisterChannel(this);
+  LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel "
+                  << voe_channel();
+
+  ConfigureSendChannel(voe_channel());
+}
+
+// Tears down in a specific order: send streams first, then engine
+// unregistration, then receive streams, and finally the default channel.
+WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() {
+  LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel "
+                  << voe_channel();
+
+  // Remove any remaining send streams, the default channel will be deleted
+  // later.
+  while (!send_channels_.empty())
+    RemoveSendStream(send_channels_.begin()->first);
+
+  // Unregister ourselves from the engine.
+  engine()->UnregisterChannel(this);
+  // Remove any remaining streams.
+  while (!receive_channels_.empty()) {
+    RemoveRecvStream(receive_channels_.begin()->first);
+  }
+
+  // Delete the default channel.
+  DeleteChannel(voe_channel());
+}
+
+// Merges |options| into the channel's current options.  When the channel is
+// actively sending, the merged options are pushed to the engine immediately
+// as overrides; otherwise they take effect when sending starts.
+bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) {
+  LOG(LS_INFO) << "Setting voice channel options: "
+               << options.ToString();
+
+  // TODO(xians): Add support to set different options for different send
+  // streams after we support multiple APMs.
+
+  // We retain all of the existing options, and apply the given ones
+  // on top. This means there is no way to "clear" options such that
+  // they go back to the engine default.
+  options_.SetAll(options);
+
+  if (send_ != SEND_NOTHING) {
+    if (!engine()->SetOptionOverrides(options_)) {
+      LOG(LS_WARNING) <<
+          "Failed to engine SetOptionOverrides during channel SetOptions.";
+      return false;
+    }
+  } else {
+    // Will be interpreted when appropriate.
+  }
+
+  LOG(LS_INFO) << "Set voice channel options. Current options: "
+               << options_.ToString();
+  return true;
+}
+
+// Configures the payload types accepted on incoming media.  New codecs may
+// be added, but an already-configured codec may not change payload type
+// (packets with the old type could still be in flight).  Playout is paused
+// around the reconfiguration and resumed if it was desired.
+bool WebRtcVoiceMediaChannel::SetRecvCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  // Set the payload types to be used for incoming media.
+  LOG(LS_INFO) << "Setting receive voice codecs:";
+
+  std::vector<AudioCodec> new_codecs;
+  // Find all new codecs. We allow adding new codecs but don't allow changing
+  // the payload type of codecs that is already configured since we might
+  // already be receiving packets with that payload type.
+  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
+       it != codecs.end(); ++it) {
+    AudioCodec old_codec;
+    if (FindCodec(recv_codecs_, *it, &old_codec)) {
+      if (old_codec.id != it->id) {
+        LOG(LS_ERROR) << it->name << " payload type changed.";
+        return false;
+      }
+    } else {
+      new_codecs.push_back(*it);
+    }
+  }
+  if (new_codecs.empty()) {
+    // There are no new codecs to configure. Already configured codecs are
+    // never removed.
+    return true;
+  }
+
+  if (playout_) {
+    // Receive codecs can not be changed while playing. So we temporarily
+    // pause playout.
+    PausePlayout();
+  }
+
+  bool ret = true;
+  for (std::vector<AudioCodec>::const_iterator it = new_codecs.begin();
+       it != new_codecs.end() && ret; ++it) {
+    webrtc::CodecInst voe_codec;
+    if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
+      LOG(LS_INFO) << ToString(*it);
+      voe_codec.pltype = it->id;
+      if (default_receive_ssrc_ == 0) {
+        // Set the receive codecs on the default channel explicitly if the
+        // default channel is not used by |receive_channels_|, this happens in
+        // conference mode or in non-conference mode when there is no playout
+        // channel.
+        // TODO(xians): Figure out how we use the default channel in conference
+        // mode.
+        if (engine()->voe()->codec()->SetRecPayloadType(
+            voe_channel(), voe_codec) == -1) {
+          LOG_RTCERR2(SetRecPayloadType, voe_channel(), ToString(voe_codec));
+          ret = false;
+        }
+      }
+
+      // Set the receive codecs on all receiving channels.
+      // NOTE(review): this inner |it| shadows the outer codec iterator;
+      // harmless here, but a rename would aid readability.
+      for (ChannelMap::iterator it = receive_channels_.begin();
+           it != receive_channels_.end() && ret; ++it) {
+        if (engine()->voe()->codec()->SetRecPayloadType(
+            it->second.channel, voe_codec) == -1) {
+          LOG_RTCERR2(SetRecPayloadType, it->second.channel,
+                      ToString(voe_codec));
+          ret = false;
+        }
+      }
+    } else {
+      LOG(LS_WARNING) << "Unknown codec " << ToString(*it);
+      ret = false;
+    }
+  }
+  if (ret) {
+    recv_codecs_ = codecs;
+  }
+
+  if (desired_playout_ && !playout_) {
+    ResumePlayout();
+  }
+  return ret;
+}
+
+// Selects the send codec and configures DTMF, CN/VAD, NACK and FEC from the
+// negotiated codec list.  The first non-special codec in |codecs| becomes
+// the actual send codec ("red" triggers FEC with the codec named in its
+// params); "telephone-event" enables DTMF and "CN" enables comfort
+// noise/VAD.  An empty list falls back to PCMU/8000.
+bool WebRtcVoiceMediaChannel::SetSendCodecs(
+    const std::vector<AudioCodec>& codecs) {
+  // TODO(xians): Break down this function into SetSendCodecs(channel, codecs)
+  // to support per-channel codecs.
+
+  // Disable DTMF, VAD, and FEC unless we know the other side wants them.
+  dtmf_allowed_ = false;
+  for (ChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    engine()->voe()->codec()->SetVADStatus(iter->second.channel, false);
+    engine()->voe()->rtp()->SetNACKStatus(iter->second.channel, false, 0);
+    engine()->voe()->rtp()->SetFECStatus(iter->second.channel, false);
+  }
+
+  // Scan through the list to figure out the codec to use for sending, along
+  // with the proper configuration for VAD and DTMF.
+  bool first = true;
+  webrtc::CodecInst send_codec;
+  memset(&send_codec, 0, sizeof(send_codec));
+
+  for (std::vector<AudioCodec>::const_iterator it = codecs.begin();
+       it != codecs.end(); ++it) {
+    // Ignore codecs we don't know about. The negotiation step should prevent
+    // this, but double-check to be sure.
+    webrtc::CodecInst voe_codec;
+    if (!engine()->FindWebRtcCodec(*it, &voe_codec)) {
+      LOG(LS_WARNING) << "Unknown codec " << ToString(voe_codec);
+      continue;
+    }
+
+    // If OPUS, change what we send according to the "stereo" codec
+    // parameter, and not the "channels" parameter. We set
+    // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If
+    // the bitrate is not specified, i.e. is zero, we set it to the
+    // appropriate default value for mono or stereo Opus.
+    if (IsOpus(*it)) {
+      if (IsOpusStereoEnabled(*it)) {
+        voe_codec.channels = 2;
+        if (!IsValidOpusBitrate(it->bitrate)) {
+          if (it->bitrate != 0) {
+            LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
+                            << it->bitrate
+                            << ") with default opus stereo bitrate: "
+                            << kOpusStereoBitrate;
+          }
+          voe_codec.rate = kOpusStereoBitrate;
+        }
+      } else {
+        voe_codec.channels = 1;
+        if (!IsValidOpusBitrate(it->bitrate)) {
+          if (it->bitrate != 0) {
+            LOG(LS_WARNING) << "Overrides the invalid supplied bitrate("
+                            << it->bitrate
+                            << ") with default opus mono bitrate: "
+                            << kOpusMonoBitrate;
+          }
+          voe_codec.rate = kOpusMonoBitrate;
+        }
+      }
+      // An explicit "maxaveragebitrate"-style codec param wins over the
+      // defaults chosen above.
+      int bitrate_from_params = GetOpusBitrateFromParams(*it);
+      if (bitrate_from_params != 0) {
+        voe_codec.rate = bitrate_from_params;
+      }
+    }
+
+    // Find the DTMF telephone event "codec" and tell VoiceEngine channels
+    // about it.
+    if (_stricmp(it->name.c_str(), "telephone-event") == 0 ||
+        _stricmp(it->name.c_str(), "audio/telephone-event") == 0) {
+      for (ChannelMap::iterator iter = send_channels_.begin();
+           iter != send_channels_.end(); ++iter) {
+        if (engine()->voe()->dtmf()->SetSendTelephoneEventPayloadType(
+                iter->second.channel, it->id) == -1) {
+          LOG_RTCERR2(SetSendTelephoneEventPayloadType,
+                      iter->second.channel, it->id);
+          return false;
+        }
+      }
+      dtmf_allowed_ = true;
+    }
+
+    // Turn voice activity detection/comfort noise on if supported.
+    // Set the wideband CN payload type appropriately.
+    // (narrowband always uses the static payload type 13).
+    if (_stricmp(it->name.c_str(), "CN") == 0) {
+      webrtc::PayloadFrequencies cn_freq;
+      switch (it->clockrate) {
+        case 8000:
+          cn_freq = webrtc::kFreq8000Hz;
+          break;
+        case 16000:
+          cn_freq = webrtc::kFreq16000Hz;
+          break;
+        case 32000:
+          cn_freq = webrtc::kFreq32000Hz;
+          break;
+        default:
+          LOG(LS_WARNING) << "CN frequency " << it->clockrate
+                          << " not supported.";
+          continue;
+      }
+      // Loop through the existing send channels and set the CN payloadtype
+      // and the VAD status.
+      for (ChannelMap::iterator iter = send_channels_.begin();
+           iter != send_channels_.end(); ++iter) {
+        int channel = iter->second.channel;
+        // The CN payload type for 8000 Hz clockrate is fixed at 13.
+        if (cn_freq != webrtc::kFreq8000Hz) {
+          if (engine()->voe()->codec()->SetSendCNPayloadType(
+                  channel, it->id, cn_freq) == -1) {
+            LOG_RTCERR3(SetSendCNPayloadType, channel, it->id, cn_freq);
+            // TODO(ajm): This failure condition will be removed from VoE.
+            // Restore the return here when we update to a new enough webrtc.
+            //
+            // Not returning false because the SetSendCNPayloadType will fail if
+            // the channel is already sending.
+            // This can happen if the remote description is applied twice, for
+            // example in the case of ROAP on top of JSEP, where both side will
+            // send the offer.
+          }
+        }
+
+        // Only turn on VAD if we have a CN payload type that matches the
+        // clockrate for the codec we are going to use.
+        // (send_codec is zero-initialized, so this never matches before the
+        // send codec has been chosen.)
+        if (it->clockrate == send_codec.plfreq) {
+          LOG(LS_INFO) << "Enabling VAD";
+          if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) {
+            LOG_RTCERR2(SetVADStatus, channel, true);
+            return false;
+          }
+        }
+      }
+    }
+
+    // We'll use the first codec in the list to actually send audio data.
+    // Be sure to use the payload type requested by the remote side.
+    // "red", for FEC audio, is a special case where the actual codec to be
+    // used is specified in params.
+    if (first) {
+      if (_stricmp(it->name.c_str(), "red") == 0) {
+        // Parse out the RED parameters. If we fail, just ignore RED;
+        // we don't support all possible params/usage scenarios.
+        if (!GetRedSendCodec(*it, codecs, &send_codec)) {
+          continue;
+        }
+
+        // Enable redundant encoding of the specified codec. Treat any
+        // failure as a fatal internal error.
+        LOG(LS_INFO) << "Enabling FEC";
+        for (ChannelMap::iterator iter = send_channels_.begin();
+             iter != send_channels_.end(); ++iter) {
+          if (engine()->voe()->rtp()->SetFECStatus(iter->second.channel,
+                                                   true, it->id) == -1) {
+            LOG_RTCERR3(SetFECStatus, iter->second.channel, true, it->id);
+            return false;
+          }
+        }
+      } else {
+        send_codec = voe_codec;
+        nack_enabled_ = IsNackEnabled(*it);
+        SetNack(send_channels_, nack_enabled_);
+      }
+      first = false;
+      // Set the codec immediately, since SetVADStatus() depends on whether
+      // the current codec is mono or stereo.
+      if (!SetSendCodec(send_codec))
+        return false;
+    }
+  }
+  SetNack(receive_channels_, nack_enabled_);
+
+
+  // If we're being asked to set an empty list of codecs, due to a buggy client,
+  // choose the most common format: PCMU
+  if (first) {
+    LOG(LS_WARNING) << "Received empty list of codecs; using PCMU/8000";
+    AudioCodec codec(0, "PCMU", 8000, 0, 1, 0);
+    engine()->FindWebRtcCodec(codec, &send_codec);
+    if (!SetSendCodec(send_codec))
+      return false;
+  }
+
+  return true;
+}
+
+// Enables or disables NACK on every channel in |channels|.
+// Fix: forward the |nack_enabled| argument instead of reading the member
+// |nack_enabled_| -- the parameter was silently ignored.  Both current
+// callers pass the member, so behavior is unchanged for them, but any other
+// caller would have had its argument dropped.
+void WebRtcVoiceMediaChannel::SetNack(const ChannelMap& channels,
+                                      bool nack_enabled) {
+  for (ChannelMap::const_iterator it = channels.begin();
+       it != channels.end(); ++it) {
+    SetNack(it->first, it->second.channel, nack_enabled);
+  }
+}
+
+// Applies the NACK setting to a single VoE channel, using kNackMaxPackets
+// as the history size when enabling.
+void WebRtcVoiceMediaChannel::SetNack(uint32 ssrc, int channel,
+                                      bool nack_enabled) {
+  if (nack_enabled) {
+    LOG(LS_INFO) << "Enabling NACK for stream " << ssrc;
+    engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets);
+  } else {
+    LOG(LS_INFO) << "Disabling NACK for stream " << ssrc;
+    engine()->voe()->rtp()->SetNACKStatus(channel, false, 0);
+  }
+}
+
+// Applies |send_codec| to every send channel; only on full success is the
+// cached |send_codec_| (used for late-added streams) updated.
+bool WebRtcVoiceMediaChannel::SetSendCodec(
+    const webrtc::CodecInst& send_codec) {
+  LOG(LS_INFO) << "Selected voice codec " << ToString(send_codec)
+               << ", bitrate=" << send_codec.rate;
+  for (ChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    if (!SetSendCodec(iter->second.channel, send_codec))
+      return false;
+  }
+
+  // All SetSendCodec calls were successful. Update the global state
+  // accordingly.
+  send_codec_.reset(new webrtc::CodecInst(send_codec));
+
+  return true;
+}
+
+// Applies |send_codec| to one VoE channel; logs and returns false on VoE
+// failure.
+bool WebRtcVoiceMediaChannel::SetSendCodec(
+    int channel, const webrtc::CodecInst& send_codec) {
+  LOG(LS_INFO) << "Send channel " << channel <<  " selected voice codec "
+               << ToString(send_codec) << ", bitrate=" << send_codec.rate;
+
+  if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) {
+    LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec));
+    return false;
+  }
+  return true;
+}
+
+// Accepts any set of receive-side extensions without applying anything.
+bool WebRtcVoiceMediaChannel::SetRecvRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // We don't support any incoming extensions headers right now.
+  return true;
+}
+
+// Enables (or disables) the audio-level RTP header extension on all send
+// channels.  The extension is enabled iff |extensions| contains
+// kRtpAudioLevelHeaderExtension with a valid id; any other extension is
+// ignored.
+bool WebRtcVoiceMediaChannel::SetSendRtpHeaderExtensions(
+    const std::vector<RtpHeaderExtension>& extensions) {
+  // Enable the audio level extension header if requested.
+  std::vector<RtpHeaderExtension>::const_iterator it;
+  for (it = extensions.begin(); it != extensions.end(); ++it) {
+    if (it->uri == kRtpAudioLevelHeaderExtension) {
+      break;
+    }
+  }
+
+  bool enable = (it != extensions.end());
+  int id = 0;
+
+  if (enable) {
+    id = it->id;
+    if (id < kMinRtpHeaderExtensionId ||
+        id > kMaxRtpHeaderExtensionId) {
+      LOG(LS_WARNING) << "Invalid RTP header extension id " << id;
+      return false;
+    }
+  }
+
+  // Note: logged even when disabling (id stays 0 in that case).
+  LOG(LS_INFO) << "Enabling audio level header extension with ID " << id;
+  for (ChannelMap::const_iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    if (engine()->voe()->rtp()->SetRTPAudioLevelIndicationStatus(
+            iter->second.channel, enable, id) == -1) {
+      LOG_RTCERR3(SetRTPAudioLevelIndicationStatus,
+                  iter->second.channel, enable, id);
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Records the caller's desired playout state and applies it right away;
+// Pause/ResumePlayout() toggle around this remembered value.
+bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) {
+  desired_playout_ = playout;
+  return ChangePlayout(playout);
+}
+
+// Stops playout without forgetting the desired state.
+bool WebRtcVoiceMediaChannel::PausePlayout() {
+  return ChangePlayout(false);
+}
+
+// Restores the playout state last requested via SetPlayout().
+bool WebRtcVoiceMediaChannel::ResumePlayout() {
+  return ChangePlayout(desired_playout_);
+}
+
+// Switches playout on/off for every receive channel (or the default channel
+// when there are none).  |playout_| is only updated when all channels
+// succeed.
+bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) {
+  if (playout_ == playout) {
+    return true;
+  }
+
+  // Change the playout of all channels to the new state.
+  bool result = true;
+  if (receive_channels_.empty()) {
+    // Only toggle the default channel if we don't have any other channels.
+    result = SetPlayout(voe_channel(), playout);
+  }
+  for (ChannelMap::iterator it = receive_channels_.begin();
+       it != receive_channels_.end() && result; ++it) {
+    if (!SetPlayout(it->second.channel, playout)) {
+      LOG(LS_ERROR) << "SetPlayout " << playout << " on channel "
+                    << it->second.channel << " failed";
+      result = false;
+    }
+  }
+
+  if (result) {
+    playout_ = playout;
+  }
+  return result;
+}
+
+// Records the desired send state and applies it immediately when at least
+// one send stream exists; otherwise it takes effect when one is added.
+bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) {
+  desired_send_ = send;
+  if (send_channels_.empty()) {
+    return true;
+  }
+  return ChangeSend(desired_send_);
+}
+
+// Stops sending without forgetting the desired send state.
+bool WebRtcVoiceMediaChannel::PauseSend() {
+  return ChangeSend(SEND_NOTHING);
+}
+
+// Restores the send state last requested via SetSend().
+bool WebRtcVoiceMediaChannel::ResumeSend() {
+  return ChangeSend(desired_send_);
+}
+
+// Transitions all send channels to |send|.  Engine option overrides are
+// applied before starting to send from the microphone and cleared after
+// stopping.  |send_| is only updated when every channel succeeds.
+bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) {
+  if (send_ == send) {
+    return true;
+  }
+
+  // Apply the engine option overrides before we start sending from the mic.
+  if (send == SEND_MICROPHONE)
+    engine()->SetOptionOverrides(options_);
+
+  // Change the settings on each send channel.
+  for (ChannelMap::iterator iter = send_channels_.begin();
+       iter != send_channels_.end(); ++iter) {
+    if (!ChangeSend(iter->second.channel, send))
+      return false;
+  }
+
+  // Clear up the options after stopping sending.
+  if (send == SEND_NOTHING)
+    engine()->ClearOptionOverrides();
+
+  send_ = send;
+  return true;
+}
+
+// Starts or stops sending on a single VoE channel.  When starting, any
+// file-as-microphone playback left over on the channel is also stopped.
+bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) {
+  if (send == SEND_MICROPHONE) {
+    if (engine()->voe()->base()->StartSend(channel) == -1) {
+      LOG_RTCERR1(StartSend, channel);
+      return false;
+    }
+    if (engine()->voe()->file() &&
+        engine()->voe()->file()->StopPlayingFileAsMicrophone(channel) == -1) {
+      LOG_RTCERR1(StopPlayingFileAsMicrophone, channel);
+      return false;
+    }
+  } else {  // SEND_NOTHING
+    ASSERT(send == SEND_NOTHING);
+    if (engine()->voe()->base()->StopSend(channel) == -1) {
+      LOG_RTCERR1(StopSend, channel);
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// One-time setup for a newly created send channel: attach this object as
+// the external transport, enable RTCP and reset receive codecs.
+void WebRtcVoiceMediaChannel::ConfigureSendChannel(int channel) {
+  if (engine()->voe()->network()->RegisterExternalTransport(
+          channel, *this) == -1) {
+    LOG_RTCERR2(RegisterExternalTransport, channel, this);
+  }
+
+  // Enable RTCP (for quality stats and feedback messages)
+  EnableRtcp(channel);
+
+  // Reset all recv codecs; they will be enabled via SetRecvCodecs.
+  ResetRecvCodecs(channel);
+}
+
+// Detaches the external transport and deletes the VoE channel.  A failed
+// transport deregistration is logged but does not prevent deletion.
+bool WebRtcVoiceMediaChannel::DeleteChannel(int channel) {
+  if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) {
+    LOG_RTCERR1(DeRegisterExternalTransport, channel);
+  }
+
+  if (engine()->voe()->base()->DeleteChannel(channel) == -1) {
+    LOG_RTCERR1(DeleteChannel, channel);
+    return false;
+  }
+
+  return true;
+}
+
+bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) {
+ // If the default channel is already used for sending create a new channel
+ // otherwise use the default channel for sending.
+ int channel = GetSendChannelNum(sp.first_ssrc());
+ if (channel != -1) {
+ LOG(LS_ERROR) << "Stream already exists with ssrc " << sp.first_ssrc();
+ return false;
+ }
+
+ bool default_channel_is_available = true;
+ for (ChannelMap::const_iterator iter = send_channels_.begin();
+ iter != send_channels_.end(); ++iter) {
+ if (IsDefaultChannel(iter->second.channel)) {
+ default_channel_is_available = false;
+ break;
+ }
+ }
+ if (default_channel_is_available) {
+ channel = voe_channel();
+ } else {
+ // Create a new channel for sending audio data.
+ channel = engine()->voe()->base()->CreateChannel();
+ if (channel == -1) {
+ LOG_RTCERR0(CreateChannel);
+ return false;
+ }
+
+ ConfigureSendChannel(channel);
+ }
+
+ // Save the channel to send_channels_, so that RemoveSendStream() can still
+ // delete the channel in case failure happens below.
+ send_channels_[sp.first_ssrc()] = WebRtcVoiceChannelInfo(channel, NULL);
+
+ // Set the send (local) SSRC.
+ // If there are multiple send SSRCs, we can only set the first one here, and
+ // the rest of the SSRC(s) need to be set after SetSendCodec has been called
+  // (with a codec that requires multiple SSRC(s)).
+ if (engine()->voe()->rtp()->SetLocalSSRC(channel, sp.first_ssrc()) == -1) {
+ LOG_RTCERR2(SetSendSSRC, channel, sp.first_ssrc());
+ return false;
+ }
+
+ // At this point the channel's local SSRC has been updated. If the channel is
+ // the default channel make sure that all the receive channels are updated as
+ // well. Receive channels have to have the same SSRC as the default channel in
+ // order to send receiver reports with this SSRC.
+ if (IsDefaultChannel(channel)) {
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ // Only update the SSRC for non-default channels.
+ if (!IsDefaultChannel(it->second.channel)) {
+ if (engine()->voe()->rtp()->SetLocalSSRC(it->second.channel,
+ sp.first_ssrc()) != 0) {
+ LOG_RTCERR2(SetLocalSSRC, it->second.channel, sp.first_ssrc());
+ return false;
+ }
+ }
+ }
+ }
+
+ if (engine()->voe()->rtp()->SetRTCP_CNAME(channel, sp.cname.c_str()) == -1) {
+ LOG_RTCERR2(SetRTCP_CNAME, channel, sp.cname);
+ return false;
+ }
+
+ // Set the current codec to be used for the new channel.
+ if (send_codec_ && !SetSendCodec(channel, *send_codec_))
+ return false;
+
+ return ChangeSend(channel, desired_send_);
+}
+
+bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32 ssrc) {
+ ChannelMap::iterator it = send_channels_.find(ssrc);
+ if (it == send_channels_.end()) {
+ LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
+ << " which doesn't exist.";
+ return false;
+ }
+
+ int channel = it->second.channel;
+ ChangeSend(channel, SEND_NOTHING);
+
+ // Notify the audio renderer that the send channel is going away.
+ if (it->second.renderer)
+ it->second.renderer->RemoveChannel(channel);
+
+ if (IsDefaultChannel(channel)) {
+ // Do not delete the default channel since the receive channels depend on
+ // the default channel, recycle it instead.
+ ChangeSend(channel, SEND_NOTHING);
+ } else {
+ // Clean up and delete the send channel.
+ LOG(LS_INFO) << "Removing audio send stream " << ssrc
+ << " with VoiceEngine channel #" << channel << ".";
+ if (!DeleteChannel(channel))
+ return false;
+ }
+
+ send_channels_.erase(it);
+ if (send_channels_.empty())
+ ChangeSend(SEND_NOTHING);
+
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) {
+ talk_base::CritScope lock(&receive_channels_cs_);
+
+ if (!VERIFY(sp.ssrcs.size() == 1))
+ return false;
+ uint32 ssrc = sp.first_ssrc();
+
+ if (receive_channels_.find(ssrc) != receive_channels_.end()) {
+ LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc;
+ return false;
+ }
+
+ // Reuse default channel for recv stream in non-conference mode call
+ // when the default channel is not being used.
+ if (!InConferenceMode() && default_receive_ssrc_ == 0) {
+ LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
+ << " reuse default channel";
+ default_receive_ssrc_ = sp.first_ssrc();
+ receive_channels_.insert(std::make_pair(
+ default_receive_ssrc_, WebRtcVoiceChannelInfo(voe_channel(), NULL)));
+ return SetPlayout(voe_channel(), playout_);
+ }
+
+ // Create a new channel for receiving audio data.
+ int channel = engine()->voe()->base()->CreateChannel();
+ if (channel == -1) {
+ LOG_RTCERR0(CreateChannel);
+ return false;
+ }
+
+ // Configure to use external transport, like our default channel.
+ if (engine()->voe()->network()->RegisterExternalTransport(
+ channel, *this) == -1) {
+ LOG_RTCERR2(SetExternalTransport, channel, this);
+ return false;
+ }
+
+ // Use the same SSRC as our default channel (so the RTCP reports are correct).
+ unsigned int send_ssrc;
+ webrtc::VoERTP_RTCP* rtp = engine()->voe()->rtp();
+ if (rtp->GetLocalSSRC(voe_channel(), send_ssrc) == -1) {
+ LOG_RTCERR2(GetSendSSRC, channel, send_ssrc);
+ return false;
+ }
+ if (rtp->SetLocalSSRC(channel, send_ssrc) == -1) {
+ LOG_RTCERR2(SetSendSSRC, channel, send_ssrc);
+ return false;
+ }
+
+ // Use the same recv payload types as our default channel.
+ ResetRecvCodecs(channel);
+ if (!recv_codecs_.empty()) {
+ for (std::vector<AudioCodec>::const_iterator it = recv_codecs_.begin();
+ it != recv_codecs_.end(); ++it) {
+ webrtc::CodecInst voe_codec;
+ if (engine()->FindWebRtcCodec(*it, &voe_codec)) {
+ voe_codec.pltype = it->id;
+ voe_codec.rate = 0; // Needed to make GetRecPayloadType work for ISAC
+ if (engine()->voe()->codec()->GetRecPayloadType(
+ voe_channel(), voe_codec) != -1) {
+ if (engine()->voe()->codec()->SetRecPayloadType(
+ channel, voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ if (InConferenceMode()) {
+    // To be on par with the video, voe_channel() is not used for receiving in
+ // a conference call.
+ if (receive_channels_.empty() && default_receive_ssrc_ == 0 && playout_) {
+ // This is the first stream in a multi user meeting. We can now
+ // disable playback of the default stream. This since the default
+ // stream will probably have received some initial packets before
+ // the new stream was added. This will mean that the CN state from
+ // the default channel will be mixed in with the other streams
+ // throughout the whole meeting, which might be disturbing.
+ LOG(LS_INFO) << "Disabling playback on the default voice channel";
+ SetPlayout(voe_channel(), false);
+ }
+ }
+ SetNack(ssrc, channel, nack_enabled_);
+
+ receive_channels_.insert(
+ std::make_pair(ssrc, WebRtcVoiceChannelInfo(channel, NULL)));
+
+ // TODO(juberti): We should rollback the add if SetPlayout fails.
+ LOG(LS_INFO) << "New audio stream " << ssrc
+ << " registered to VoiceEngine channel #"
+ << channel << ".";
+ return SetPlayout(channel, playout_);
+}
+
+bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32 ssrc) {
+ talk_base::CritScope lock(&receive_channels_cs_);
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it == receive_channels_.end()) {
+ LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
+ << " which doesn't exist.";
+ return false;
+ }
+
+ if (ssrc == default_receive_ssrc_) {
+ ASSERT(IsDefaultChannel(it->second.channel));
+    // Recycle the default channel for the recv stream.
+ if (playout_)
+ SetPlayout(voe_channel(), false);
+
+ if (it->second.renderer)
+ it->second.renderer->RemoveChannel(voe_channel());
+
+ default_receive_ssrc_ = 0;
+ receive_channels_.erase(it);
+ return true;
+ }
+
+ // Non default channel.
+ // Notify the renderer that channel is going away.
+ if (it->second.renderer)
+ it->second.renderer->RemoveChannel(it->second.channel);
+
+ LOG(LS_INFO) << "Removing audio stream " << ssrc
+ << " with VoiceEngine channel #" << it->second.channel << ".";
+ if (!DeleteChannel(it->second.channel)) {
+ // Erase the entry anyhow.
+ receive_channels_.erase(it);
+ return false;
+ }
+
+ receive_channels_.erase(it);
+ bool enable_default_channel_playout = false;
+ if (receive_channels_.empty()) {
+ // The last stream was removed. We can now enable the default
+ // channel for new channels to be played out immediately without
+ // waiting for AddStream messages.
+ // We do this for both conference mode and non-conference mode.
+    // TODO(oja): Does the default channel still have its CN state?
+ enable_default_channel_playout = true;
+ }
+ if (!InConferenceMode() && receive_channels_.size() == 1 &&
+ default_receive_ssrc_ != 0) {
+ // Only the default channel is active, enable the playout on default
+ // channel.
+ enable_default_channel_playout = true;
+ }
+ if (enable_default_channel_playout && playout_) {
+ LOG(LS_INFO) << "Enabling playback on the default voice channel";
+ SetPlayout(voe_channel(), true);
+ }
+
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetRemoteRenderer(uint32 ssrc,
+ AudioRenderer* renderer) {
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it == receive_channels_.end()) {
+ if (renderer) {
+ // Return an error if trying to set a valid renderer with an invalid ssrc.
+ LOG(LS_ERROR) << "SetRemoteRenderer failed with ssrc "<< ssrc;
+ return false;
+ }
+
+ // The channel likely has gone away, do nothing.
+ return true;
+ }
+
+ AudioRenderer* remote_renderer = it->second.renderer;
+ if (renderer) {
+ ASSERT(remote_renderer == NULL || remote_renderer == renderer);
+ if (!remote_renderer) {
+ renderer->AddChannel(it->second.channel);
+ }
+ } else if (remote_renderer) {
+ // |renderer| == NULL, remove the channel from the renderer.
+ remote_renderer->RemoveChannel(it->second.channel);
+ }
+
+ // Assign the new value to the struct.
+ it->second.renderer = renderer;
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32 ssrc,
+ AudioRenderer* renderer) {
+ ChannelMap::iterator it = send_channels_.find(ssrc);
+ if (it == send_channels_.end()) {
+ if (renderer) {
+ // Return an error if trying to set a valid renderer with an invalid ssrc.
+ LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc "<< ssrc;
+ return false;
+ }
+
+ // The channel likely has gone away, do nothing.
+ return true;
+ }
+
+ AudioRenderer* local_renderer = it->second.renderer;
+ if (renderer) {
+ ASSERT(local_renderer == NULL || local_renderer == renderer);
+ if (!local_renderer)
+ renderer->AddChannel(it->second.channel);
+ } else if (local_renderer) {
+ local_renderer->RemoveChannel(it->second.channel);
+ }
+
+ // Assign the new value to the struct.
+ it->second.renderer = renderer;
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::GetActiveStreams(
+ AudioInfo::StreamList* actives) {
+ // In conference mode, the default channel should not be in
+ // |receive_channels_|.
+ actives->clear();
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ int level = GetOutputLevel(it->second.channel);
+ if (level > 0) {
+ actives->push_back(std::make_pair(it->first, level));
+ }
+ }
+ return true;
+}
+
+int WebRtcVoiceMediaChannel::GetOutputLevel() {
+ // return the highest output level of all streams
+ int highest = GetOutputLevel(voe_channel());
+ for (ChannelMap::iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ int level = GetOutputLevel(it->second.channel);
+ highest = talk_base::_max(level, highest);
+ }
+ return highest;
+}
+
+int WebRtcVoiceMediaChannel::GetTimeSinceLastTyping() {
+ int ret;
+ if (engine()->voe()->processing()->TimeSinceLastTyping(ret) == -1) {
+ // In case of error, log the info and continue
+ LOG_RTCERR0(TimeSinceLastTyping);
+ ret = -1;
+ } else {
+ ret *= 1000; // We return ms, webrtc returns seconds.
+ }
+ return ret;
+}
+
+void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window,
+ int cost_per_typing, int reporting_threshold, int penalty_decay,
+ int type_event_delay) {
+ if (engine()->voe()->processing()->SetTypingDetectionParameters(
+ time_window, cost_per_typing,
+ reporting_threshold, penalty_decay, type_event_delay) == -1) {
+ // In case of error, log the info and continue
+ LOG_RTCERR5(SetTypingDetectionParameters, time_window,
+ cost_per_typing, reporting_threshold, penalty_decay,
+ type_event_delay);
+ }
+}
+
+bool WebRtcVoiceMediaChannel::SetOutputScaling(
+ uint32 ssrc, double left, double right) {
+ talk_base::CritScope lock(&receive_channels_cs_);
+ // Collect the channels to scale the output volume.
+ std::vector<int> channels;
+ if (0 == ssrc) { // Collect all channels, including the default one.
+ // Default channel is not in receive_channels_ if it is not being used for
+ // playout.
+ if (default_receive_ssrc_ == 0)
+ channels.push_back(voe_channel());
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ channels.push_back(it->second.channel);
+ }
+ } else { // Collect only the channel of the specified ssrc.
+ int channel = GetReceiveChannelNum(ssrc);
+ if (-1 == channel) {
+ LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+ return false;
+ }
+ channels.push_back(channel);
+ }
+
+ // Scale the output volume for the collected channels. We first normalize to
+ // scale the volume and then set the left and right pan.
+ float scale = static_cast<float>(talk_base::_max(left, right));
+ if (scale > 0.0001f) {
+ left /= scale;
+ right /= scale;
+ }
+ for (std::vector<int>::const_iterator it = channels.begin();
+ it != channels.end(); ++it) {
+ if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(
+ *it, scale)) {
+ LOG_RTCERR2(SetChannelOutputVolumeScaling, *it, scale);
+ return false;
+ }
+ if (-1 == engine()->voe()->volume()->SetOutputVolumePan(
+ *it, static_cast<float>(left), static_cast<float>(right))) {
+ LOG_RTCERR3(SetOutputVolumePan, *it, left, right);
+      // Do not return on failure. SetOutputVolumePan is not available on all
+      // platforms.
+ }
+ LOG(LS_INFO) << "SetOutputScaling to left=" << left * scale
+ << " right=" << right * scale
+ << " for channel " << *it << " and ssrc " << ssrc;
+ }
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::GetOutputScaling(
+ uint32 ssrc, double* left, double* right) {
+ if (!left || !right) return false;
+
+ talk_base::CritScope lock(&receive_channels_cs_);
+ // Determine which channel based on ssrc.
+ int channel = (0 == ssrc) ? voe_channel() : GetReceiveChannelNum(ssrc);
+ if (channel == -1) {
+ LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc;
+ return false;
+ }
+
+ float scaling;
+ if (-1 == engine()->voe()->volume()->GetChannelOutputVolumeScaling(
+ channel, scaling)) {
+ LOG_RTCERR2(GetChannelOutputVolumeScaling, channel, scaling);
+ return false;
+ }
+
+ float left_pan;
+ float right_pan;
+ if (-1 == engine()->voe()->volume()->GetOutputVolumePan(
+ channel, left_pan, right_pan)) {
+ LOG_RTCERR3(GetOutputVolumePan, channel, left_pan, right_pan);
+ // If GetOutputVolumePan fails, we use the default left and right pan.
+ left_pan = 1.0f;
+ right_pan = 1.0f;
+ }
+
+ *left = scaling * left_pan;
+ *right = scaling * right_pan;
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetRingbackTone(const char *buf, int len) {
+ ringback_tone_.reset(new WebRtcSoundclipStream(buf, len));
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::PlayRingbackTone(uint32 ssrc,
+ bool play, bool loop) {
+ if (!ringback_tone_) {
+ return false;
+ }
+
+ // The voe file api is not available in chrome.
+ if (!engine()->voe()->file()) {
+ return false;
+ }
+
+ // Determine which VoiceEngine channel to play on.
+ int channel = (ssrc == 0) ? voe_channel() : GetReceiveChannelNum(ssrc);
+ if (channel == -1) {
+ return false;
+ }
+
+ // Make sure the ringtone is cued properly, and play it out.
+ if (play) {
+ ringback_tone_->set_loop(loop);
+ ringback_tone_->Rewind();
+ if (engine()->voe()->file()->StartPlayingFileLocally(channel,
+ ringback_tone_.get()) == -1) {
+ LOG_RTCERR2(StartPlayingFileLocally, channel, ringback_tone_.get());
+ LOG(LS_ERROR) << "Unable to start ringback tone";
+ return false;
+ }
+ ringback_channels_.insert(channel);
+ LOG(LS_INFO) << "Started ringback on channel " << channel;
+ } else {
+ if (engine()->voe()->file()->IsPlayingFileLocally(channel) == 1 &&
+ engine()->voe()->file()->StopPlayingFileLocally(channel) == -1) {
+ LOG_RTCERR1(StopPlayingFileLocally, channel);
+ return false;
+ }
+ LOG(LS_INFO) << "Stopped ringback on channel " << channel;
+ ringback_channels_.erase(channel);
+ }
+
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::CanInsertDtmf() {
+ return dtmf_allowed_;
+}
+
+bool WebRtcVoiceMediaChannel::InsertDtmf(uint32 ssrc, int event,
+ int duration, int flags) {
+ if (!dtmf_allowed_) {
+ return false;
+ }
+
+ // Send the event.
+ if (flags & cricket::DF_SEND) {
+ int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
+ if (channel == -1) {
+ LOG(LS_WARNING) << "InsertDtmf - The specified ssrc "
+ << ssrc << " is not in use.";
+ return false;
+ }
+ // Send DTMF using out-of-band DTMF. ("true", as 3rd arg)
+ if (engine()->voe()->dtmf()->SendTelephoneEvent(
+ channel, event, true, duration) == -1) {
+ LOG_RTCERR4(SendTelephoneEvent, channel, event, true, duration);
+ return false;
+ }
+ }
+
+ // Play the event.
+ if (flags & cricket::DF_PLAY) {
+ // Play DTMF tone locally.
+ if (engine()->voe()->dtmf()->PlayDtmfTone(event, duration) == -1) {
+ LOG_RTCERR2(PlayDtmfTone, event, duration);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void WebRtcVoiceMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
+ // Pick which channel to send this packet to. If this packet doesn't match
+ // any multiplexed streams, just send it to the default channel. Otherwise,
+ // send it to the specific decoder instance for that stream.
+ int which_channel = GetReceiveChannelNum(
+ ParseSsrc(packet->data(), packet->length(), false));
+ if (which_channel == -1) {
+ which_channel = voe_channel();
+ }
+
+ // Stop any ringback that might be playing on the channel.
+  // It's possible the ringback has already stopped, in which case we'll just
+ // use the opportunity to remove the channel from ringback_channels_.
+ if (engine()->voe()->file()) {
+ const std::set<int>::iterator it = ringback_channels_.find(which_channel);
+ if (it != ringback_channels_.end()) {
+ if (engine()->voe()->file()->IsPlayingFileLocally(
+ which_channel) == 1) {
+ engine()->voe()->file()->StopPlayingFileLocally(which_channel);
+ LOG(LS_INFO) << "Stopped ringback on channel " << which_channel
+ << " due to incoming media";
+ }
+ ringback_channels_.erase(which_channel);
+ }
+ }
+
+ // Pass it off to the decoder.
+ engine()->voe()->network()->ReceivedRTPPacket(
+ which_channel,
+ packet->data(),
+ static_cast<unsigned int>(packet->length()));
+}
+
+void WebRtcVoiceMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
+ // Sending channels need all RTCP packets with feedback information.
+ // Even sender reports can contain attached report blocks.
+ // Receiving channels need sender reports in order to create
+ // correct receiver reports.
+ int type = 0;
+ if (!GetRtcpType(packet->data(), packet->length(), &type)) {
+ LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
+ return;
+ }
+
+ // If it is a sender report, find the channel that is listening.
+ bool has_sent_to_default_channel = false;
+ if (type == kRtcpTypeSR) {
+ int which_channel = GetReceiveChannelNum(
+ ParseSsrc(packet->data(), packet->length(), true));
+ if (which_channel != -1) {
+ engine()->voe()->network()->ReceivedRTCPPacket(
+ which_channel,
+ packet->data(),
+ static_cast<unsigned int>(packet->length()));
+
+ if (IsDefaultChannel(which_channel))
+ has_sent_to_default_channel = true;
+ }
+ }
+
+  // An SR may contain RRs, and any RR entry may correspond to any one of the
+  // send channels. So all RTCP packets must be forwarded to all send channels.
+  // VoE will filter out RRs internally.
+ for (ChannelMap::iterator iter = send_channels_.begin();
+ iter != send_channels_.end(); ++iter) {
+ // Make sure not sending the same packet to default channel more than once.
+ if (IsDefaultChannel(iter->second.channel) && has_sent_to_default_channel)
+ continue;
+
+ engine()->voe()->network()->ReceivedRTCPPacket(
+ iter->second.channel,
+ packet->data(),
+ static_cast<unsigned int>(packet->length()));
+ }
+}
+
+bool WebRtcVoiceMediaChannel::MuteStream(uint32 ssrc, bool muted) {
+ int channel = (ssrc == 0) ? voe_channel() : GetSendChannelNum(ssrc);
+ if (channel == -1) {
+ LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use.";
+ return false;
+ }
+ if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) {
+ LOG_RTCERR2(SetInputMute, channel, muted);
+ return false;
+ }
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetSendBandwidth(bool autobw, int bps) {
+ LOG(LS_INFO) << "WebRtcVoiceMediaChanne::SetSendBandwidth.";
+
+ if (!send_codec_) {
+ LOG(LS_INFO) << "The send codec has not been set up yet.";
+ return false;
+ }
+
+ // Bandwidth is auto by default.
+ if (autobw || bps <= 0)
+ return true;
+
+ webrtc::CodecInst codec = *send_codec_;
+ bool is_multi_rate = IsCodecMultiRate(codec);
+
+ if (is_multi_rate) {
+ // If codec is multi-rate then just set the bitrate.
+ codec.rate = bps;
+ if (!SetSendCodec(codec)) {
+ LOG(LS_INFO) << "Failed to set codec " << codec.plname
+ << " to bitrate " << bps << " bps.";
+ return false;
+ }
+ return true;
+ } else {
+    // If codec is not multi-rate and |bps| is less than the fixed bitrate
+    // then fail. If codec is not multi-rate and |bps| equals or exceeds the
+    // fixed bitrate then ignore.
+ if (bps < codec.rate) {
+ LOG(LS_INFO) << "Failed to set codec " << codec.plname
+ << " to bitrate " << bps << " bps"
+ << ", requires at least " << codec.rate << " bps.";
+ return false;
+ }
+ return true;
+ }
+}
+
+bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) {
+ bool echo_metrics_on = false;
+ // These can take on valid negative values, so use the lowest possible level
+ // as default rather than -1.
+ int echo_return_loss = -100;
+ int echo_return_loss_enhancement = -100;
+ // These can also be negative, but in practice -1 is only used to signal
+ // insufficient data, since the resolution is limited to multiples of 4 ms.
+ int echo_delay_median_ms = -1;
+ int echo_delay_std_ms = -1;
+ if (engine()->voe()->processing()->GetEcMetricsStatus(
+ echo_metrics_on) != -1 && echo_metrics_on) {
+ // TODO(ajm): we may want to use VoECallReport::GetEchoMetricsSummary
+ // here, but it appears to be unsuitable currently. Revisit after this is
+ // investigated: http://b/issue?id=5666755
+ int erl, erle, rerl, anlp;
+ if (engine()->voe()->processing()->GetEchoMetrics(
+ erl, erle, rerl, anlp) != -1) {
+ echo_return_loss = erl;
+ echo_return_loss_enhancement = erle;
+ }
+
+ int median, std;
+ if (engine()->voe()->processing()->GetEcDelayMetrics(median, std) != -1) {
+ echo_delay_median_ms = median;
+ echo_delay_std_ms = std;
+ }
+ }
+
+
+ webrtc::CallStatistics cs;
+ unsigned int ssrc;
+ webrtc::CodecInst codec;
+ unsigned int level;
+
+ for (ChannelMap::const_iterator channel_iter = send_channels_.begin();
+ channel_iter != send_channels_.end(); ++channel_iter) {
+ const int channel = channel_iter->second.channel;
+
+ // Fill in the sender info, based on what we know, and what the
+ // remote side told us it got from its RTCP report.
+ VoiceSenderInfo sinfo;
+
+ if (engine()->voe()->rtp()->GetRTCPStatistics(channel, cs) == -1 ||
+ engine()->voe()->rtp()->GetLocalSSRC(channel, ssrc) == -1) {
+ continue;
+ }
+
+ sinfo.ssrc = ssrc;
+ sinfo.codec_name = send_codec_.get() ? send_codec_->plname : "";
+ sinfo.bytes_sent = cs.bytesSent;
+ sinfo.packets_sent = cs.packetsSent;
+ // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
+ // returns 0 to indicate an error value.
+ sinfo.rtt_ms = (cs.rttMs > 0) ? cs.rttMs : -1;
+
+ // Get data from the last remote RTCP report. Use default values if no data
+ // available.
+ sinfo.fraction_lost = -1.0;
+ sinfo.jitter_ms = -1;
+ sinfo.packets_lost = -1;
+ sinfo.ext_seqnum = -1;
+ std::vector<webrtc::ReportBlock> receive_blocks;
+ if (engine()->voe()->rtp()->GetRemoteRTCPReportBlocks(
+ channel, &receive_blocks) != -1 &&
+ engine()->voe()->codec()->GetSendCodec(channel, codec) != -1) {
+ std::vector<webrtc::ReportBlock>::iterator iter;
+ for (iter = receive_blocks.begin(); iter != receive_blocks.end();
+ ++iter) {
+ // Lookup report for send ssrc only.
+ if (iter->source_SSRC == sinfo.ssrc) {
+ // Convert Q8 to floating point.
+ sinfo.fraction_lost = static_cast<float>(iter->fraction_lost) / 256;
+ // Convert samples to milliseconds.
+ if (codec.plfreq / 1000 > 0) {
+ sinfo.jitter_ms = iter->interarrival_jitter / (codec.plfreq / 1000);
+ }
+ sinfo.packets_lost = iter->cumulative_num_packets_lost;
+ sinfo.ext_seqnum = iter->extended_highest_sequence_number;
+ break;
+ }
+ }
+ }
+
+ // Local speech level.
+ sinfo.audio_level = (engine()->voe()->volume()->
+ GetSpeechInputLevelFullRange(level) != -1) ? level : -1;
+
+ // TODO(xians): We are injecting the same APM logging to all the send
+ // channels here because there is no good way to know which send channel
+ // is using the APM. The correct fix is to allow the send channels to have
+ // their own APM so that we can feed the correct APM logging to different
+ // send channels. See issue crbug/264611 .
+ sinfo.echo_return_loss = echo_return_loss;
+ sinfo.echo_return_loss_enhancement = echo_return_loss_enhancement;
+ sinfo.echo_delay_median_ms = echo_delay_median_ms;
+ sinfo.echo_delay_std_ms = echo_delay_std_ms;
+
+ info->senders.push_back(sinfo);
+ }
+
+ // Build the list of receivers, one for each receiving channel, or 1 in
+ // a 1:1 call.
+ std::vector<int> channels;
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ channels.push_back(it->second.channel);
+ }
+ if (channels.empty()) {
+ channels.push_back(voe_channel());
+ }
+
+ // Get the SSRC and stats for each receiver, based on our own calculations.
+ for (std::vector<int>::const_iterator it = channels.begin();
+ it != channels.end(); ++it) {
+ memset(&cs, 0, sizeof(cs));
+ if (engine()->voe()->rtp()->GetRemoteSSRC(*it, ssrc) != -1 &&
+ engine()->voe()->rtp()->GetRTCPStatistics(*it, cs) != -1 &&
+ engine()->voe()->codec()->GetRecCodec(*it, codec) != -1) {
+ VoiceReceiverInfo rinfo;
+ rinfo.ssrc = ssrc;
+ rinfo.bytes_rcvd = cs.bytesReceived;
+ rinfo.packets_rcvd = cs.packetsReceived;
+ // The next four fields are from the most recently sent RTCP report.
+ // Convert Q8 to floating point.
+ rinfo.fraction_lost = static_cast<float>(cs.fractionLost) / (1 << 8);
+ rinfo.packets_lost = cs.cumulativeLost;
+ rinfo.ext_seqnum = cs.extendedMax;
+ // Convert samples to milliseconds.
+ if (codec.plfreq / 1000 > 0) {
+ rinfo.jitter_ms = cs.jitterSamples / (codec.plfreq / 1000);
+ }
+
+ // Get jitter buffer and total delay (alg + jitter + playout) stats.
+ webrtc::NetworkStatistics ns;
+ if (engine()->voe()->neteq() &&
+ engine()->voe()->neteq()->GetNetworkStatistics(
+ *it, ns) != -1) {
+ rinfo.jitter_buffer_ms = ns.currentBufferSize;
+ rinfo.jitter_buffer_preferred_ms = ns.preferredBufferSize;
+ rinfo.expand_rate =
+ static_cast<float>(ns.currentExpandRate) / (1 << 14);
+ }
+ if (engine()->voe()->sync()) {
+ int playout_buffer_delay_ms = 0;
+ engine()->voe()->sync()->GetDelayEstimate(
+ *it, &rinfo.delay_estimate_ms, &playout_buffer_delay_ms);
+ }
+
+ // Get speech level.
+ rinfo.audio_level = (engine()->voe()->volume()->
+ GetSpeechOutputLevelFullRange(*it, level) != -1) ? level : -1;
+ info->receivers.push_back(rinfo);
+ }
+ }
+
+ return true;
+}
+
+void WebRtcVoiceMediaChannel::GetLastMediaError(
+ uint32* ssrc, VoiceMediaChannel::Error* error) {
+ ASSERT(ssrc != NULL);
+ ASSERT(error != NULL);
+ FindSsrc(voe_channel(), ssrc);
+ *error = WebRtcErrorToChannelError(GetLastEngineError());
+}
+
+bool WebRtcVoiceMediaChannel::FindSsrc(int channel_num, uint32* ssrc) {
+ talk_base::CritScope lock(&receive_channels_cs_);
+ ASSERT(ssrc != NULL);
+ if (channel_num == -1 && send_ != SEND_NOTHING) {
+ // Sometimes the VoiceEngine core will throw error with channel_num = -1.
+ // This means the error is not limited to a specific channel. Signal the
+ // message using ssrc=0. If the current channel is sending, use this
+ // channel for sending the message.
+ *ssrc = 0;
+ return true;
+ } else {
+ // Check whether this is a sending channel.
+ for (ChannelMap::const_iterator it = send_channels_.begin();
+ it != send_channels_.end(); ++it) {
+ if (it->second.channel == channel_num) {
+ // This is a sending channel.
+ uint32 local_ssrc = 0;
+ if (engine()->voe()->rtp()->GetLocalSSRC(
+ channel_num, local_ssrc) != -1) {
+ *ssrc = local_ssrc;
+ }
+ return true;
+ }
+ }
+
+ // Check whether this is a receiving channel.
+ for (ChannelMap::const_iterator it = receive_channels_.begin();
+ it != receive_channels_.end(); ++it) {
+ if (it->second.channel == channel_num) {
+ *ssrc = it->first;
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void WebRtcVoiceMediaChannel::OnError(uint32 ssrc, int error) {
+ SignalMediaError(ssrc, WebRtcErrorToChannelError(error));
+}
+
+int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) {
+ unsigned int ulevel;
+ int ret =
+ engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel);
+ return (ret == 0) ? static_cast<int>(ulevel) : -1;
+}
+
+int WebRtcVoiceMediaChannel::GetReceiveChannelNum(uint32 ssrc) {
+ ChannelMap::iterator it = receive_channels_.find(ssrc);
+ if (it != receive_channels_.end())
+ return it->second.channel;
+ return (ssrc == default_receive_ssrc_) ? voe_channel() : -1;
+}
+
+int WebRtcVoiceMediaChannel::GetSendChannelNum(uint32 ssrc) {
+ ChannelMap::iterator it = send_channels_.find(ssrc);
+ if (it != send_channels_.end())
+ return it->second.channel;
+
+ return -1;
+}
+
+bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec,
+ const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) {
+ // Get the RED encodings from the parameter with no name. This may
+ // change based on what is discussed on the Jingle list.
+ // The encoding parameter is of the form "a/b"; we only support where
+ // a == b. Verify this and parse out the value into red_pt.
+ // If the parameter value is absent (as it will be until we wire up the
+ // signaling of this message), use the second codec specified (i.e. the
+ // one after "red") as the encoding parameter.
+ int red_pt = -1;
+ std::string red_params;
+ CodecParameterMap::const_iterator it = red_codec.params.find("");
+ if (it != red_codec.params.end()) {
+ red_params = it->second;
+ std::vector<std::string> red_pts;
+ if (talk_base::split(red_params, '/', &red_pts) != 2 ||
+ red_pts[0] != red_pts[1] ||
+ !talk_base::FromString(red_pts[0], &red_pt)) {
+ LOG(LS_WARNING) << "RED params " << red_params << " not supported.";
+ return false;
+ }
+ } else if (red_codec.params.empty()) {
+ LOG(LS_WARNING) << "RED params not present, using defaults";
+ if (all_codecs.size() > 1) {
+ red_pt = all_codecs[1].id;
+ }
+ }
+
+ // Try to find red_pt in |codecs|.
+ std::vector<AudioCodec>::const_iterator codec;
+ for (codec = all_codecs.begin(); codec != all_codecs.end(); ++codec) {
+ if (codec->id == red_pt)
+ break;
+ }
+
+ // If we find the right codec, that will be the codec we pass to
+ // SetSendCodec, with the desired payload type.
+ if (codec != all_codecs.end() &&
+ engine()->FindWebRtcCodec(*codec, send_codec)) {
+ } else {
+ LOG(LS_WARNING) << "RED params " << red_params << " are invalid.";
+ return false;
+ }
+
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::EnableRtcp(int channel) {
+ if (engine()->voe()->rtp()->SetRTCPStatus(channel, true) == -1) {
+ LOG_RTCERR2(SetRTCPStatus, channel, 1);
+ return false;
+ }
+ // TODO(juberti): Enable VQMon and RTCP XR reports, once we know what
+ // what we want to do with them.
+ // engine()->voe().EnableVQMon(voe_channel(), true);
+ // engine()->voe().EnableRTCP_XR(voe_channel(), true);
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::ResetRecvCodecs(int channel) {
+ int ncodecs = engine()->voe()->codec()->NumOfCodecs();
+ for (int i = 0; i < ncodecs; ++i) {
+ webrtc::CodecInst voe_codec;
+ if (engine()->voe()->codec()->GetCodec(i, voe_codec) != -1) {
+ voe_codec.pltype = -1;
+ if (engine()->voe()->codec()->SetRecPayloadType(
+ channel, voe_codec) == -1) {
+ LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec));
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) {
+ if (playout) {
+ LOG(LS_INFO) << "Starting playout for channel #" << channel;
+ if (engine()->voe()->base()->StartPlayout(channel) == -1) {
+ LOG_RTCERR1(StartPlayout, channel);
+ return false;
+ }
+ } else {
+ LOG(LS_INFO) << "Stopping playout for channel #" << channel;
+ engine()->voe()->base()->StopPlayout(channel);
+ }
+ return true;
+}
+
+uint32 WebRtcVoiceMediaChannel::ParseSsrc(const void* data, size_t len,
+ bool rtcp) {
+ size_t ssrc_pos = (!rtcp) ? 8 : 4;
+ uint32 ssrc = 0;
+ if (len >= (ssrc_pos + sizeof(ssrc))) {
+ ssrc = talk_base::GetBE32(static_cast<const char*>(data) + ssrc_pos);
+ }
+ return ssrc;
+}
+
+// Convert VoiceEngine error code into VoiceMediaChannel::Error enum.
+VoiceMediaChannel::Error
+ WebRtcVoiceMediaChannel::WebRtcErrorToChannelError(int err_code) {
+ switch (err_code) {
+ case 0:
+ return ERROR_NONE;
+ case VE_CANNOT_START_RECORDING:
+ case VE_MIC_VOL_ERROR:
+ case VE_GET_MIC_VOL_ERROR:
+ case VE_CANNOT_ACCESS_MIC_VOL:
+ return ERROR_REC_DEVICE_OPEN_FAILED;
+ case VE_SATURATION_WARNING:
+ return ERROR_REC_DEVICE_SATURATION;
+ case VE_REC_DEVICE_REMOVED:
+ return ERROR_REC_DEVICE_REMOVED;
+ case VE_RUNTIME_REC_WARNING:
+ case VE_RUNTIME_REC_ERROR:
+ return ERROR_REC_RUNTIME_ERROR;
+ case VE_CANNOT_START_PLAYOUT:
+ case VE_SPEAKER_VOL_ERROR:
+ case VE_GET_SPEAKER_VOL_ERROR:
+ case VE_CANNOT_ACCESS_SPEAKER_VOL:
+ return ERROR_PLAY_DEVICE_OPEN_FAILED;
+ case VE_RUNTIME_PLAY_WARNING:
+ case VE_RUNTIME_PLAY_ERROR:
+ return ERROR_PLAY_RUNTIME_ERROR;
+ case VE_TYPING_NOISE_WARNING:
+ return ERROR_REC_TYPING_NOISE_DETECTED;
+ default:
+ return VoiceMediaChannel::ERROR_OTHER;
+ }
+}
+
+int WebRtcSoundclipStream::Read(void *buf, int len) {
+ size_t res = 0;
+ mem_.Read(buf, len, &res, NULL);
+ return static_cast<int>(res);
+}
+
+int WebRtcSoundclipStream::Rewind() {
+ mem_.Rewind();
+ // Return -1 to keep VoiceEngine from looping.
+ return (loop_) ? 0 : -1;
+}
+
+} // namespace cricket
+
+#endif // HAVE_WEBRTC_VOICE
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.h b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.h
new file mode 100644
index 00000000000..0c2b613ac46
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine.h
@@ -0,0 +1,428 @@
+/*
+ * libjingle
+ * Copyright 2004 Google Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TALK_MEDIA_WEBRTCVOICEENGINE_H_
+#define TALK_MEDIA_WEBRTCVOICEENGINE_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "talk/base/buffer.h"
+#include "talk/base/byteorder.h"
+#include "talk/base/logging.h"
+#include "talk/base/scoped_ptr.h"
+#include "talk/base/stream.h"
+#include "talk/media/base/rtputils.h"
+#include "talk/media/webrtc/webrtccommon.h"
+#include "talk/media/webrtc/webrtcexport.h"
+#include "talk/media/webrtc/webrtcvoe.h"
+#include "talk/session/media/channel.h"
+
+#if !defined(LIBPEERCONNECTION_LIB) && \
+ !defined(LIBPEERCONNECTION_IMPLEMENTATION)
+#error "Bogus include."
+#endif
+
+
+namespace cricket {
+
+// WebRtcSoundclipStream is an adapter object that allows a memory stream to be
+// passed into WebRtc, and support looping.
+class WebRtcSoundclipStream : public webrtc::InStream {
+ public:
+  // Wraps the |len| bytes at |buf| in a MemoryStream. Looping is enabled by
+  // default. NOTE(review): whether MemoryStream copies |buf| or merely
+  // references it determines the required lifetime of |buf| — confirm against
+  // talk_base::MemoryStream.
+  WebRtcSoundclipStream(const char* buf, size_t len)
+      : mem_(buf, len), loop_(true) {
+  }
+  // Controls whether Rewind() reports "play again" (true) or end-of-clip.
+  void set_loop(bool loop) { loop_ = loop; }
+  // webrtc::InStream implementation; defined in webrtcvoiceengine.cc.
+  virtual int Read(void* buf, int len);
+  virtual int Rewind();
+
+ private:
+  talk_base::MemoryStream mem_;  // Backing store for the clip data.
+  bool loop_;                    // Whether playback should repeat.
+};
+
+// WebRtcMonitorStream is used to monitor a stream coming from WebRtc.
+// For now we just dump the data.
+class WebRtcMonitorStream : public webrtc::OutStream {
+  // Discards the monitored audio and reports success to VoiceEngine.
+  virtual bool Write(const void *buf, int len) {
+    return true;
+  }
+};
+
+class AudioDeviceModule;
+class AudioRenderer;
+class VoETraceWrapper;
+class VoEWrapper;
+class VoiceProcessor;
+class WebRtcSoundclipMedia;
+class WebRtcVoiceMediaChannel;
+
+// WebRtcVoiceEngine is a class to be used with CompositeMediaEngine.
+// It uses the WebRtc VoiceEngine library for audio handling.
+class WebRtcVoiceEngine
+    : public webrtc::VoiceEngineObserver,
+      public webrtc::TraceCallback,
+      public webrtc::VoEMediaProcess {
+ public:
+  WebRtcVoiceEngine();
+  // Dependency injection for testing.
+  WebRtcVoiceEngine(VoEWrapper* voe_wrapper,
+                    VoEWrapper* voe_wrapper_sc,
+                    VoETraceWrapper* tracing);
+  ~WebRtcVoiceEngine();
+  // Starts up both VoiceEngine instances; Terminate() shuts them down again.
+  bool Init(talk_base::Thread* worker_thread);
+  void Terminate();
+
+  int GetCapabilities();
+  // Creates a media channel on the primary VoiceEngine. Returns NULL on
+  // failure; the caller owns the returned object.
+  VoiceMediaChannel* CreateChannel();
+
+  // Creates a soundclip player backed by the secondary ('ring') VoiceEngine.
+  SoundclipMedia* CreateSoundclip();
+
+  // TODO(pthatcher): Rename to SetOptions and replace the old
+  // flags-based SetOptions.
+  bool SetAudioOptions(const AudioOptions& options);
+  // Eventually, we will replace them with AudioOptions.
+  // In the meantime, we leave this here for backwards compat.
+  bool SetOptions(int flags);
+  // Overrides, when set, take precedence over the options on a
+  // per-option basis. For example, if AGC is set in options and AEC
+  // is set in overrides, AGC and AEC will be both be set. Overrides
+  // can also turn off options. For example, if AGC is set to "on" in
+  // options and AGC is set to "off" in overrides, the result is that
+  // AGC will be off until different overrides are applied or until
+  // the overrides are cleared. Only one set of overrides is present
+  // at a time (they do not "stack"). And when the overrides are
+  // cleared, the media engine's state reverts back to the options set
+  // via SetOptions. This allows us to have both "persistent options"
+  // (the normal options) and "temporary options" (overrides).
+  bool SetOptionOverrides(const AudioOptions& options);
+  bool ClearOptionOverrides();
+  bool SetDelayOffset(int offset);
+  // Selects the capture and playout devices; NULL means default device.
+  // NOTE(review): NULL semantics inferred from pointer parameters — confirm
+  // against the .cc implementation.
+  bool SetDevices(const Device* in_device, const Device* out_device);
+  bool GetOutputVolume(int* level);
+  bool SetOutputVolume(int level);
+  int GetInputLevel();
+  bool SetLocalMonitor(bool enable);
+
+  // Codec/RTP-extension capability queries used during negotiation.
+  const std::vector<AudioCodec>& codecs();
+  bool FindCodec(const AudioCodec& codec);
+  bool FindWebRtcCodec(const AudioCodec& codec, webrtc::CodecInst* gcodec);
+
+  const std::vector<RtpHeaderExtension>& rtp_header_extensions() const;
+
+  // Routes VoiceEngine tracing into talk_base logging at |min_sev| with the
+  // given filter string.
+  void SetLogging(int min_sev, const char* filter);
+
+  bool RegisterProcessor(uint32 ssrc,
+                         VoiceProcessor* voice_processor,
+                         MediaProcessorDirection direction);
+  bool UnregisterProcessor(uint32 ssrc,
+                           VoiceProcessor* voice_processor,
+                           MediaProcessorDirection direction);
+
+  // Method from webrtc::VoEMediaProcess
+  virtual void Process(int channel,
+                       webrtc::ProcessingTypes type,
+                       int16_t audio10ms[],
+                       int length,
+                       int sampling_freq,
+                       bool is_stereo);
+
+  // For tracking WebRtc channels. Needed because we have to pause them
+  // all when switching devices.
+  // May only be called by WebRtcVoiceMediaChannel.
+  void RegisterChannel(WebRtcVoiceMediaChannel *channel);
+  void UnregisterChannel(WebRtcVoiceMediaChannel *channel);
+
+  // May only be called by WebRtcSoundclipMedia.
+  void RegisterSoundclip(WebRtcSoundclipMedia *channel);
+  void UnregisterSoundclip(WebRtcSoundclipMedia *channel);
+
+  // Called by WebRtcVoiceMediaChannel to set a gain offset from
+  // the default AGC target level.
+  bool AdjustAgcLevel(int delta);
+
+  // Accessors for the primary and soundclip VoiceEngine wrappers.
+  VoEWrapper* voe() { return voe_wrapper_.get(); }
+  VoEWrapper* voe_sc() { return voe_wrapper_sc_.get(); }
+  int GetLastEngineError();
+
+  // Set the external ADMs. This can only be called before Init.
+  bool SetAudioDeviceModule(webrtc::AudioDeviceModule* adm,
+                            webrtc::AudioDeviceModule* adm_sc);
+
+  // Check whether the supplied trace should be ignored.
+  bool ShouldIgnoreTrace(const std::string& trace);
+
+ private:
+  typedef std::vector<WebRtcSoundclipMedia *> SoundclipList;
+  typedef std::vector<WebRtcVoiceMediaChannel *> ChannelList;
+  typedef sigslot::
+      signal3<uint32, MediaProcessorDirection, AudioFrame*> FrameSignal;
+
+  void Construct();
+  void ConstructCodecs();
+  bool InitInternal();
+  void SetTraceFilter(int filter);
+  void SetTraceOptions(const std::string& options);
+  // Applies either options or overrides. Every option that is "set"
+  // will be applied. Every option not "set" will be ignored. This
+  // allows us to selectively turn on and off different options easily
+  // at any time.
+  bool ApplyOptions(const AudioOptions& options);
+  // webrtc::TraceCallback and webrtc::VoiceEngineObserver implementations.
+  virtual void Print(webrtc::TraceLevel level, const char* trace, int length);
+  virtual void CallbackOnError(int channel, int errCode);
+  // Given the device type, name, and id, find device id. Return true and
+  // set the output parameter rtc_id if successful.
+  bool FindWebRtcAudioDeviceId(
+      bool is_input, const std::string& dev_name, int dev_id, int* rtc_id);
+  bool FindChannelAndSsrc(int channel_num,
+                          WebRtcVoiceMediaChannel** channel,
+                          uint32* ssrc) const;
+  bool FindChannelNumFromSsrc(uint32 ssrc,
+                              MediaProcessorDirection direction,
+                              int* channel_num);
+  bool ChangeLocalMonitor(bool enable);
+  bool PauseLocalMonitor();
+  bool ResumeLocalMonitor();
+
+  bool UnregisterProcessorChannel(MediaProcessorDirection channel_direction,
+                                  uint32 ssrc,
+                                  VoiceProcessor* voice_processor,
+                                  MediaProcessorDirection processor_direction);
+
+  void StartAecDump(const std::string& filename);
+  void StopAecDump();
+
+  // When a voice processor registers with the engine, it is connected
+  // to either the Rx or Tx signals, based on the direction parameter.
+  // SignalXXMediaFrame will be invoked for every audio packet.
+  FrameSignal SignalRxMediaFrame;
+  FrameSignal SignalTxMediaFrame;
+
+  static const int kDefaultLogSeverity = talk_base::LS_WARNING;
+
+  // The primary instance of WebRtc VoiceEngine.
+  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_;
+  // A secondary instance, for playing out soundclips (on the 'ring' device).
+  talk_base::scoped_ptr<VoEWrapper> voe_wrapper_sc_;
+  talk_base::scoped_ptr<VoETraceWrapper> tracing_;
+  // The external audio device manager
+  webrtc::AudioDeviceModule* adm_;
+  webrtc::AudioDeviceModule* adm_sc_;
+  int log_filter_;
+  std::string log_options_;
+  bool is_dumping_aec_;
+  std::vector<AudioCodec> codecs_;
+  std::vector<RtpHeaderExtension> rtp_header_extensions_;
+  bool desired_local_monitor_enable_;
+  talk_base::scoped_ptr<WebRtcMonitorStream> monitor_;
+  SoundclipList soundclips_;
+  ChannelList channels_;
+  // channels_ can be read from WebRtc callback thread. We need a lock on that
+  // callback as well as the RegisterChannel/UnregisterChannel.
+  talk_base::CriticalSection channels_cs_;
+  webrtc::AgcConfig default_agc_config_;
+  bool initialized_;
+  // See SetOptions and SetOptionOverrides for a description of the
+  // difference between options and overrides.
+  // options_ are the base options, which combined with the
+  // option_overrides_, create the current options being used.
+  // options_ is stored so that when option_overrides_ is cleared, we
+  // can restore the options_ without the option_overrides.
+  AudioOptions options_;
+  AudioOptions option_overrides_;
+
+  // When the media processor registers with the engine, the ssrc is cached
+  // here so that a look up need not be made when the callback is invoked.
+  // This is necessary because the lookup results in mux_channels_cs lock being
+  // held and if a remote participant leaves the hangout at the same time
+  // we hit a deadlock.
+  uint32 tx_processor_ssrc_;
+  uint32 rx_processor_ssrc_;
+
+  talk_base::CriticalSection signal_media_critical_;
+};
+
+// WebRtcMediaChannel is a class that implements the common WebRtc channel
+// functionality.
+template <class T, class E>
+class WebRtcMediaChannel : public T, public webrtc::Transport {
+ public:
+  // |engine| is borrowed, not owned. |channel| is the underlying VoiceEngine
+  // channel id; -1 marks an invalid (failed-to-create) channel.
+  WebRtcMediaChannel(E *engine, int channel)
+      : engine_(engine), voe_channel_(channel) {}
+  E *engine() { return engine_; }
+  int voe_channel() const { return voe_channel_; }
+  bool valid() const { return voe_channel_ != -1; }
+
+ protected:
+  // implements Transport interface
+  virtual int SendPacket(int channel, const void *data, int len) {
+    // Copy into a Buffer with capacity kMaxRtpPacketLen — presumably so the
+    // base channel can grow the packet in place (e.g. protection) without
+    // reallocating; confirm against talk_base::Buffer and BaseChannel.
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    if (!T::SendPacket(&packet)) {
+      return -1;
+    }
+    // VoiceEngine expects the number of bytes sent, or -1 on failure.
+    return len;
+  }
+
+  virtual int SendRTCPPacket(int channel, const void *data, int len) {
+    talk_base::Buffer packet(data, len, kMaxRtpPacketLen);
+    return T::SendRtcp(&packet) ? len : -1;
+  }
+
+ private:
+  E *engine_;
+  int voe_channel_;
+};
+
+// WebRtcVoiceMediaChannel is an implementation of VoiceMediaChannel that uses
+// WebRtc Voice Engine.
+class WebRtcVoiceMediaChannel
+    : public WebRtcMediaChannel<VoiceMediaChannel, WebRtcVoiceEngine> {
+ public:
+  explicit WebRtcVoiceMediaChannel(WebRtcVoiceEngine *engine);
+  virtual ~WebRtcVoiceMediaChannel();
+  virtual bool SetOptions(const AudioOptions& options);
+  virtual bool GetOptions(AudioOptions* options) const {
+    *options = options_;
+    return true;
+  }
+  virtual bool SetRecvCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetSendCodecs(const std::vector<AudioCodec> &codecs);
+  virtual bool SetRecvRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension>& extensions);
+  virtual bool SetSendRtpHeaderExtensions(
+      const std::vector<RtpHeaderExtension>& extensions);
+  virtual bool SetPlayout(bool playout);
+  // Pause/Resume are used by the engine when switching devices; see
+  // WebRtcVoiceEngine::RegisterChannel.
+  bool PausePlayout();
+  bool ResumePlayout();
+  virtual bool SetSend(SendFlags send);
+  bool PauseSend();
+  bool ResumeSend();
+  virtual bool AddSendStream(const StreamParams& sp);
+  virtual bool RemoveSendStream(uint32 ssrc);
+  virtual bool AddRecvStream(const StreamParams& sp);
+  virtual bool RemoveRecvStream(uint32 ssrc);
+  virtual bool SetRemoteRenderer(uint32 ssrc, AudioRenderer* renderer);
+  virtual bool SetLocalRenderer(uint32 ssrc, AudioRenderer* renderer);
+  virtual bool GetActiveStreams(AudioInfo::StreamList* actives);
+  virtual int GetOutputLevel();
+  virtual int GetTimeSinceLastTyping();
+  virtual void SetTypingDetectionParameters(int time_window,
+      int cost_per_typing, int reporting_threshold, int penalty_decay,
+      int type_event_delay);
+  virtual bool SetOutputScaling(uint32 ssrc, double left, double right);
+  virtual bool GetOutputScaling(uint32 ssrc, double* left, double* right);
+
+  virtual bool SetRingbackTone(const char *buf, int len);
+  virtual bool PlayRingbackTone(uint32 ssrc, bool play, bool loop);
+  virtual bool CanInsertDtmf();
+  virtual bool InsertDtmf(uint32 ssrc, int event, int duration, int flags);
+
+  virtual void OnPacketReceived(talk_base::Buffer* packet);
+  virtual void OnRtcpReceived(talk_base::Buffer* packet);
+  virtual void OnReadyToSend(bool ready) {}
+  virtual bool MuteStream(uint32 ssrc, bool on);
+  virtual bool SetSendBandwidth(bool autobw, int bps);
+  virtual bool GetStats(VoiceMediaInfo* info);
+  // Gets last reported error from WebRtc voice engine. This should be only
+  // called in response a failure.
+  virtual void GetLastMediaError(uint32* ssrc,
+                                 VoiceMediaChannel::Error* error);
+  bool FindSsrc(int channel_num, uint32* ssrc);
+  void OnError(uint32 ssrc, int error);
+
+  bool sending() const { return send_ != SEND_NOTHING; }
+  int GetReceiveChannelNum(uint32 ssrc);
+  int GetSendChannelNum(uint32 ssrc);
+
+ protected:
+  int GetLastEngineError() { return engine()->GetLastEngineError(); }
+  int GetOutputLevel(int channel);
+  bool GetRedSendCodec(const AudioCodec& red_codec,
+                       const std::vector<AudioCodec>& all_codecs,
+                       webrtc::CodecInst* send_codec);
+  bool EnableRtcp(int channel);
+  bool ResetRecvCodecs(int channel);
+  bool SetPlayout(int channel, bool playout);
+  // Pulls the SSRC out of a raw RTP/RTCP packet; see webrtcvoiceengine.cc.
+  static uint32 ParseSsrc(const void* data, size_t len, bool rtcp);
+  static Error WebRtcErrorToChannelError(int err_code);
+
+ private:
+  struct WebRtcVoiceChannelInfo;
+  typedef std::map<uint32, WebRtcVoiceChannelInfo> ChannelMap;
+
+  void SetNack(uint32 ssrc, int channel, bool nack_enabled);
+  void SetNack(const ChannelMap& channels, bool nack_enabled);
+  bool SetSendCodec(const webrtc::CodecInst& send_codec);
+  bool SetSendCodec(int channel, const webrtc::CodecInst& send_codec);
+  bool ChangePlayout(bool playout);
+  bool ChangeSend(SendFlags send);
+  bool ChangeSend(int channel, SendFlags send);
+  void ConfigureSendChannel(int channel);
+  bool DeleteChannel(int channel);
+  bool InConferenceMode() const {
+    return options_.conference_mode.GetWithDefaultIfUnset(false);
+  }
+  // The "default" channel is the one created with the media channel itself
+  // (WebRtcMediaChannel::voe_channel()), as opposed to per-stream channels.
+  bool IsDefaultChannel(int channel_id) const {
+    return channel_id == voe_channel();
+  }
+
+  talk_base::scoped_ptr<WebRtcSoundclipStream> ringback_tone_;
+  std::set<int> ringback_channels_;  // channels playing ringback
+  std::vector<AudioCodec> recv_codecs_;
+  talk_base::scoped_ptr<webrtc::CodecInst> send_codec_;
+  AudioOptions options_;
+  bool dtmf_allowed_;
+  bool desired_playout_;
+  bool nack_enabled_;
+  bool playout_;
+  SendFlags desired_send_;
+  SendFlags send_;
+
+  // send_channels_ contains the channels which are being used for sending.
+  // When the default channel (voe_channel) is used for sending, it is
+  // contained in send_channels_, otherwise not.
+  ChannelMap send_channels_;
+  uint32 default_receive_ssrc_;
+  // Note the default channel (voe_channel()) can reside in both
+  // receive_channels_ and send_channels_ in non-conference mode and in that
+  // case it will only be there if a non-zero default_receive_ssrc_ is set.
+  ChannelMap receive_channels_;  // for multiple sources
+  // receive_channels_ can be read from WebRtc callback thread. Access from
+  // the WebRtc thread must be synchronized with edits on the worker thread.
+  // Reads on the worker thread are ok.
+  //
+  // Do not lock this on the VoE media processor thread; potential for deadlock
+  // exists.
+  mutable talk_base::CriticalSection receive_channels_cs_;
+};
+
+} // namespace cricket
+
+#endif // TALK_MEDIA_WEBRTCVOICEENGINE_H_
diff --git a/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine_unittest.cc b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine_unittest.cc
new file mode 100644
index 00000000000..31596cd726d
--- /dev/null
+++ b/chromium/third_party/libjingle/source/talk/media/webrtc/webrtcvoiceengine_unittest.cc
@@ -0,0 +1,2844 @@
+// Copyright 2008 Google Inc.
+//
+// Author: Justin Uberti (juberti@google.com)
+
+#ifdef WIN32
+#include "talk/base/win32.h"
+#include <objbase.h>
+#endif
+
+#include "talk/base/byteorder.h"
+#include "talk/base/gunit.h"
+#include "talk/media/base/constants.h"
+#include "talk/media/base/fakemediaengine.h"
+#include "talk/media/base/fakemediaprocessor.h"
+#include "talk/media/base/fakertp.h"
+#include "talk/media/webrtc/fakewebrtcvoiceengine.h"
+#include "talk/media/webrtc/webrtcvoiceengine.h"
+#include "talk/p2p/base/fakesession.h"
+#include "talk/session/media/channel.h"
+
+// Tests for the WebRtcVoiceEngine/VoiceChannel code.
+
+// Codec fixtures fed to the FakeWebRtcVoiceEngine below. Payload types and
+// clockrates are chosen to exercise payload-type collisions (106 vs
+// telephone-event) and mono/stereo handling in the tests.
+static const cricket::AudioCodec kPcmuCodec(0, "PCMU", 8000, 64000, 1, 0);
+static const cricket::AudioCodec kIsacCodec(103, "ISAC", 16000, 32000, 1, 0);
+static const cricket::AudioCodec kCeltCodec(110, "CELT", 32000, 64000, 2, 0);
+static const cricket::AudioCodec kOpusCodec(111, "opus", 48000, 64000, 2, 0);
+static const cricket::AudioCodec kRedCodec(117, "red", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn8000Codec(13, "CN", 8000, 0, 1, 0);
+static const cricket::AudioCodec kCn16000Codec(105, "CN", 16000, 0, 1, 0);
+static const cricket::AudioCodec
+    kTelephoneEventCodec(106, "telephone-event", 8000, 0, 1, 0);
+static const cricket::AudioCodec* const kAudioCodecs[] = {
+    &kPcmuCodec, &kIsacCodec, &kCeltCodec, &kOpusCodec, &kRedCodec,
+    &kCn8000Codec, &kCn16000Codec, &kTelephoneEventCodec,
+};
+// Stand-in ringback WAV payload; only the bytes matter, not valid WAV data.
+const char kRingbackTone[] = "RIFF____WAVE____ABCD1234";
+static uint32 kSsrc1 = 0x99;
+static uint32 kSsrc2 = 0x98;
+
+// Wires a single FakeWebRtcVoiceEngine instance into every sub-API slot of
+// VoEWrapper, so the fake can observe all engine calls made by
+// WebRtcVoiceEngine.
+class FakeVoEWrapper : public cricket::VoEWrapper {
+ public:
+  explicit FakeVoEWrapper(cricket::FakeWebRtcVoiceEngine* engine)
+      : cricket::VoEWrapper(engine,  // processing
+                            engine,  // base
+                            engine,  // codec
+                            engine,  // dtmf
+                            engine,  // file
+                            engine,  // hw
+                            engine,  // media
+                            engine,  // neteq
+                            engine,  // network
+                            engine,  // rtp
+                            engine,  // sync
+                            engine) {  // volume
+  }
+};
+
+// Trace wrapper that swallows all tracing configuration, keeping test output
+// quiet; every call reports success (0).
+class NullVoETraceWrapper : public cricket::VoETraceWrapper {
+ public:
+  virtual int SetTraceFilter(const unsigned int filter) {
+    return 0;
+  }
+  virtual int SetTraceFile(const char* fileNameUTF8) {
+    return 0;
+  }
+  virtual int SetTraceCallback(webrtc::TraceCallback* callback) {
+    return 0;
+  }
+};
+
+// Test fixture that builds a WebRtcVoiceEngine on top of two
+// FakeWebRtcVoiceEngine instances (primary + soundclip) so engine behavior
+// can be asserted without real audio hardware.
+class WebRtcVoiceEngineTestFake : public testing::Test {
+ public:
+  // Records the last (ssrc, error) pair delivered through a channel's
+  // SignalMediaError, so tests can assert on error propagation.
+  class ChannelErrorListener : public sigslot::has_slots<> {
+   public:
+    explicit ChannelErrorListener(cricket::VoiceMediaChannel* channel)
+        : ssrc_(0), error_(cricket::VoiceMediaChannel::ERROR_NONE) {
+      ASSERT(channel != NULL);
+      channel->SignalMediaError.connect(
+          this, &ChannelErrorListener::OnVoiceChannelError);
+    }
+    void OnVoiceChannelError(uint32 ssrc,
+                             cricket::VoiceMediaChannel::Error error) {
+      ssrc_ = ssrc;
+      error_ = error;
+    }
+    void Reset() {
+      ssrc_ = 0;
+      error_ = cricket::VoiceMediaChannel::ERROR_NONE;
+    }
+    uint32 ssrc() const {
+      return ssrc_;
+    }
+    cricket::VoiceMediaChannel::Error error() const {
+      return error_;
+    }
+
+   private:
+    uint32 ssrc_;
+    cricket::VoiceMediaChannel::Error error_;
+  };
+
+  WebRtcVoiceEngineTestFake()
+      : voe_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        voe_sc_(kAudioCodecs, ARRAY_SIZE(kAudioCodecs)),
+        engine_(new FakeVoEWrapper(&voe_),
+                new FakeVoEWrapper(&voe_sc_),
+                new NullVoETraceWrapper()),
+        channel_(NULL), soundclip_(NULL) {
+    options_conference_.conference_mode.Set(true);
+    options_adjust_agc_.adjust_agc_delta.Set(-10);
+  }
+  // Initializes the engine, creates channel_, and adds a send stream with
+  // kSsrc1. Returns false at the first failing step.
+  bool SetupEngine() {
+    bool result = engine_.Init(talk_base::Thread::Current());
+    if (result) {
+      channel_ = engine_.CreateChannel();
+      result = (channel_ != NULL);
+    }
+    if (result) {
+      result = channel_->AddSendStream(
+          cricket::StreamParams::CreateLegacy(kSsrc1));
+    }
+    return result;
+  }
+  // Like SetupEngine(), but removes the send stream bound to the default
+  // channel so multi-send-stream tests start from a clean slate.
+  void SetupForMultiSendStream() {
+    EXPECT_TRUE(SetupEngine());
+    // Remove stream added in Setup, which is corresponding to default channel.
+    int default_channel_num = voe_.GetLastChannel();
+    uint32 default_send_ssrc;
+    EXPECT_EQ(0, voe_.GetLocalSSRC(default_channel_num, default_send_ssrc));
+    EXPECT_EQ(kSsrc1, default_send_ssrc);
+    EXPECT_TRUE(channel_->RemoveSendStream(default_send_ssrc));
+
+    // Verify the default channel still exists.
+    EXPECT_EQ(0, voe_.GetLocalSSRC(default_channel_num, default_send_ssrc));
+  }
+  // Feeds a raw packet into the channel as if it arrived from the network.
+  void DeliverPacket(const void* data, int len) {
+    talk_base::Buffer packet(data, len);
+    channel_->OnPacketReceived(&packet);
+  }
+  virtual void TearDown() {
+    delete soundclip_;
+    delete channel_;
+    engine_.Terminate();
+  }
+
+  // Verifies DTMF can only be inserted once telephone-event is negotiated,
+  // and that send/play/send+play flags reach the fake engine.
+  void TestInsertDtmf(uint32 ssrc, int channel_id) {
+    // Test we can only InsertDtmf when the other side supports telephone-event.
+    std::vector<cricket::AudioCodec> codecs;
+    codecs.push_back(kPcmuCodec);
+    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+    EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+    EXPECT_FALSE(channel_->CanInsertDtmf());
+    EXPECT_FALSE(channel_->InsertDtmf(ssrc, 1, 111, cricket::DF_SEND));
+    codecs.push_back(kTelephoneEventCodec);
+    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+    EXPECT_TRUE(channel_->CanInsertDtmf());
+    // Check we fail if the ssrc is invalid.
+    EXPECT_FALSE(channel_->InsertDtmf(-1, 1, 111, cricket::DF_SEND));
+
+    // Test send
+    EXPECT_FALSE(voe_.WasSendTelephoneEventCalled(channel_id, 2, 123));
+    EXPECT_TRUE(channel_->InsertDtmf(ssrc, 2, 123, cricket::DF_SEND));
+    EXPECT_TRUE(voe_.WasSendTelephoneEventCalled(channel_id, 2, 123));
+
+    // Test play
+    EXPECT_FALSE(voe_.WasPlayDtmfToneCalled(3, 134));
+    EXPECT_TRUE(channel_->InsertDtmf(ssrc, 3, 134, cricket::DF_PLAY));
+    EXPECT_TRUE(voe_.WasPlayDtmfToneCalled(3, 134));
+
+    // Test send and play
+    EXPECT_FALSE(voe_.WasSendTelephoneEventCalled(channel_id, 4, 145));
+    EXPECT_FALSE(voe_.WasPlayDtmfToneCalled(4, 145));
+    EXPECT_TRUE(channel_->InsertDtmf(ssrc, 4, 145,
+                                     cricket::DF_PLAY | cricket::DF_SEND));
+    EXPECT_TRUE(voe_.WasSendTelephoneEventCalled(channel_id, 4, 145));
+    EXPECT_TRUE(voe_.WasPlayDtmfToneCalled(4, 145));
+  }
+
+  // Test that send bandwidth is set correctly.
+  // |codec| is the codec under test.
+  // |default_bitrate| is the default bitrate for the codec.
+  // |auto_bitrate| is a parameter to set to SetSendBandwidth().
+  // |desired_bitrate| is a parameter to set to SetSendBandwidth().
+  // |expected_result| is expected results from SetSendBandwidth().
+  void TestSendBandwidth(const cricket::AudioCodec& codec,
+                         int default_bitrate,
+                         bool auto_bitrate,
+                         int desired_bitrate,
+                         bool expected_result) {
+    int channel_num = voe_.GetLastChannel();
+    std::vector<cricket::AudioCodec> codecs;
+
+    codecs.push_back(codec);
+    EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+    webrtc::CodecInst temp_codec;
+    EXPECT_FALSE(voe_.GetSendCodec(channel_num, temp_codec));
+    EXPECT_EQ(default_bitrate, temp_codec.rate);
+
+    bool result = channel_->SetSendBandwidth(auto_bitrate, desired_bitrate);
+    EXPECT_EQ(expected_result, result);
+
+    EXPECT_FALSE(voe_.GetSendCodec(channel_num, temp_codec));
+
+    if (result) {
+      // If SetSendBandwidth() returns true then bitrate is set correctly.
+      if (auto_bitrate) {
+        EXPECT_EQ(default_bitrate, temp_codec.rate);
+      } else {
+        EXPECT_EQ(desired_bitrate, temp_codec.rate);
+      }
+    } else {
+      // If SetSendBandwidth() returns false then bitrate is set to the
+      // default value.
+      EXPECT_EQ(default_bitrate, temp_codec.rate);
+    }
+  }
+
+  // Verifies the ssrc-audio-level RTP header extension is toggled on the fake
+  // engine as the extension list changes (unknown extensions ignored).
+  void TestSetSendRtpHeaderExtensions(int channel_id) {
+    std::vector<cricket::RtpHeaderExtension> extensions;
+    bool enable = false;
+    unsigned char id = 0;
+
+    // Ensure audio levels are off by default.
+    EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+        channel_id, enable, id));
+    EXPECT_FALSE(enable);
+
+    // Ensure unknown extensions won't cause an error.
+    extensions.push_back(cricket::RtpHeaderExtension(
+        "urn:ietf:params:unknowextention", 1));
+    EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+    EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+        channel_id, enable, id));
+    EXPECT_FALSE(enable);
+
+    // Ensure audio levels stay off with an empty list of headers.
+    EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+    EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+        channel_id, enable, id));
+    EXPECT_FALSE(enable);
+
+    // Ensure audio levels are enabled if the audio-level header is specified.
+    extensions.push_back(cricket::RtpHeaderExtension(
+        "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+    EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+    EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+        channel_id, enable, id));
+    EXPECT_TRUE(enable);
+    EXPECT_EQ(8, id);
+
+    // Ensure audio levels go back off with an empty list.
+    extensions.clear();
+    EXPECT_TRUE(channel_->SetSendRtpHeaderExtensions(extensions));
+    EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+        channel_id, enable, id));
+    EXPECT_FALSE(enable);
+  }
+
+ protected:
+  cricket::FakeWebRtcVoiceEngine voe_;     // Fake backing the primary engine.
+  cricket::FakeWebRtcVoiceEngine voe_sc_;  // Fake backing the soundclip engine.
+  cricket::WebRtcVoiceEngine engine_;
+  cricket::VoiceMediaChannel* channel_;    // Owned; deleted in TearDown().
+  cricket::SoundclipMedia* soundclip_;     // Owned; deleted in TearDown().
+
+  cricket::AudioOptions options_conference_;
+  cricket::AudioOptions options_adjust_agc_;
+};
+
+// Tests that our stub library "works".
+TEST_F(WebRtcVoiceEngineTestFake, StartupShutdown) {
+  // Init() must bring up both the primary and soundclip fakes, and
+  // Terminate() must tear both down again.
+  EXPECT_FALSE(voe_.IsInited());
+  EXPECT_FALSE(voe_sc_.IsInited());
+  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+  EXPECT_TRUE(voe_.IsInited());
+  EXPECT_TRUE(voe_sc_.IsInited());
+  engine_.Terminate();
+  EXPECT_FALSE(voe_.IsInited());
+  EXPECT_FALSE(voe_sc_.IsInited());
+}
+
+// Tests that we can create and destroy a channel.
+TEST_F(WebRtcVoiceEngineTestFake, CreateChannel) {
+  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+  channel_ = engine_.CreateChannel();
+  // channel_ is deleted by the fixture's TearDown().
+  EXPECT_TRUE(channel_ != NULL);
+}
+
+// Tests that we properly handle failures in CreateChannel.
+TEST_F(WebRtcVoiceEngineTestFake, CreateChannelFail) {
+  // Force the fake engine to fail channel creation; CreateChannel() must
+  // surface that as a NULL return rather than a partially-built channel.
+  voe_.set_fail_create_channel(true);
+  EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+  channel_ = engine_.CreateChannel();
+  EXPECT_TRUE(channel_ == NULL);
+}
+
+// Tests that the list of supported codecs is created properly and ordered
+// correctly
+TEST_F(WebRtcVoiceEngineTestFake, CodecPreference) {
+  const std::vector<cricket::AudioCodec>& codecs = engine_.codecs();
+  ASSERT_FALSE(codecs.empty());
+  // Opus (48kHz stereo) is expected to be the top-preference codec.
+  EXPECT_STRCASEEQ("opus", codecs[0].name.c_str());
+  EXPECT_EQ(48000, codecs[0].clockrate);
+  EXPECT_EQ(2, codecs[0].channels);
+  EXPECT_EQ(64000, codecs[0].bitrate);
+  // Preference values must be strictly descending down the list.
+  int pref = codecs[0].preference;
+  for (size_t i = 1; i < codecs.size(); ++i) {
+    EXPECT_GT(pref, codecs[i].preference);
+    pref = codecs[i].preference;
+  }
+}
+
+// Tests that we can find codecs by name or id, and that we interpret the
+// clockrate and bitrate fields properly.
+TEST_F(WebRtcVoiceEngineTestFake, FindCodec) {
+  cricket::AudioCodec codec;
+  webrtc::CodecInst codec_inst;
+  // Find PCMU with explicit clockrate and bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kPcmuCodec, &codec_inst));
+  // Find ISAC with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kIsacCodec, &codec_inst));
+  // Find telephone-event with explicit clockrate and 0 bitrate.
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kTelephoneEventCodec, &codec_inst));
+  // Find ISAC with a different payload id.
+  codec = kIsacCodec;
+  codec.id = 127;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  // The requested (non-default) payload type must be carried through.
+  EXPECT_EQ(codec.id, codec_inst.pltype);
+  // Find PCMU with a 0 clockrate.
+  codec = kPcmuCodec;
+  codec.clockrate = 0;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(codec.id, codec_inst.pltype);
+  // A zero clockrate falls back to the codec's canonical 8000 Hz.
+  EXPECT_EQ(8000, codec_inst.plfreq);
+  // Find PCMU with a 0 bitrate.
+  codec = kPcmuCodec;
+  codec.bitrate = 0;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(codec.id, codec_inst.pltype);
+  // A zero bitrate falls back to the codec's default 64 kbps.
+  EXPECT_EQ(64000, codec_inst.rate);
+  // Find ISAC with an explicit bitrate.
+  codec = kIsacCodec;
+  codec.bitrate = 32000;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(codec, &codec_inst));
+  EXPECT_EQ(codec.id, codec_inst.pltype);
+  EXPECT_EQ(32000, codec_inst.rate);
+}
+
+// Test that we set our inbound codecs properly, including changing PT.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs[0].id = 106;  // collide with existing telephone-event
+  codecs[2].id = 126;
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  // Query the fake engine by (name, freq, channels) and verify the remapped
+  // payload types took effect.
+  webrtc::CodecInst gcodec;
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+  gcodec.plfreq = 16000;
+  gcodec.channels = 1;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num, gcodec));
+  EXPECT_EQ(106, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname),
+                     "telephone-event");
+  gcodec.plfreq = 8000;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num, gcodec));
+  EXPECT_EQ(126, gcodec.pltype);
+  EXPECT_STREQ("telephone-event", gcodec.plname);
+}
+
+// Test that we fail to set an unknown inbound codec.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsUnsupportedCodec) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  // "XYZ" is not in the engine's codec list, so the whole call must fail.
+  codecs.push_back(cricket::AudioCodec(127, "XYZ", 32000, 0, 1, 0));
+  EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+}
+
+// Test that we fail if we have duplicate types in the inbound list.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsDuplicatePayloadType) {
+  EXPECT_TRUE(SetupEngine());
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kCn16000Codec);
+  // Give CN-16k the same payload type as ISAC; the clash must be rejected.
+  codecs[1].id = kIsacCodec.id;
+  EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+}
+
+// Test that we can decode OPUS without stereo parameters.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpusNoStereo) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc1)));
+  int channel_num2 = voe_.GetLastChannel();
+  webrtc::CodecInst opus;
+  engine_.FindWebRtcCodec(kOpusCodec, &opus);
+  // Even without stereo parameters, recv codecs still specify channels = 2.
+  EXPECT_EQ(2, opus.channels);
+  EXPECT_EQ(111, opus.pltype);
+  EXPECT_STREQ("opus", opus.plname);
+  // Clear pltype and re-query the receive channel to prove it was registered.
+  opus.pltype = 0;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, opus));
+  EXPECT_EQ(111, opus.pltype);
+}
+
+// Test that we can decode OPUS with stereo = 0.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus0Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kOpusCodec);
+  // Explicit "stereo=0" fmtp parameter on the offered Opus codec.
+  codecs[2].params["stereo"] = "0";
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc1)));
+  int channel_num2 = voe_.GetLastChannel();
+  webrtc::CodecInst opus;
+  engine_.FindWebRtcCodec(kOpusCodec, &opus);
+  // Even when stereo is off, recv codecs still specify channels = 2.
+  EXPECT_EQ(2, opus.channels);
+  EXPECT_EQ(111, opus.pltype);
+  EXPECT_STREQ("opus", opus.plname);
+  opus.pltype = 0;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, opus));
+  EXPECT_EQ(111, opus.pltype);
+}
+
+// Test that we can decode OPUS with stereo = 1.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithOpus1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kOpusCodec);
+  // Explicitly request stereo decoding.
+  codecs[2].params["stereo"] = "1";
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc1)));
+  int channel_num2 = voe_.GetLastChannel();
+  webrtc::CodecInst opus;
+  engine_.FindWebRtcCodec(kOpusCodec, &opus);
+  EXPECT_EQ(2, opus.channels);
+  EXPECT_EQ(111, opus.pltype);
+  EXPECT_STREQ("opus", opus.plname);
+  // Clear pltype so we can verify GetRecPayloadType() fills it back in.
+  opus.pltype = 0;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, opus));
+  EXPECT_EQ(111, opus.pltype);
+}
+
+// Test that changes to recv codecs are applied to all streams.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWithMultipleStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs[0].id = 106;  // collide with existing telephone-event
+  codecs[2].id = 126;
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  // The recv codecs were set before this stream existed; the new channel
+  // must still pick them up.
+  EXPECT_TRUE(channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc1)));
+  int channel_num2 = voe_.GetLastChannel();
+  webrtc::CodecInst gcodec;
+  // Look up by name/frequency/channels; GetRecPayloadType() fills in pltype.
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+  gcodec.plfreq = 16000;
+  gcodec.channels = 1;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
+  EXPECT_EQ(106, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname),
+      "telephone-event");
+  gcodec.plfreq = 8000;
+  gcodec.channels = 1;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
+  EXPECT_EQ(126, gcodec.pltype);
+  EXPECT_STREQ("telephone-event", gcodec.plname);
+}
+
+// Test that recv codecs set after a stream already exists are applied to it.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsAfterAddingStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs[0].id = 106;  // collide with existing telephone-event
+
+  // Stream first, codecs second -- the reverse order of the previous test.
+  EXPECT_TRUE(channel_->AddRecvStream(
+      cricket::StreamParams::CreateLegacy(kSsrc1)));
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+  int channel_num2 = voe_.GetLastChannel();
+  webrtc::CodecInst gcodec;
+  talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "ISAC");
+  gcodec.plfreq = 16000;
+  gcodec.channels = 1;
+  EXPECT_EQ(0, voe_.GetRecPayloadType(channel_num2, gcodec));
+  EXPECT_EQ(106, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+}
+
+// Test that we can apply the same set of codecs again while playing.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvCodecsWhilePlaying) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kCn16000Codec);
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(channel_->SetPlayout(true));
+  // Re-applying an identical codec set while playout is active must succeed.
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+
+  // Changing the payload type of a codec should fail.
+  codecs[0].id = 127;
+  EXPECT_FALSE(channel_->SetRecvCodecs(codecs));
+  // The failed update must not have disturbed playout.
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can add a codec while playing.
+TEST_F(WebRtcVoiceEngineTestFake, AddRecvCodecsWhilePlaying) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kCn16000Codec);
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(channel_->SetPlayout(true));
+
+  // Growing the codec set (no payload-type changes) is allowed mid-playout.
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+  EXPECT_TRUE(voe_.GetPlayout(channel_num));
+  webrtc::CodecInst gcodec;
+  EXPECT_TRUE(engine_.FindWebRtcCodec(kOpusCodec, &gcodec));
+  EXPECT_EQ(kOpusCodec.id, gcodec.pltype);
+}
+
+// Test that with automatic bandwidth, an explicit bitrate request is ignored
+// and each codec keeps its default rate.
+// NOTE(review): TestSendBandwidth() appears to take (codec, expected default
+// rate, autobw flag, requested rate, expected success) -- confirm against the
+// helper's definition, which is outside this hunk.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendBandwidthAuto) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+
+  // Test that when autobw is true, bitrate is kept as the default
+  // value. autobw is true for the following tests.
+
+  // ISAC, default bitrate == 32000.
+  TestSendBandwidth(kIsacCodec, 32000, true, 96000, true);
+
+  // PCMU, default bitrate == 64000.
+  TestSendBandwidth(kPcmuCodec, 64000, true, 96000, true);
+
+  // CELT, default bitrate == 64000.
+  TestSendBandwidth(kCeltCodec, 64000, true, 96000, true);
+
+  // opus, default bitrate == 64000.
+  TestSendBandwidth(kOpusCodec, 64000, true, 96000, true);
+}
+
+// Test that with a fixed (non-auto) bandwidth, multi-rate codecs accept an
+// explicit bitrate.  PCMU is absent here: it is single-rate (see the CBR
+// test below).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendBandwidthFixedMultiRate) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+
+  // Test that we can set bitrate if a multi-rate codec is used.
+  // autobw is false for the following tests.
+
+  // ISAC, default bitrate == 32000.
+  TestSendBandwidth(kIsacCodec, 32000, false, 128000, true);
+
+  // CELT, default bitrate == 64000.
+  TestSendBandwidth(kCeltCodec, 64000, false, 96000, true);
+
+  // opus, default bitrate == 64000.
+  TestSendBandwidth(kOpusCodec, 64000, false, 96000, true);
+}
+
+// Test that bitrate cannot be set for CBR codecs.
+// Bitrate is ignored if it is higher than the fixed bitrate.
+// Bitrate less than the fixed bitrate is an error.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendBandwidthFixedCbr) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetSendCodecs(engine_.codecs()));
+
+  webrtc::CodecInst codec;
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+
+  // PCMU, default bitrate == 64000.
+  codecs.push_back(kPcmuCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, codec));
+  EXPECT_EQ(64000, codec.rate);
+  // Requesting more than the fixed rate succeeds but is silently ignored.
+  EXPECT_TRUE(channel_->SetSendBandwidth(false, 128000));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, codec));
+  EXPECT_EQ(64000, codec.rate);
+  // Requesting less than the fixed rate is rejected outright.
+  EXPECT_FALSE(channel_->SetSendBandwidth(false, 128));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, codec));
+  EXPECT_EQ(64000, codec.rate);
+}
+
+// Test that we apply codecs properly.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  codecs[0].bitrate = 48000;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  // First codec in the list (ISAC) becomes the send codec, with the
+  // overridden payload type and bitrate.
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_EQ(48000, gcodec.rate);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  // No CN codec in the list, so VAD stays off; RED without params => no FEC.
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  // Expected engine defaults: CN/8000 = 13, CN/16000 = 105, DTMF = 106.
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// TODO(pthatcher): Change failure behavior to returning false rather
+// than defaulting to PCMU.
+// Test that if clockrate is not 48000 for opus, we fail by falling back
+// to PCMU.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBadClockrate) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  // Opus is only defined at 48 kHz; any other clockrate is invalid.
+  codecs[0].clockrate = 50000;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that if channels=0 for opus (no stereo param), we fail by falling
+// back to PCMU.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0ChannelsNoStereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].channels = 0;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that if channels=0 for opus, we fail by falling back to PCMU even
+// when stereo=1 is requested -- the stereo param cannot rescue an invalid
+// channel count.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad0Channels1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].channels = 0;
+  codecs[0].params["stereo"] = "1";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that if channels=1 for opus and there's no stereo param, we fail
+// (falling back to PCMU) -- the engine expects opus to be offered with
+// channels=2.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpus1ChannelNoStereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].channels = 1;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that if channels=1 for opus and stereo=0, we fail (fall back to
+// PCMU).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel0Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].channels = 1;
+  codecs[0].params["stereo"] = "0";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that if channels=1 for opus and stereo=1, we fail (fall back to
+// PCMU).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusBad1Channel1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;
+  codecs[0].channels = 1;
+  codecs[0].params["stereo"] = "1";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that with bitrate=0 and no stereo,
+// channels and bitrate are 1 and 32000 (the mono defaults).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0BitrateNoStereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;  // 0 = "use the codec default"
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test that with bitrate=0 and stereo=0,
+// channels and bitrate are 1 and 32000 (same defaults as no stereo param).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate0Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;  // 0 = "use the codec default"
+  codecs[0].params["stereo"] = "0";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test that with bitrate=invalid and stereo=0,
+// channels and bitrate are 1 and 32000 (the mono defaults).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate0Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].params["stereo"] = "0";
+  webrtc::CodecInst gcodec;
+
+  // bitrate that's out of the range between 6000 and 510000 will be considered
+  // as invalid and ignored.
+  // One below the minimum.
+  codecs[0].bitrate = 5999;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(32000, gcodec.rate);
+
+  // One above the maximum.
+  codecs[0].bitrate = 510001;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test that with bitrate=0 and stereo=1,
+// channels and bitrate are 2 and 64000 (the stereo defaults).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGood0Bitrate1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 0;  // 0 = "use the codec default"
+  codecs[0].params["stereo"] = "1";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(2, gcodec.channels);
+  EXPECT_EQ(64000, gcodec.rate);
+}
+
+// Test that with bitrate=invalid and stereo=1,
+// channels and bitrate are 2 and 64000 (the stereo defaults).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodXBitrate1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].params["stereo"] = "1";
+  webrtc::CodecInst gcodec;
+
+  // bitrate that's out of the range between 6000 and 510000 will be considered
+  // as invalid and ignored.
+  // One below the minimum.
+  codecs[0].bitrate = 5999;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(2, gcodec.channels);
+  EXPECT_EQ(64000, gcodec.rate);
+
+  // One above the maximum.
+  codecs[0].bitrate = 510001;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(2, gcodec.channels);
+  EXPECT_EQ(64000, gcodec.rate);
+}
+
+// Test that with bitrate=N and stereo unset,
+// channels and bitrate are 1 and N.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoStereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 96000;  // in the valid [6000, 510000] range
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(111, gcodec.pltype);
+  EXPECT_EQ(96000, gcodec.rate);
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(1, gcodec.channels);
+  // Opus always runs at a 48 kHz clock.
+  EXPECT_EQ(48000, gcodec.plfreq);
+}
+
+// Test that with bitrate=N and stereo=0,
+// channels and bitrate are 1 and N.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate0Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 30000;  // in the valid [6000, 510000] range
+  codecs[0].params["stereo"] = "0";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(30000, gcodec.rate);
+  EXPECT_STREQ("opus", gcodec.plname);
+}
+
+// Test that with bitrate=N and without any parameters,
+// channels and bitrate are 1 and N.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrateNoParameters) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 30000;  // in the valid [6000, 510000] range
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(30000, gcodec.rate);
+  EXPECT_STREQ("opus", gcodec.plname);
+}
+
+// Test that with bitrate=N and stereo=1,
+// channels and bitrate are 2 and N.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusGoodNBitrate1Stereo) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 30000;  // in the valid [6000, 510000] range
+  codecs[0].params["stereo"] = "1";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(2, gcodec.channels);
+  EXPECT_EQ(30000, gcodec.rate);
+  EXPECT_STREQ("opus", gcodec.plname);
+}
+
+// Test that bitrate will be overridden by the "maxaveragebitrate" parameter.
+// Also test that the "maxaveragebitrate" can't be set to values outside the
+// range of 6000 and 510000.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecOpusMaxAverageBitrate) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].bitrate = 30000;
+  webrtc::CodecInst gcodec;
+
+  // Ignore if less than 6000: rate stays at the explicit bitrate.
+  codecs[0].params["maxaveragebitrate"] = "5999";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(30000, gcodec.rate);
+
+  // Ignore if larger than 510000: rate stays at the explicit bitrate.
+  codecs[0].params["maxaveragebitrate"] = "510001";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(30000, gcodec.rate);
+
+  // In range: maxaveragebitrate overrides the explicit bitrate.
+  codecs[0].params["maxaveragebitrate"] = "200000";
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(200000, gcodec.rate);
+}
+
+// Test that we can enable NACK with opus (via the transport-cc style
+// "nack" RTCP feedback parameter on the codec).
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNack) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+                                                    cricket::kParamValueEmpty));
+  // NACK must be off before, and on after, SetSendCodecs().
+  EXPECT_FALSE(voe_.GetNACK(channel_num));
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(voe_.GetNACK(channel_num));
+}
+
+// Test that we can enable NACK on receive streams.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecEnableNackRecvStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  int channel_num1 = voe_.GetLastChannel();
+  // AddRecvStream creates a second fake channel.
+  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  int channel_num2 = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+                                                    cricket::kParamValueEmpty));
+  EXPECT_FALSE(voe_.GetNACK(channel_num1));
+  EXPECT_FALSE(voe_.GetNACK(channel_num2));
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  // NACK is applied to both the send channel and the existing recv channel.
+  EXPECT_TRUE(voe_.GetNACK(channel_num1));
+  EXPECT_TRUE(voe_.GetNACK(channel_num2));
+}
+
+// Test that we can disable NACK by re-sending codecs without the feedback
+// parameter.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecDisableNack) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+                                                    cricket::kParamValueEmpty));
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(voe_.GetNACK(channel_num));
+
+  // Same codec, but without the "nack" feedback param -> NACK turns off.
+  codecs.clear();
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_FALSE(voe_.GetNACK(channel_num));
+}
+
+// Test that we can disable NACK on receive streams.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecDisableNackRecvStreams) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  int channel_num1 = voe_.GetLastChannel();
+  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  int channel_num2 = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kOpusCodec);
+  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+                                                    cricket::kParamValueEmpty));
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(voe_.GetNACK(channel_num1));
+  EXPECT_TRUE(voe_.GetNACK(channel_num2));
+
+  // Dropping the "nack" param must turn NACK off on both channels.
+  codecs.clear();
+  codecs.push_back(kOpusCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_FALSE(voe_.GetNACK(channel_num1));
+  EXPECT_FALSE(voe_.GetNACK(channel_num2));
+}
+
+// Test that NACK is enabled on a new receive stream created after NACK was
+// already negotiated.
+TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamEnableNack) {
+  EXPECT_TRUE(SetupEngine());
+  EXPECT_TRUE(channel_->SetOptions(options_conference_));
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs[0].AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack,
+                                                    cricket::kParamValueEmpty));
+  codecs.push_back(kCn16000Codec);
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_TRUE(voe_.GetNACK(channel_num));
+
+  // Each stream added from now on must inherit the NACK setting.
+  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+  channel_num = voe_.GetLastChannel();
+  EXPECT_TRUE(voe_.GetNACK(channel_num));
+  EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
+  channel_num = voe_.GetLastChannel();
+  EXPECT_TRUE(voe_.GetNACK(channel_num));
+}
+
+// Test that we can apply CELT with stereo mode but fail with mono mode.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCelt) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kCeltCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 96;
+  codecs[0].channels = 2;
+  codecs[0].bitrate = 96000;
+  codecs[1].bitrate = 96000;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_EQ(96000, gcodec.rate);
+  EXPECT_EQ(2, gcodec.channels);
+  EXPECT_STREQ("CELT", gcodec.plname);
+  // Doesn't support mono, expect it to fall back to the next codec in the list.
+  codecs[0].channels = 1;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  // PCMU with its static payload type 0 and default rate.
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_EQ(1, gcodec.channels);
+  EXPECT_EQ(64000, gcodec.rate);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+}
+
+// Test that we can switch back and forth between CELT and ISAC with CN.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsIsacCeltSwitching) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> celt_codecs;
+  celt_codecs.push_back(kCeltCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(celt_codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(110, gcodec.pltype);
+  EXPECT_STREQ("CELT", gcodec.plname);
+
+  // Switch to ISAC (first codec in the list wins).
+  std::vector<cricket::AudioCodec> isac_codecs;
+  isac_codecs.push_back(kIsacCodec);
+  isac_codecs.push_back(kCn16000Codec);
+  isac_codecs.push_back(kCeltCodec);
+  EXPECT_TRUE(channel_->SetSendCodecs(isac_codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(103, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+
+  // And back to CELT again.
+  EXPECT_TRUE(channel_->SetSendCodecs(celt_codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(110, gcodec.pltype);
+  EXPECT_STREQ("CELT", gcodec.plname);
+}
+
+// Test that we handle various ways of specifying bitrate.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBitrate) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);  // bitrate == 32000
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(103, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(32000, gcodec.rate);
+
+  codecs[0].bitrate = 0;         // bitrate == default
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(103, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  // For ISAC, "default" maps to -1, i.e. adaptive/codec-chosen rate.
+  EXPECT_EQ(-1, gcodec.rate);
+
+  codecs[0].bitrate = 28000;     // bitrate == 28000
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(103, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_EQ(28000, gcodec.rate);
+
+  codecs[0] = kPcmuCodec;        // bitrate == 64000
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_EQ(64000, gcodec.rate);
+
+  // PCMU is fixed-rate, so "default" resolves to its only rate, 64000.
+  codecs[0].bitrate = 0;         // bitrate == default
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_EQ(64000, gcodec.rate);
+
+  // Opus mono default is 32000.
+  codecs[0] = kOpusCodec;
+  codecs[0].bitrate = 0;         // bitrate == default
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(111, gcodec.pltype);
+  EXPECT_STREQ("opus", gcodec.plname);
+  EXPECT_EQ(32000, gcodec.rate);
+}
+
+// Test that we fall back to PCMU if no codecs are specified.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsNoCodecs) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  // An empty codec list still "succeeds" -- the engine picks PCMU.
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(0, gcodec.pltype);
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  // Expected engine defaults: CN/8000 = 13, CN/16000 = 105, DTMF = 106.
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(105, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(106, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set VAD and DTMF types correctly.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNandDTMF) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  // TODO(juberti): cn 32000
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kCn8000Codec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kRedCodec);
+  codecs[0].id = 96;
+  codecs[2].id = 97;  // wideband CN
+  codecs[4].id = 98;  // DTMF
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  // A matching CN codec (16K for ISAC-16K) activates VAD.
+  EXPECT_TRUE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  // Narrowband CN keeps its default 13; the overridden types come through.
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we only apply VAD if we have a CN codec that matches the
+// send codec clockrate.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCNNoMatch) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  // Set ISAC(16K) and CN(16K). VAD should be activated.
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kCn16000Codec);
+  codecs[1].id = 97;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetVAD(channel_num));
+  EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
+  // Set PCMU(8K) and CN(16K). VAD should not be activated.
+  codecs[0] = kPcmuCodec;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+  // Set PCMU(8K) and CN(8K). VAD should be activated.
+  codecs[1] = kCn8000Codec;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("PCMU", gcodec.plname);
+  EXPECT_TRUE(voe_.GetVAD(channel_num));
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  // Set ISAC(16K) and CN(8K). VAD should not be activated.
+  codecs[0] = kIsacCodec;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_FALSE(voe_.GetVAD(channel_num));
+}
+
+// Test that we perform case-insensitive matching of codec names.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsCaseInsensitive) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs.push_back(kCn16000Codec);
+  codecs.push_back(kCn8000Codec);
+  codecs.push_back(kTelephoneEventCodec);
+  codecs.push_back(kRedCodec);
+  // Mixed-case name must still match the engine's "ISAC".
+  codecs[0].name = "iSaC";
+  codecs[0].id = 96;
+  codecs[2].id = 97;  // wideband CN
+  codecs[4].id = 98;  // DTMF
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetVAD(channel_num));
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(13, voe_.GetSendCNPayloadType(channel_num, false));
+  EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
+  EXPECT_EQ(98, voe_.GetSendTelephoneEventPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsRED) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  // RED's fmtp is an unnamed param; "96/96" = same primary and secondary
+  // encoding, which is the only form this engine accepts.
+  codecs[0].params[""] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  // ISAC (96) is the send codec; RED (127) wraps it as FEC.
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we set up FEC correctly if params are omitted.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsREDNoParams) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  codecs[1].id = 96;
+  // No RED fmtp at all: the engine still enables FEC with the send codec.
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  EXPECT_TRUE(voe_.GetFEC(channel_num));
+  EXPECT_EQ(127, voe_.GetSendFECPayloadType(channel_num));
+}
+
+// Test that we ignore RED if the parameters aren't named the way we expect.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED1) {
+  EXPECT_TRUE(SetupEngine());
+  int channel_num = voe_.GetLastChannel();
+  std::vector<cricket::AudioCodec> codecs;
+  codecs.push_back(kRedCodec);
+  codecs.push_back(kIsacCodec);
+  codecs.push_back(kPcmuCodec);
+  codecs[0].id = 127;
+  // The RED fmtp must be the unnamed ("") param; "ABC" is not recognized.
+  codecs[0].params["ABC"] = "96/96";
+  codecs[1].id = 96;
+  EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+  webrtc::CodecInst gcodec;
+  EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+  EXPECT_EQ(96, gcodec.pltype);
+  EXPECT_STREQ("ISAC", gcodec.plname);
+  // RED is silently dropped: send codec is applied but FEC stays off.
+  EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses different primary/secondary encoding.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED2) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ codecs[0].params[""] = "96/0";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ webrtc::CodecInst gcodec;
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it uses more than 2 encodings.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED3) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ codecs[0].params[""] = "96/96/96";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ webrtc::CodecInst gcodec;
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it has bogus codec ids.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED4) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ codecs[0].params[""] = "ABC/ABC";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ webrtc::CodecInst gcodec;
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we ignore RED if it refers to a codec that is not present.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsBadRED5) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kRedCodec);
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ codecs[0].id = 127;
+ codecs[0].params[""] = "97/97";
+ codecs[1].id = 96;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ webrtc::CodecInst gcodec;
+ EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+ EXPECT_EQ(96, gcodec.pltype);
+ EXPECT_STREQ("ISAC", gcodec.plname);
+ EXPECT_FALSE(voe_.GetFEC(channel_num));
+}
+
+// Test that we support setting an empty list of recv header extensions.
+TEST_F(WebRtcVoiceEngineTestFake, SetRecvRtpHeaderExtensions) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::RtpHeaderExtension> extensions;
+ int channel_num = voe_.GetLastChannel();
+ bool enable = false;
+ unsigned char id = 0;
+
+ // An empty list shouldn't cause audio-level headers to be enabled.
+ EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+ EXPECT_FALSE(enable);
+
+ // Nor should indicating we can receive the audio-level header.
+ extensions.push_back(cricket::RtpHeaderExtension(
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level", 8));
+ EXPECT_TRUE(channel_->SetRecvRtpHeaderExtensions(extensions));
+ EXPECT_EQ(0, voe_.GetRTPAudioLevelIndicationStatus(
+      channel_num, enable, id));
+ EXPECT_FALSE(enable);
+}
+
+// Test that we support setting certain send header extensions.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendRtpHeaderExtensions) {
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::RtpHeaderExtension> extensions;
+ // NOTE(review): 'extensions' above is unused; the shared helper below builds
+ // its own extension list — confirm and consider removing the local.
+ int channel_num = voe_.GetLastChannel();
+ TestSetSendRtpHeaderExtensions(channel_num);
+}
+
+// Test that we can create a channel and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTestFake, SendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can add and remove send streams.
+TEST_F(WebRtcVoiceEngineTestFake, CreateAndDeleteMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ static const uint32 kSsrcs4[] = {1, 2, 3, 4};
+
+ // Set the global state for sending.
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
+
+    // Verify that we are in a sending state for all the created streams.
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    EXPECT_TRUE(voe_.GetSend(channel_num));
+ }
+
+ // Remove the first send channel, which is the default channel. It will only
+ // recycle the default channel but not delete it.
+ EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs4[0]));
+ // Stream should already be removed from the send stream list.
+ EXPECT_FALSE(channel_->RemoveSendStream(kSsrcs4[0]));
+ // But the default still exists.
+ EXPECT_EQ(0, voe_.GetChannelFromLocalSsrc(kSsrcs4[0]));
+
+ // Delete the rest of send channel streams.
+ for (unsigned int i = 1; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->RemoveSendStream(kSsrcs4[i]));
+    // Stream should already be deleted.
+    EXPECT_FALSE(channel_->RemoveSendStream(kSsrcs4[i]));
+    EXPECT_EQ(-1, voe_.GetChannelFromLocalSsrc(kSsrcs4[i]));
+ }
+}
+
+// Test SetSendCodecs correctly configures the codecs in all send streams.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendCodecsWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ static const uint32 kSsrcs4[] = {1, 2, 3, 4};
+ // Create send streams.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
+ }
+
+ std::vector<cricket::AudioCodec> codecs;
+ // Set ISAC(16K) and CN(16K). VAD should be activated.
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kCn16000Codec);
+ codecs[1].id = 97;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ // Verify ISAC and VAD are correctly configured on all send channels.
+ webrtc::CodecInst gcodec;
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+    EXPECT_STREQ("ISAC", gcodec.plname);
+    EXPECT_TRUE(voe_.GetVAD(channel_num));
+    EXPECT_EQ(97, voe_.GetSendCNPayloadType(channel_num, true));
+ }
+
+ // Change to PCMU(8K) and CN(16K). VAD should not be activated.
+ codecs[0] = kPcmuCodec;
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    EXPECT_EQ(0, voe_.GetSendCodec(channel_num, gcodec));
+    EXPECT_STREQ("PCMU", gcodec.plname);
+    EXPECT_FALSE(voe_.GetVAD(channel_num));
+ }
+}
+
+// Test we can SetSend on all send streams correctly.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ static const uint32 kSsrcs4[] = {1, 2, 3, 4};
+ // Create the send channels; they should start in a SEND_NOTHING state.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
+    int channel_num = voe_.GetLastChannel();
+    EXPECT_FALSE(voe_.GetSend(channel_num));
+ }
+
+ // Set the global state for starting sending.
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    // Verify that we are in a sending state for all the send streams.
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    EXPECT_TRUE(voe_.GetSend(channel_num));
+ }
+
+ // Set the global state for stopping sending.
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ // Start at index 0 so the first (default) stream is verified too.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    // Verify that we are in a stop state for all the send streams.
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    EXPECT_FALSE(voe_.GetSend(channel_num));
+ }
+}
+
+// Test we can set the correct statistics on all send streams.
+TEST_F(WebRtcVoiceEngineTestFake, GetStatsWithMultipleSendStreams) {
+ SetupForMultiSendStream();
+
+ static const uint32 kSsrcs4[] = {1, 2, 3, 4};
+ // Create send streams.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
+ }
+
+ // We need send codec to be set to get all stats.
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::VoiceMediaInfo info;
+ EXPECT_EQ(true, channel_->GetStats(&info));
+ EXPECT_EQ(static_cast<size_t>(ARRAY_SIZE(kSsrcs4)), info.senders.size());
+
+ // Verify the statistic information is correct.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_EQ(kSsrcs4[i], info.senders[i].ssrc);
+    EXPECT_EQ(kPcmuCodec.name, info.senders[i].codec_name);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].bytes_sent);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].packets_sent);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].packets_lost);
+    EXPECT_EQ(cricket::kFractionLostStatValue, info.senders[i].fraction_lost);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].ext_seqnum);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].rtt_ms);
+    EXPECT_EQ(cricket::kIntStatValue, info.senders[i].jitter_ms);
+ }
+
+ // Only the default receive channel should be reported.
+ EXPECT_EQ(1u, info.receivers.size());
+}
+
+// Test that we support setting certain send header extensions on multiple
+// send streams.
+// TODO(review): "Multple" in the test name below is a typo for "Multiple";
+// renaming would change the test's public identifier, so it is left as-is.
+TEST_F(WebRtcVoiceEngineTestFake,
+       SetSendRtpHeaderExtensionsWithMultpleSendStreams) {
+ SetupForMultiSendStream();
+
+ static const uint32 kSsrcs4[] = {1, 2, 3, 4};
+ // Create send streams.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    EXPECT_TRUE(channel_->AddSendStream(
+        cricket::StreamParams::CreateLegacy(kSsrcs4[i])));
+ }
+
+ // Test SendRtpHeaderExtensions on each send channel.
+ for (unsigned int i = 0; i < ARRAY_SIZE(kSsrcs4); ++i) {
+    int channel_num = voe_.GetChannelFromLocalSsrc(kSsrcs4[i]);
+    TestSetSendRtpHeaderExtensions(channel_num);
+ }
+}
+
+// Test that we can add and remove receive streams, and do proper send/playout.
+// We can receive on multiple streams while sending one stream.
+TEST_F(WebRtcVoiceEngineTestFake, PlayoutWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ // channel_num1 is the default (first) channel created by SetupEngine.
+ int channel_num1 = voe_.GetLastChannel();
+
+ // Start playout on the default channel.
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num1));
+
+ // Adding another stream should disable playout on the default channel.
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ int channel_num2 = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(voe_.GetSend(channel_num1));
+ EXPECT_FALSE(voe_.GetSend(channel_num2));
+
+ // Make sure only the new channel is played out.
+ EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+
+ // Adding yet another stream should have stream 2 and 3 enabled for playout.
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
+ int channel_num3 = voe_.GetLastChannel();
+ EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num3));
+ EXPECT_FALSE(voe_.GetSend(channel_num3));
+
+ // Stop sending.
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(voe_.GetSend(channel_num1));
+ EXPECT_FALSE(voe_.GetSend(channel_num2));
+ EXPECT_FALSE(voe_.GetSend(channel_num3));
+
+ // Stop playout.
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num2));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num3));
+
+ // Restart playout and make sure the default channel still is not played out.
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num1));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num2));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num3));
+
+ // Now remove the new streams and verify that the default channel is
+ // played out again.
+ EXPECT_TRUE(channel_->RemoveRecvStream(3));
+ EXPECT_TRUE(channel_->RemoveRecvStream(2));
+
+ EXPECT_TRUE(voe_.GetPlayout(channel_num1));
+}
+
+// Test that we can set the devices to use.
+TEST_F(WebRtcVoiceEngineTestFake, SetDevices) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+                             cricket::kFakeDefaultDeviceId);
+ cricket::Device dev(cricket::kFakeDeviceName,
+                     cricket::kFakeDeviceId);
+
+ // Test SetDevices() while not sending or playing.
+ EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+ // Test SetDevices() while sending and playing.
+ EXPECT_TRUE(engine_.SetLocalMonitor(true));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(voe_.GetRecordingMicrophone());
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(voe_.GetRecordingMicrophone());
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+
+ // Test that failure to open newly selected devices does not prevent opening
+ // ones after that. The set_* flags below make the fake engine fail the
+ // corresponding restart calls.
+ voe_.set_fail_start_recording_microphone(true);
+ voe_.set_playout_fail_channel(channel_num);
+ voe_.set_send_fail_channel(channel_num);
+
+ EXPECT_FALSE(engine_.SetDevices(&default_dev, &default_dev));
+
+ EXPECT_FALSE(voe_.GetRecordingMicrophone());
+ EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num));
+
+ voe_.set_fail_start_recording_microphone(false);
+ voe_.set_playout_fail_channel(-1);
+ voe_.set_send_fail_channel(-1);
+
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(voe_.GetRecordingMicrophone());
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can set the devices to use even if we failed to
+// open the initial ones.
+TEST_F(WebRtcVoiceEngineTestFake, SetDevicesWithInitiallyBadDevices) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::Device default_dev(cricket::kFakeDefaultDeviceName,
+                             cricket::kFakeDefaultDeviceId);
+ cricket::Device dev(cricket::kFakeDeviceName,
+                     cricket::kFakeDeviceId);
+
+ // Test that failure to open devices selected before starting
+ // send/play does not prevent opening newly selected ones after that.
+ voe_.set_fail_start_recording_microphone(true);
+ voe_.set_playout_fail_channel(channel_num);
+ voe_.set_send_fail_channel(channel_num);
+
+ EXPECT_TRUE(engine_.SetDevices(&default_dev, &default_dev));
+
+ EXPECT_FALSE(engine_.SetLocalMonitor(true));
+ EXPECT_FALSE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_FALSE(channel_->SetPlayout(true));
+ EXPECT_FALSE(voe_.GetRecordingMicrophone());
+ EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num));
+
+ voe_.set_fail_start_recording_microphone(false);
+ voe_.set_playout_fail_channel(-1);
+ voe_.set_send_fail_channel(-1);
+
+ EXPECT_TRUE(engine_.SetDevices(&dev, &dev));
+
+ EXPECT_TRUE(voe_.GetRecordingMicrophone());
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can create a channel configured for multi-point conferences,
+// and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTestFake, ConferenceSendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+}
+
+// Test that we can create a channel configured for Codian bridges,
+// and start sending/playing out on it.
+TEST_F(WebRtcVoiceEngineTestFake, CodianSendAndPlayout) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ webrtc::AgcConfig agc_config;
+ EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+ EXPECT_TRUE(channel_->SetOptions(options_adjust_agc_));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(voe_.GetSend(channel_num));
+ EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+ EXPECT_EQ(agc_config.targetLeveldBOv, 10);  // level was attenuated
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ EXPECT_TRUE(voe_.GetPlayout(channel_num));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_NOTHING));
+ EXPECT_FALSE(voe_.GetSend(channel_num));
+ EXPECT_EQ(0, voe_.GetAgcConfig(agc_config));
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);  // level was restored
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(voe_.GetPlayout(channel_num));
+}
+
+// Test that we can set the outgoing SSRC properly.
+// SSRC is set in SetupEngine by calling AddSendStream.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrc) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ unsigned int send_ssrc;
+ EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num, send_ssrc));
+ EXPECT_NE(0U, send_ssrc);
+ // NOTE(review): the second GetLocalSSRC call below repeats the first;
+ // only the kSsrc1 comparison adds coverage — consider merging.
+ EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num, send_ssrc));
+ EXPECT_EQ(kSsrc1, send_ssrc);
+}
+
+TEST_F(WebRtcVoiceEngineTestFake, GetStats) {
+ // Setup. We need send codec to be set to get all stats.
+ EXPECT_TRUE(SetupEngine());
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+
+ cricket::VoiceMediaInfo info;
+ EXPECT_EQ(true, channel_->GetStats(&info));
+ EXPECT_EQ(1u, info.senders.size());
+ EXPECT_EQ(kSsrc1, info.senders[0].ssrc);
+ EXPECT_EQ(kPcmuCodec.name, info.senders[0].codec_name);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].bytes_sent);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].packets_sent);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].packets_lost);
+ EXPECT_EQ(cricket::kFractionLostStatValue, info.senders[0].fraction_lost);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].ext_seqnum);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].rtt_ms);
+ EXPECT_EQ(cricket::kIntStatValue, info.senders[0].jitter_ms);
+ // TODO(sriniv): Add testing for more fields. These are not populated
+ // in FakeWebrtcVoiceEngine yet.
+ // EXPECT_EQ(cricket::kIntStatValue, info.senders[0].audio_level);
+ // EXPECT_EQ(cricket::kIntStatValue, info.senders[0].echo_delay_median_ms);
+ // EXPECT_EQ(cricket::kIntStatValue, info.senders[0].echo_delay_std_ms);
+ // EXPECT_EQ(cricket::kIntStatValue, info.senders[0].echo_return_loss);
+ // EXPECT_EQ(cricket::kIntStatValue,
+ //           info.senders[0].echo_return_loss_enhancement);
+
+ EXPECT_EQ(1u, info.receivers.size());
+ // TODO(sriniv): Add testing for receiver fields.
+}
+
+// Test that we can set the outgoing SSRC properly with multiple streams.
+// SSRC is set in SetupEngine by calling AddSendStream.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ int channel_num1 = voe_.GetLastChannel();
+ unsigned int send_ssrc;
+ EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num1, send_ssrc));
+ EXPECT_EQ(kSsrc1, send_ssrc);
+
+ // A newly added receive stream should reuse the same local send SSRC.
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ int channel_num2 = voe_.GetLastChannel();
+ EXPECT_EQ(0, voe_.GetLocalSSRC(channel_num2, send_ssrc));
+ EXPECT_EQ(kSsrc1, send_ssrc);
+}
+
+// Test that the local SSRC is the same on sending and receiving channels if the
+// receive channel is created before the send channel.
+TEST_F(WebRtcVoiceEngineTestFake, SetSendSsrcAfterCreatingReceiveChannel) {
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ channel_ = engine_.CreateChannel();
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ int receive_channel_num = voe_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddSendStream(
+     cricket::StreamParams::CreateLegacy(1234)));
+ int send_channel_num = voe_.GetLastChannel();
+
+ unsigned int ssrc = 0;
+ EXPECT_EQ(0, voe_.GetLocalSSRC(send_channel_num, ssrc));
+ EXPECT_EQ(1234U, ssrc);
+ ssrc = 0;
+ EXPECT_EQ(0, voe_.GetLocalSSRC(receive_channel_num, ssrc));
+ EXPECT_EQ(1234U, ssrc);
+}
+
+// Test that we can properly receive packets.
+TEST_F(WebRtcVoiceEngineTestFake, Recv) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num, kPcmuFrame,
+                              sizeof(kPcmuFrame)));
+}
+
+// Test that we can properly receive packets on multiple streams.
+TEST_F(WebRtcVoiceEngineTestFake, RecvWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ int channel_num1 = voe_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ int channel_num2 = voe_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
+ int channel_num3 = voe_.GetLastChannel();
+ // Create packets with the right SSRCs. Bytes 8-11 of an RTP header hold
+ // the SSRC (RFC 3550), so SetBE32 at offset 8 stamps packet i with SSRC i.
+ char packets[4][sizeof(kPcmuFrame)];
+ for (size_t i = 0; i < ARRAY_SIZE(packets); ++i) {
+    memcpy(packets[i], kPcmuFrame, sizeof(kPcmuFrame));
+    talk_base::SetBE32(packets[i] + 8, i);
+ }
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[0], sizeof(packets[0]));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[1], sizeof(packets[1]));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num1, packets[1],
+                              sizeof(packets[1])));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[2], sizeof(packets[2]));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num2, packets[2],
+                              sizeof(packets[2])));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num3));
+ DeliverPacket(packets[3], sizeof(packets[3]));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num1));
+ EXPECT_TRUE(voe_.CheckNoPacket(channel_num2));
+ EXPECT_TRUE(voe_.CheckPacket(channel_num3, packets[3],
+                              sizeof(packets[3])));
+ EXPECT_TRUE(channel_->RemoveRecvStream(3));
+ EXPECT_TRUE(channel_->RemoveRecvStream(2));
+ EXPECT_TRUE(channel_->RemoveRecvStream(1));
+}
+
+// Test that we properly handle failures to add a stream.
+TEST_F(WebRtcVoiceEngineTestFake, AddStreamFail) {
+ EXPECT_TRUE(SetupEngine());
+ voe_.set_fail_create_channel(true);
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_FALSE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+
+ // In 1:1 call, we should not try to create a new channel.
+ cricket::AudioOptions options_no_conference_;
+ options_no_conference_.conference_mode.Set(false);
+ EXPECT_TRUE(channel_->SetOptions(options_no_conference_));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+}
+
+// Test that AddRecvStream doesn't create new channel for 1:1 call.
+TEST_F(WebRtcVoiceEngineTestFake, AddRecvStream1On1) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_EQ(channel_num, voe_.GetLastChannel());
+}
+
+// Test that after adding a recv stream, we do not decode more codecs than
+// those previously passed into SetRecvCodecs.
+TEST_F(WebRtcVoiceEngineTestFake, AddRecvStreamUnsupportedCodec) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kIsacCodec);
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetRecvCodecs(codecs));
+ EXPECT_TRUE(channel_->AddRecvStream(
+     cricket::StreamParams::CreateLegacy(kSsrc1)));
+ int channel_num2 = voe_.GetLastChannel();
+ webrtc::CodecInst gcodec;
+ // CELT was never passed to SetRecvCodecs, so looking up its receive
+ // payload type must fail.
+ talk_base::strcpyn(gcodec.plname, ARRAY_SIZE(gcodec.plname), "CELT");
+ gcodec.plfreq = 32000;
+ gcodec.channels = 2;
+ EXPECT_EQ(-1, voe_.GetRecPayloadType(channel_num2, gcodec));
+}
+
+// Test that we properly clean up any streams that were added, even if
+// not explicitly removed.
+TEST_F(WebRtcVoiceEngineTestFake, StreamCleanup) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ EXPECT_EQ(3, voe_.GetNumChannels());  // default channel + 2 added
+ delete channel_;
+ channel_ = NULL;
+ EXPECT_EQ(0, voe_.GetNumChannels());
+}
+
+// Test the InsertDtmf on default send stream.
+TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnDefaultSendStream) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ TestInsertDtmf(0, channel_num);
+}
+
+// Test the InsertDtmf on specified send stream.
+TEST_F(WebRtcVoiceEngineTestFake, InsertDtmfOnSendStream) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ TestInsertDtmf(kSsrc1, channel_num);
+}
+
+// Test that we can play a ringback tone properly in a single-stream call.
+TEST_F(WebRtcVoiceEngineTestFake, PlayRingback) {
+ EXPECT_TRUE(SetupEngine());
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we fail if no ringback tone specified.
+ EXPECT_FALSE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we can set and play a ringback tone.
+ EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+ // Check we can stop the tone manually.
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, false, false));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we stop the tone if a packet arrives.
+ EXPECT_TRUE(channel_->PlayRingbackTone(0, true, true));
+ EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+}
+
+// Test that we can play a ringback tone properly in a multi-stream call.
+TEST_F(WebRtcVoiceEngineTestFake, PlayRingbackWithMultipleStreams) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ // channel_num is the channel for SSRC 2 (the last one added).
+ int channel_num = voe_.GetLastChannel();
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we fail if no ringback tone specified.
+ EXPECT_FALSE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we can set and play a ringback tone on the correct ssrc.
+ EXPECT_TRUE(channel_->SetRingbackTone(kRingbackTone, strlen(kRingbackTone)));
+ EXPECT_FALSE(channel_->PlayRingbackTone(77, true, true));
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+ // Check we can stop the tone manually.
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, false, false));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+ // Check we stop the tone if a packet arrives, but only with the right SSRC.
+ EXPECT_TRUE(channel_->PlayRingbackTone(2, true, true));
+ EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+ // Send a packet with SSRC 1; the tone should not stop.
+ DeliverPacket(kPcmuFrame, sizeof(kPcmuFrame));
+ EXPECT_EQ(1, voe_.IsPlayingFileLocally(channel_num));
+ // Send a packet with SSRC 2; the tone should stop.
+ char packet[sizeof(kPcmuFrame)];
+ memcpy(packet, kPcmuFrame, sizeof(kPcmuFrame));
+ talk_base::SetBE32(packet + 8, 2);
+ DeliverPacket(packet, sizeof(packet));
+ EXPECT_EQ(0, voe_.IsPlayingFileLocally(channel_num));
+}
+
+// Tests creating soundclips, and make sure they come from the right engine.
+TEST_F(WebRtcVoiceEngineTestFake, CreateSoundclip) {
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ soundclip_ = engine_.CreateSoundclip();
+ ASSERT_TRUE(soundclip_ != NULL);
+ // Soundclips must use the dedicated soundclip engine (voe_sc_), not voe_.
+ EXPECT_EQ(0, voe_.GetNumChannels());
+ EXPECT_EQ(1, voe_sc_.GetNumChannels());
+ int channel_num = voe_sc_.GetLastChannel();
+ EXPECT_TRUE(voe_sc_.GetPlayout(channel_num));
+ delete soundclip_;
+ soundclip_ = NULL;
+ EXPECT_EQ(0, voe_sc_.GetNumChannels());
+}
+
+// Tests playing out a fake sound.
+TEST_F(WebRtcVoiceEngineTestFake, PlaySoundclip) {
+ static const char kZeroes[16000] = {};
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+ soundclip_ = engine_.CreateSoundclip();
+ ASSERT_TRUE(soundclip_ != NULL);
+ EXPECT_TRUE(soundclip_->PlaySound(kZeroes, sizeof(kZeroes), 0));
+}
+
+// Tests that error callbacks from the fake VoE are forwarded to the media
+// channel's error listener with the expected error code and SSRC.
+TEST_F(WebRtcVoiceEngineTestFake, MediaEngineCallbackOnError) {
+ talk_base::scoped_ptr<ChannelErrorListener> listener;
+ cricket::WebRtcVoiceMediaChannel* media_channel;
+ unsigned int ssrc = 0;
+
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+
+ media_channel = static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+ listener.reset(new ChannelErrorListener(channel_));
+
+ // Test on WebRtc VoE channel.
+ voe_.TriggerCallbackOnError(media_channel->voe_channel(),
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+ listener->error());
+ EXPECT_NE(-1, voe_.GetLocalSSRC(voe_.GetLastChannel(), ssrc));
+ EXPECT_EQ(ssrc, listener->ssrc());
+
+ // A channel id of -1 means the error is not tied to a specific channel,
+ // so the reported SSRC should be 0.
+ listener->Reset();
+ voe_.TriggerCallbackOnError(-1, VE_TYPING_NOISE_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_TYPING_NOISE_DETECTED,
+ listener->error());
+ EXPECT_EQ(0U, listener->ssrc());
+
+ // Add another stream and test on that.
+ ++ssrc;
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(
+ ssrc)));
+ listener->Reset();
+ voe_.TriggerCallbackOnError(voe_.GetLastChannel(),
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(cricket::VoiceMediaChannel::ERROR_REC_DEVICE_SATURATION,
+ listener->error());
+ EXPECT_EQ(ssrc, listener->ssrc());
+
+ // Testing a non-existing channel: the listener should not be notified.
+ listener->Reset();
+ voe_.TriggerCallbackOnError(voe_.GetLastChannel() + 2,
+ VE_SATURATION_WARNING);
+ EXPECT_EQ(0, listener->error());
+}
+
+// Tests that SetPlayout(true) fails if starting playout fails on any one of
+// the receive channels.
+TEST_F(WebRtcVoiceEngineTestFake, TestSetPlayoutError) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ std::vector<cricket::AudioCodec> codecs;
+ codecs.push_back(kPcmuCodec);
+ EXPECT_TRUE(channel_->SetSendCodecs(codecs));
+ EXPECT_TRUE(channel_->SetSend(cricket::SEND_MICROPHONE));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(2)));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(3)));
+ EXPECT_TRUE(channel_->SetPlayout(true));
+ // Make playout fail on the second-to-last channel: stopping playout still
+ // succeeds, but re-enabling it must now report failure.
+ voe_.set_playout_fail_channel(voe_.GetLastChannel() - 1);
+ EXPECT_TRUE(channel_->SetPlayout(false));
+ EXPECT_FALSE(channel_->SetPlayout(true));
+}
+
+// Tests that registering/unregistering voice processors with the
+// WebRtcVoiceEngine works as expected.
+TEST_F(WebRtcVoiceEngineTestFake, RegisterVoiceProcessor) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ EXPECT_TRUE(channel_->AddRecvStream(
+ cricket::StreamParams::CreateLegacy(kSsrc2)));
+ cricket::FakeMediaProcessor vp_1;
+ cricket::FakeMediaProcessor vp_2;
+
+ // kSsrc2 only has a recv stream, so TX registration fails but RX works;
+ // multiple RX processors can be attached to the same SSRC.
+ EXPECT_FALSE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_TX));
+ EXPECT_TRUE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_RX));
+ EXPECT_TRUE(engine_.RegisterProcessor(kSsrc2, &vp_2, cricket::MPD_RX));
+ voe_.TriggerProcessPacket(cricket::MPD_RX);
+ voe_.TriggerProcessPacket(cricket::MPD_TX);
+
+ EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(1, vp_1.voice_frame_count());
+ EXPECT_EQ(1, vp_2.voice_frame_count());
+
+ // Unregistering one of two RX processors keeps the external processor
+ // registered; only the remaining one sees further frames.
+ EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc2,
+ &vp_2,
+ cricket::MPD_RX));
+ voe_.TriggerProcessPacket(cricket::MPD_RX);
+ EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(1, vp_2.voice_frame_count());
+ EXPECT_EQ(2, vp_1.voice_frame_count());
+
+ EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc2,
+ &vp_1,
+ cricket::MPD_RX));
+ voe_.TriggerProcessPacket(cricket::MPD_RX);
+ EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(2, vp_1.voice_frame_count());
+
+ // kSsrc1 is a send stream: RX registration fails, TX succeeds.
+ EXPECT_FALSE(engine_.RegisterProcessor(kSsrc1, &vp_1, cricket::MPD_RX));
+ EXPECT_TRUE(engine_.RegisterProcessor(kSsrc1, &vp_1, cricket::MPD_TX));
+ voe_.TriggerProcessPacket(cricket::MPD_RX);
+ voe_.TriggerProcessPacket(cricket::MPD_TX);
+ EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(3, vp_1.voice_frame_count());
+
+ // MPD_RX_AND_TX unregisters the TX registration made above.
+ EXPECT_TRUE(engine_.UnregisterProcessor(kSsrc1,
+ &vp_1,
+ cricket::MPD_RX_AND_TX));
+ voe_.TriggerProcessPacket(cricket::MPD_TX);
+ EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(3, vp_1.voice_frame_count());
+ EXPECT_TRUE(channel_->RemoveRecvStream(kSsrc2));
+ EXPECT_FALSE(engine_.RegisterProcessor(kSsrc2, &vp_1, cricket::MPD_RX));
+ EXPECT_FALSE(voe_.IsExternalMediaProcessorRegistered());
+
+ // Test that we can register a processor on the receive channel on SSRC 0.
+ // This tests the 1:1 case when the receive SSRC is unknown.
+ EXPECT_TRUE(engine_.RegisterProcessor(0, &vp_1, cricket::MPD_RX));
+ voe_.TriggerProcessPacket(cricket::MPD_RX);
+ EXPECT_TRUE(voe_.IsExternalMediaProcessorRegistered());
+ EXPECT_EQ(4, vp_1.voice_frame_count());
+ EXPECT_TRUE(engine_.UnregisterProcessor(0,
+ &vp_1,
+ cricket::MPD_RX));
+
+ // The following tests test that FindChannelNumFromSsrc is doing
+ // what we expect.
+ // pick an invalid ssrc and make sure we can't register
+ EXPECT_FALSE(engine_.RegisterProcessor(99,
+ &vp_1,
+ cricket::MPD_RX));
+ EXPECT_TRUE(channel_->AddRecvStream(cricket::StreamParams::CreateLegacy(1)));
+ EXPECT_TRUE(engine_.RegisterProcessor(1,
+ &vp_1,
+ cricket::MPD_RX));
+ EXPECT_TRUE(engine_.UnregisterProcessor(1,
+ &vp_1,
+ cricket::MPD_RX));
+ EXPECT_FALSE(engine_.RegisterProcessor(1,
+ &vp_1,
+ cricket::MPD_TX));
+ EXPECT_TRUE(channel_->RemoveRecvStream(1));
+}
+
+// Tests that engine-level SetAudioOptions applies echo cancellation, AGC,
+// noise suppression, highpass filter, stereo swapping and typing detection
+// settings to VoE, and leaves options that are not set untouched.
+TEST_F(WebRtcVoiceEngineTestFake, SetAudioOptions) {
+ EXPECT_TRUE(SetupEngine());
+
+ bool ec_enabled;
+ webrtc::EcModes ec_mode;
+ bool ec_metrics_enabled;
+ webrtc::AecmModes aecm_mode;
+ bool cng_enabled;
+ bool agc_enabled;
+ webrtc::AgcModes agc_mode;
+ webrtc::AgcConfig agc_config;
+ bool ns_enabled;
+ webrtc::NsModes ns_mode;
+ bool highpass_filter_enabled;
+ bool stereo_swapping_enabled;
+ bool typing_detection_enabled;
+ // Snapshot and verify the default audio-processing state after setup:
+ // EC/AGC/NS/highpass/typing-detection on, stereo swapping off.
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAecmMode(aecm_mode, cng_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetAgcConfig(agc_config);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(ec_metrics_enabled);
+ EXPECT_FALSE(cng_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+ EXPECT_EQ(ec_mode, webrtc::kEcConference);
+ EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
+
+ // Nothing set, so all ignored.
+ cricket::AudioOptions options;
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAecmMode(aecm_mode, cng_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetAgcConfig(agc_config);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(ec_metrics_enabled);
+ EXPECT_FALSE(cng_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+ EXPECT_EQ(ec_mode, webrtc::kEcConference);
+ EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
+
+ // Turn echo cancellation off
+ options.echo_cancellation.Set(false);
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ EXPECT_FALSE(ec_enabled);
+
+ // Turn echo cancellation back on, with settings, and make sure
+ // nothing else changed.
+ options.echo_cancellation.Set(true);
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAecmMode(aecm_mode, cng_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetAgcConfig(agc_config);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(ec_metrics_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+ EXPECT_EQ(ec_mode, webrtc::kEcConference);
+ EXPECT_EQ(ns_mode, webrtc::kNsHighSuppression);
+
+ // Turn off AGC
+ options.auto_gain_control.Set(false);
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ EXPECT_FALSE(agc_enabled);
+
+ // Turn AGC back on
+ options.auto_gain_control.Set(true);
+ options.adjust_agc_delta.Clear();
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ EXPECT_TRUE(agc_enabled);
+ voe_.GetAgcConfig(agc_config);
+ EXPECT_EQ(0, agc_config.targetLeveldBOv);
+
+ // Turn off other options (and stereo swapping on).
+ options.noise_suppression.Set(false);
+ options.highpass_filter.Set(false);
+ options.typing_detection.Set(false);
+ options.stereo_swapping.Set(true);
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_FALSE(typing_detection_enabled);
+ EXPECT_TRUE(stereo_swapping_enabled);
+
+ // Turn on "conference mode" to ensure it has no impact.
+ options.conference_mode.Set(true);
+ ASSERT_TRUE(engine_.SetAudioOptions(options));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_EQ(webrtc::kEcConference, ec_mode);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_EQ(webrtc::kNsHighSuppression, ns_mode);
+}
+
+// Tests the legacy bitmask-based SetOptions API: each flag enables exactly
+// one audio-processing feature while the others stay off (typing detection
+// remains enabled in every case). Also checks the DEFAULT and ALL presets.
+TEST_F(WebRtcVoiceEngineTestFake, SetOptions) {
+ EXPECT_TRUE(SetupEngine());
+
+ bool ec_enabled;
+ webrtc::EcModes ec_mode;
+ bool ec_metrics_enabled;
+ bool agc_enabled;
+ webrtc::AgcModes agc_mode;
+ bool ns_enabled;
+ webrtc::NsModes ns_mode;
+ bool highpass_filter_enabled;
+ bool stereo_swapping_enabled;
+ bool typing_detection_enabled;
+
+ // Empty bitmask: everything off except typing detection.
+ ASSERT_TRUE(engine_.SetOptions(0));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ // Each single-flag case below must enable only its own feature.
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::ECHO_CANCELLATION));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::AUTO_GAIN_CONTROL));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::NOISE_SUPPRESSION));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::HIGHPASS_FILTER));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::STEREO_FLIPPING));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_FALSE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+ EXPECT_FALSE(highpass_filter_enabled);
+ EXPECT_TRUE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ // DEFAULT_AUDIO_OPTIONS: EC + AGC + NS + highpass, no stereo swapping.
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::DEFAULT_AUDIO_OPTIONS));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_FALSE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+
+ // ALL_AUDIO_OPTIONS: everything on, including stereo swapping.
+ ASSERT_TRUE(engine_.SetOptions(
+ cricket::MediaEngineInterface::ALL_AUDIO_OPTIONS));
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetEcMetricsStatus(ec_metrics_enabled);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ highpass_filter_enabled = voe_.IsHighPassFilterEnabled();
+ stereo_swapping_enabled = voe_.IsStereoChannelSwappingEnabled();
+ voe_.GetTypingDetectionStatus(typing_detection_enabled);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+ EXPECT_TRUE(highpass_filter_enabled);
+ EXPECT_TRUE(stereo_swapping_enabled);
+ EXPECT_TRUE(typing_detection_enabled);
+}
+
+// Sets a custom AGC config on VoE before engine Init and verifies that Init
+// leaves every field of the config intact.
+TEST_F(WebRtcVoiceEngineTestFake, InitDoesNotOverwriteDefaultAgcConfig) {
+ webrtc::AgcConfig set_config = {0};
+ set_config.targetLeveldBOv = 3;
+ set_config.digitalCompressionGaindB = 9;
+ set_config.limiterEnable = true;
+ EXPECT_EQ(0, voe_.SetAgcConfig(set_config));
+ EXPECT_TRUE(engine_.Init(talk_base::Thread::Current()));
+
+ webrtc::AgcConfig config = {0};
+ EXPECT_EQ(0, voe_.GetAgcConfig(config));
+ EXPECT_EQ(set_config.targetLeveldBOv, config.targetLeveldBOv);
+ EXPECT_EQ(set_config.digitalCompressionGaindB,
+ config.digitalCompressionGaindB);
+ EXPECT_EQ(set_config.limiterEnable, config.limiterEnable);
+}
+
+
+// Tests that per-channel audio option overrides are applied to VoE when a
+// channel starts sending and reverted to the engine-level options when it
+// stops, and that overrides also take effect while already sending.
+TEST_F(WebRtcVoiceEngineTestFake, SetOptionOverridesViaChannels) {
+ EXPECT_TRUE(SetupEngine());
+ talk_base::scoped_ptr<cricket::VoiceMediaChannel> channel1(
+ engine_.CreateChannel());
+ talk_base::scoped_ptr<cricket::VoiceMediaChannel> channel2(
+ engine_.CreateChannel());
+
+ // Have to add a stream to make SetSend work.
+ cricket::StreamParams stream1;
+ stream1.ssrcs.push_back(1);
+ channel1->AddSendStream(stream1);
+ cricket::StreamParams stream2;
+ stream2.ssrcs.push_back(2);
+ channel2->AddSendStream(stream2);
+
+ // AEC and AGC and NS
+ cricket::AudioOptions options_all;
+ options_all.echo_cancellation.Set(true);
+ options_all.auto_gain_control.Set(true);
+ options_all.noise_suppression.Set(true);
+
+ ASSERT_TRUE(channel1->SetOptions(options_all));
+ cricket::AudioOptions expected_options = options_all;
+ cricket::AudioOptions actual_options;
+ ASSERT_TRUE(channel1->GetOptions(&actual_options));
+ EXPECT_EQ(expected_options, actual_options);
+ ASSERT_TRUE(channel2->SetOptions(options_all));
+ ASSERT_TRUE(channel2->GetOptions(&actual_options));
+ EXPECT_EQ(expected_options, actual_options);
+
+ // unset NS on channel1 only
+ cricket::AudioOptions options_no_ns;
+ options_no_ns.noise_suppression.Set(false);
+ ASSERT_TRUE(channel1->SetOptions(options_no_ns));
+
+ expected_options.echo_cancellation.Set(true);
+ expected_options.auto_gain_control.Set(true);
+ expected_options.noise_suppression.Set(false);
+ ASSERT_TRUE(channel1->GetOptions(&actual_options));
+ EXPECT_EQ(expected_options, actual_options);
+
+ // unset AGC on channel2 only
+ cricket::AudioOptions options_no_agc;
+ options_no_agc.auto_gain_control.Set(false);
+ ASSERT_TRUE(channel2->SetOptions(options_no_agc));
+
+ expected_options.echo_cancellation.Set(true);
+ expected_options.auto_gain_control.Set(false);
+ expected_options.noise_suppression.Set(true);
+ ASSERT_TRUE(channel2->GetOptions(&actual_options));
+ EXPECT_EQ(expected_options, actual_options);
+
+ // Engine-level options: all three on.
+ ASSERT_TRUE(engine_.SetAudioOptions(options_all));
+ bool ec_enabled;
+ webrtc::EcModes ec_mode;
+ bool agc_enabled;
+ webrtc::AgcModes agc_mode;
+ bool ns_enabled;
+ webrtc::NsModes ns_mode;
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+
+ // channel1's no-NS override kicks in while it is sending...
+ channel1->SetSend(cricket::SEND_MICROPHONE);
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+
+ // ...and is reverted when it stops sending.
+ channel1->SetSend(cricket::SEND_NOTHING);
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+
+ // Same for channel2's no-AGC override.
+ channel2->SetSend(cricket::SEND_MICROPHONE);
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+
+ channel2->SetSend(cricket::SEND_NOTHING);
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_TRUE(agc_enabled);
+ EXPECT_TRUE(ns_enabled);
+
+ // Make sure settings take effect while we are sending.
+ ASSERT_TRUE(engine_.SetAudioOptions(options_all));
+ cricket::AudioOptions options_no_agc_nor_ns;
+ options_no_agc_nor_ns.auto_gain_control.Set(false);
+ options_no_agc_nor_ns.noise_suppression.Set(false);
+ channel2->SetSend(cricket::SEND_MICROPHONE);
+ channel2->SetOptions(options_no_agc_nor_ns);
+
+ expected_options.echo_cancellation.Set(true);
+ expected_options.auto_gain_control.Set(false);
+ expected_options.noise_suppression.Set(false);
+ ASSERT_TRUE(channel2->GetOptions(&actual_options));
+ EXPECT_EQ(expected_options, actual_options);
+ voe_.GetEcStatus(ec_enabled, ec_mode);
+ voe_.GetAgcStatus(agc_enabled, agc_mode);
+ voe_.GetNsStatus(ns_enabled, ns_mode);
+ EXPECT_TRUE(ec_enabled);
+ EXPECT_FALSE(agc_enabled);
+ EXPECT_FALSE(ns_enabled);
+}
+
+// Test that GetReceiveChannelNum returns the default channel for the first
+// recv stream in 1-1 calls.
+TEST_F(WebRtcVoiceEngineTestFake, TestGetReceiveChannelNumIn1To1Calls) {
+ EXPECT_TRUE(SetupEngine());
+ cricket::WebRtcVoiceMediaChannel* media_channel =
+ static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+ // Test that GetChannelNum returns the default channel if the SSRC is unknown.
+ EXPECT_EQ(media_channel->voe_channel(),
+ media_channel->GetReceiveChannelNum(0));
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(kSsrc2);
+ EXPECT_TRUE(channel_->AddRecvStream(stream));
+ // The first recv stream should map onto the default channel as well.
+ EXPECT_EQ(media_channel->voe_channel(),
+ media_channel->GetReceiveChannelNum(kSsrc2));
+}
+
+// Test that GetReceiveChannelNum doesn't return the default channel for the
+// first recv stream in conference calls.
+TEST_F(WebRtcVoiceEngineTestFake, TestGetChannelNumInConferenceCalls) {
+ EXPECT_TRUE(SetupEngine());
+ EXPECT_TRUE(channel_->SetOptions(options_conference_));
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(kSsrc2);
+ EXPECT_TRUE(channel_->AddRecvStream(stream));
+ cricket::WebRtcVoiceMediaChannel* media_channel =
+ static_cast<cricket::WebRtcVoiceMediaChannel*>(channel_);
+ // In conference mode the recv stream gets its own channel, numbered above
+ // the default channel.
+ EXPECT_LT(media_channel->voe_channel(),
+ media_channel->GetReceiveChannelNum(kSsrc2));
+}
+
+// Tests per-stream output scaling (left/right volume) get/set, including
+// that scaling an unknown SSRC fails until the stream has been added.
+// SSRC 0 presumably addresses the default/unsignaled stream — TODO confirm.
+TEST_F(WebRtcVoiceEngineTestFake, SetOutputScaling) {
+ EXPECT_TRUE(SetupEngine());
+ double left, right;
+ EXPECT_TRUE(channel_->SetOutputScaling(0, 1, 2));
+ EXPECT_TRUE(channel_->GetOutputScaling(0, &left, &right));
+ EXPECT_DOUBLE_EQ(1, left);
+ EXPECT_DOUBLE_EQ(2, right);
+
+ // Unknown SSRC: setting scaling must fail before the stream exists.
+ EXPECT_FALSE(channel_->SetOutputScaling(kSsrc2, 1, 2));
+ cricket::StreamParams stream;
+ stream.ssrcs.push_back(kSsrc2);
+ EXPECT_TRUE(channel_->AddRecvStream(stream));
+
+ EXPECT_TRUE(channel_->SetOutputScaling(kSsrc2, 2, 1));
+ EXPECT_TRUE(channel_->GetOutputScaling(kSsrc2, &left, &right));
+ EXPECT_DOUBLE_EQ(2, left);
+ EXPECT_DOUBLE_EQ(1, right);
+}
+
+
+// Tests for the actual WebRtc VoE library.
+
+// Tests that the library initializes and shuts down properly.
+TEST(WebRtcVoiceEngineTest, StartupShutdown) {
+ cricket::WebRtcVoiceEngine engine;
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+ // Basic sanity: a channel can be created and destroyed against the real
+ // engine.
+ cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+ EXPECT_TRUE(channel != NULL);
+ delete channel;
+ engine.Terminate();
+
+ // Reinit to catch regression where VoiceEngineObserver reference is lost
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+ engine.Terminate();
+}
+
+// Tests that the logging from the library is cleartext.
+// (DISABLED_ prefix: gtest skips this unless run with
+// --gtest_also_run_disabled_tests.)
+TEST(WebRtcVoiceEngineTest, DISABLED_HasUnencryptedLogging) {
+ cricket::WebRtcVoiceEngine engine;
+ talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+ new talk_base::MemoryStream);
+ size_t size = 0;
+ bool cleartext = true;
+ talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+ engine.SetLogging(talk_base::LS_VERBOSE, "");
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+ EXPECT_TRUE(stream->GetSize(&size));
+ EXPECT_GT(size, 0U);
+ engine.Terminate();
+ talk_base::LogMessage::RemoveLogToStream(stream.get());
+ // Scan the captured log; every byte must be printable ASCII or whitespace.
+ const char* buf = stream->GetBuffer();
+ for (size_t i = 0; i < size && cleartext; ++i) {
+ int ch = static_cast<int>(buf[i]);
+ ASSERT_GE(ch, 0) << "Out of bounds character in WebRtc VoE log: "
+ << std::hex << ch;
+ cleartext = (isprint(ch) || isspace(ch));
+ }
+ EXPECT_TRUE(cleartext);
+}
+
+// Tests we do not see any references to a monitor thread being spun up
+// when initiating the engine.
+TEST(WebRtcVoiceEngineTest, HasNoMonitorThread) {
+ cricket::WebRtcVoiceEngine engine;
+ talk_base::scoped_ptr<talk_base::MemoryStream> stream(
+ new talk_base::MemoryStream);
+ talk_base::LogMessage::AddLogToStream(stream.get(), talk_base::LS_VERBOSE);
+ engine.SetLogging(talk_base::LS_VERBOSE, "");
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+ engine.Terminate();
+ talk_base::LogMessage::RemoveLogToStream(stream.get());
+
+ size_t size = 0;
+ EXPECT_TRUE(stream->GetSize(&size));
+ EXPECT_GT(size, 0U);
+ // NOTE(review): this asserts the log DOES contain "ProcessThread"
+ // (EXPECT_NE npos == found), which reads contrary to the header comment;
+ // the absence of a separate monitor thread is only implicit — confirm
+ // the intended assertion.
+ const std::string logs(stream->GetBuffer());
+ EXPECT_NE(std::string::npos, logs.find("ProcessThread"));
+}
+
+// Tests that the library is configured with the codecs we want.
+TEST(WebRtcVoiceEngineTest, HasCorrectCodecs) {
+ cricket::WebRtcVoiceEngine engine;
+ // Check codecs by name.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "OPUS", 48000, 0, 2, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ISAC", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ISAC", 32000, 0, 1, 0)));
+ // Check that name matching is case-insensitive.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "ILBC", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "iLBC", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "PCMU", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "PCMA", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "G722", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "red", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 48000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 32000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 16000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "CN", 8000, 0, 1, 0)));
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(96, "telephone-event", 8000, 0, 1, 0)));
+ // Check codecs with an id by id.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(0, "", 8000, 0, 1, 0))); // PCMU
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(8, "", 8000, 0, 1, 0))); // PCMA
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(9, "", 16000, 0, 1, 0))); // G722
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(13, "", 8000, 0, 1, 0))); // CN
+ // Check sample/bitrate matching.
+ EXPECT_TRUE(engine.FindCodec(
+ cricket::AudioCodec(0, "PCMU", 8000, 64000, 1, 0)));
+ // Check that bad codecs fail.
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(99, "ABCD", 0, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(88, "", 0, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 0, 2, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 5000, 0, 1, 0)));
+ EXPECT_FALSE(engine.FindCodec(cricket::AudioCodec(0, "", 0, 5000, 1, 0)));
+ // Check that there aren't any extra codecs lying around.
+ EXPECT_EQ(13U, engine.codecs().size());
+ // Verify the payload id of common audio codecs, including CN, ISAC, and G722.
+ for (std::vector<cricket::AudioCodec>::const_iterator it =
+ engine.codecs().begin(); it != engine.codecs().end(); ++it) {
+ if (it->name == "CN" && it->clockrate == 16000) {
+ EXPECT_EQ(105, it->id);
+ } else if (it->name == "CN" && it->clockrate == 32000) {
+ EXPECT_EQ(106, it->id);
+ } else if (it->name == "ISAC" && it->clockrate == 16000) {
+ EXPECT_EQ(103, it->id);
+ } else if (it->name == "ISAC" && it->clockrate == 32000) {
+ EXPECT_EQ(104, it->id);
+ } else if (it->name == "G722" && it->clockrate == 16000) {
+ EXPECT_EQ(9, it->id);
+ } else if (it->name == "telephone-event") {
+ EXPECT_EQ(126, it->id);
+ } else if (it->name == "red") {
+ EXPECT_EQ(127, it->id);
+ } else if (it->name == "opus") {
+ EXPECT_EQ(111, it->id);
+ // Opus must advertise its default ptime bounds as fmtp parameters.
+ ASSERT_NE(it->params.find("minptime"), it->params.end());
+ EXPECT_EQ("10", it->params.find("minptime")->second);
+ ASSERT_NE(it->params.find("maxptime"), it->params.end());
+ EXPECT_EQ("60", it->params.find("maxptime")->second);
+ }
+ }
+
+ engine.Terminate();
+}
+
+// Tests that VoE supports at least 32 channels
+TEST(WebRtcVoiceEngineTest, Has32Channels) {
+ cricket::WebRtcVoiceEngine engine;
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+
+ cricket::VoiceMediaChannel* channels[32];
+ int num_channels = 0;
+
+ // Create channels until creation fails or the array is full.
+ while (num_channels < ARRAY_SIZE(channels)) {
+ cricket::VoiceMediaChannel* channel = engine.CreateChannel();
+ if (!channel)
+ break;
+
+ channels[num_channels++] = channel;
+ }
+
+ // All 32 creations must have succeeded.
+ int expected = ARRAY_SIZE(channels);
+ EXPECT_EQ(expected, num_channels);
+
+ while (num_channels > 0) {
+ delete channels[--num_channels];
+ }
+
+ engine.Terminate();
+}
+
+// Test that we set our preferred codecs properly.
+TEST(WebRtcVoiceEngineTest, SetRecvCodecs) {
+ cricket::WebRtcVoiceEngine engine;
+ EXPECT_TRUE(engine.Init(talk_base::Thread::Current()));
+ cricket::WebRtcVoiceMediaChannel channel(&engine);
+ // The engine's own codec list must be accepted as recv codecs by a channel.
+ EXPECT_TRUE(channel.SetRecvCodecs(engine.codecs()));
+}
+
+#ifdef WIN32
+// Test our workarounds to WebRtc VoE' munging of the coinit count
+TEST(WebRtcVoiceEngineTest, CoInitialize) {
+ cricket::WebRtcVoiceEngine* engine = new cricket::WebRtcVoiceEngine();
+
+ // Initial refcount should be 0.
+ EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+
+ // Engine should start even with COM already inited.
+ EXPECT_TRUE(engine->Init(talk_base::Thread::Current()));
+ engine->Terminate();
+ EXPECT_TRUE(engine->Init(talk_base::Thread::Current()));
+ engine->Terminate();
+
+ // Refcount after terminate should be 1 (in reality 3); test if it is nonzero.
+ // (CoInitializeEx returns S_FALSE when COM is already initialized on the
+ // calling thread.)
+ EXPECT_EQ(S_FALSE, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ // Decrement refcount to (hopefully) 0.
+ CoUninitialize();
+ CoUninitialize();
+ delete engine;
+
+ // Ensure refcount is 0.
+ EXPECT_EQ(S_OK, CoInitializeEx(NULL, COINIT_MULTITHREADED));
+ CoUninitialize();
+}
+#endif
+
+