path: root/Source/WebCore/platform/audio
author     Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/WebCore/platform/audio
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download   WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/WebCore/platform/audio')
-rw-r--r--  Source/WebCore/platform/audio/AudioArray.h | 7
-rw-r--r--  Source/WebCore/platform/audio/AudioBus.cpp | 49
-rw-r--r--  Source/WebCore/platform/audio/AudioBus.h | 15
-rw-r--r--  Source/WebCore/platform/audio/AudioChannel.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/AudioChannel.h | 2
-rw-r--r--  Source/WebCore/platform/audio/AudioDSPKernelProcessor.h | 16
-rw-r--r--  Source/WebCore/platform/audio/AudioDestination.h | 2
-rw-r--r--  Source/WebCore/platform/audio/AudioDestinationConsumer.h | 50
-rw-r--r--  Source/WebCore/platform/audio/AudioFIFO.cpp | 144
-rw-r--r--  Source/WebCore/platform/audio/AudioFIFO.h | 75
-rw-r--r--  Source/WebCore/platform/audio/AudioFileReader.h | 2
-rw-r--r--  Source/WebCore/platform/audio/AudioHardwareListener.cpp (renamed from Source/WebCore/platform/audio/MediaSession.cpp) | 48
-rw-r--r--  Source/WebCore/platform/audio/AudioHardwareListener.h (renamed from Source/WebCore/platform/audio/MediaSession.h) | 84
-rw-r--r--  Source/WebCore/platform/audio/AudioIOCallback.h | 4
-rw-r--r--  Source/WebCore/platform/audio/AudioPullFIFO.cpp | 75
-rw-r--r--  Source/WebCore/platform/audio/AudioPullFIFO.h | 74
-rw-r--r--  Source/WebCore/platform/audio/AudioSession.cpp | 35
-rw-r--r--  Source/WebCore/platform/audio/AudioSession.h | 21
-rw-r--r--  Source/WebCore/platform/audio/AudioSourceProvider.h | 2
-rw-r--r--  Source/WebCore/platform/audio/AudioStreamDescription.h | 71
-rw-r--r--  Source/WebCore/platform/audio/Biquad.cpp | 47
-rw-r--r--  Source/WebCore/platform/audio/Biquad.h | 16
-rw-r--r--  Source/WebCore/platform/audio/Cone.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/Cone.h | 2
-rw-r--r--  Source/WebCore/platform/audio/DirectConvolver.cpp | 29
-rw-r--r--  Source/WebCore/platform/audio/DirectConvolver.h | 9
-rw-r--r--  Source/WebCore/platform/audio/Distance.cpp | 10
-rw-r--r--  Source/WebCore/platform/audio/Distance.h | 20
-rw-r--r--  Source/WebCore/platform/audio/DownSampler.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/DownSampler.h | 2
-rw-r--r--  Source/WebCore/platform/audio/DynamicsCompressor.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/DynamicsCompressor.h | 2
-rw-r--r--  Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/DynamicsCompressorKernel.h | 2
-rw-r--r--  Source/WebCore/platform/audio/EqualPowerPanner.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/EqualPowerPanner.h | 8
-rw-r--r--  Source/WebCore/platform/audio/FFTConvolver.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/FFTConvolver.h | 2
-rw-r--r--  Source/WebCore/platform/audio/FFTFrame.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/FFTFrame.h | 88
-rw-r--r--  Source/WebCore/platform/audio/FFTFrameStub.cpp | 4
-rw-r--r--  Source/WebCore/platform/audio/HRTFDatabase.cpp | 4
-rw-r--r--  Source/WebCore/platform/audio/HRTFDatabase.h | 3
-rw-r--r--  Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp | 10
-rw-r--r--  Source/WebCore/platform/audio/HRTFDatabaseLoader.h | 6
-rw-r--r--  Source/WebCore/platform/audio/HRTFElevation.cpp | 23
-rw-r--r--  Source/WebCore/platform/audio/HRTFElevation.h | 7
-rw-r--r--  Source/WebCore/platform/audio/HRTFKernel.cpp | 4
-rw-r--r--  Source/WebCore/platform/audio/HRTFKernel.h | 6
-rw-r--r--  Source/WebCore/platform/audio/HRTFPanner.cpp | 3
-rw-r--r--  Source/WebCore/platform/audio/HRTFPanner.h | 8
-rw-r--r--  Source/WebCore/platform/audio/MediaSessionManager.cpp | 166
-rw-r--r--  Source/WebCore/platform/audio/MediaSessionManager.h | 85
-rw-r--r--  Source/WebCore/platform/audio/MultiChannelResampler.cpp | 4
-rw-r--r--  Source/WebCore/platform/audio/MultiChannelResampler.h | 2
-rw-r--r--  Source/WebCore/platform/audio/Panner.cpp | 14
-rw-r--r--  Source/WebCore/platform/audio/Panner.h | 26
-rw-r--r--  Source/WebCore/platform/audio/PlatformAudioData.h (renamed from Source/WebCore/platform/audio/AudioSessionListener.h) | 30
-rw-r--r--  Source/WebCore/platform/audio/PlatformMediaSession.cpp | 368
-rw-r--r--  Source/WebCore/platform/audio/PlatformMediaSession.h | 239
-rw-r--r--  Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp | 444
-rw-r--r--  Source/WebCore/platform/audio/PlatformMediaSessionManager.h | 157
-rw-r--r--  Source/WebCore/platform/audio/Reverb.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/Reverb.h | 2
-rw-r--r--  Source/WebCore/platform/audio/ReverbAccumulationBuffer.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/ReverbAccumulationBuffer.h | 2
-rw-r--r--  Source/WebCore/platform/audio/ReverbConvolver.cpp | 17
-rw-r--r--  Source/WebCore/platform/audio/ReverbConvolver.h | 11
-rw-r--r--  Source/WebCore/platform/audio/ReverbConvolverStage.cpp | 7
-rw-r--r--  Source/WebCore/platform/audio/ReverbConvolverStage.h | 2
-rw-r--r--  Source/WebCore/platform/audio/ReverbInputBuffer.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/ReverbInputBuffer.h | 2
-rw-r--r--  Source/WebCore/platform/audio/SincResampler.cpp | 8
-rw-r--r--  Source/WebCore/platform/audio/SincResampler.h | 2
-rw-r--r--  Source/WebCore/platform/audio/UpSampler.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/UpSampler.h | 2
-rw-r--r--  Source/WebCore/platform/audio/VectorMath.cpp | 38
-rw-r--r--  Source/WebCore/platform/audio/WebAudioBufferList.cpp | 101
-rw-r--r--  Source/WebCore/platform/audio/WebAudioBufferList.h | 68
-rw-r--r--  Source/WebCore/platform/audio/ZeroPole.cpp | 2
-rw-r--r--  Source/WebCore/platform/audio/ZeroPole.h | 2
-rw-r--r--  Source/WebCore/platform/audio/glib/AudioBusGLib.cpp (renamed from Source/WebCore/platform/audio/gtk/AudioBusGtk.cpp) | 19
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp | 88
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h | 10
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp | 295
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp | 349
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h | 70
-rw-r--r--  Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp | 222
88 files changed, 2907 insertions, 1062 deletions
diff --git a/Source/WebCore/platform/audio/AudioArray.h b/Source/WebCore/platform/audio/AudioArray.h
index b1fa7a5f4..6307e2b72 100644
--- a/Source/WebCore/platform/audio/AudioArray.h
+++ b/Source/WebCore/platform/audio/AudioArray.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -55,12 +55,7 @@ public:
void allocate(Checked<size_t> n)
{
Checked<unsigned> initialSize = sizeof(T) * n;
-
-#if USE(WEBAUDIO_FFMPEG) || USE(WEBAUDIO_OPENMAX_DL_FFT)
- const size_t alignment = 32;
-#else
const size_t alignment = 16;
-#endif
if (m_allocation)
fastFree(m_allocation);
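
[Note] The AudioArray.h hunk above drops the 32-byte alignment that only the FFmpeg/OpenMAX DL FFT backends needed and keeps a single 16-byte boundary for all builds. A minimal, hedged sketch of that kind of aligned allocation, assuming std::aligned_alloc is available (this is not WebKit's fastMalloc path; allocateAligned16 is an illustrative name):

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Sketch: allocate a zeroed float buffer on a 16-byte boundary, mirroring the
// single alignment the patched AudioArray::allocate() keeps.
static float* allocateAligned16(size_t count)
{
    const size_t alignment = 16;
    size_t bytes = count * sizeof(float);
    // std::aligned_alloc requires the size to be a multiple of the alignment.
    bytes = (bytes + alignment - 1) & ~(alignment - 1);
    float* p = static_cast<float*>(std::aligned_alloc(alignment, bytes));
    if (p)
        std::memset(p, 0, bytes);
    return p;
}

int main()
{
    float* data = allocateAligned16(128);
    assert(reinterpret_cast<uintptr_t>(data) % 16 == 0);
    std::free(data);
    return 0;
}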
diff --git a/Source/WebCore/platform/audio/AudioBus.cpp b/Source/WebCore/platform/audio/AudioBus.cpp
index 077e1f855..a5d8fa2c5 100644
--- a/Source/WebCore/platform/audio/AudioBus.cpp
+++ b/Source/WebCore/platform/audio/AudioBus.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -46,13 +46,13 @@ using namespace VectorMath;
const unsigned MaxBusChannels = 32;
-PassRefPtr<AudioBus> AudioBus::create(unsigned numberOfChannels, size_t length, bool allocate)
+RefPtr<AudioBus> AudioBus::create(unsigned numberOfChannels, size_t length, bool allocate)
{
ASSERT(numberOfChannels <= MaxBusChannels);
if (numberOfChannels > MaxBusChannels)
- return 0;
+ return nullptr;
- return adoptRef(new AudioBus(numberOfChannels, length, allocate));
+ return adoptRef(*new AudioBus(numberOfChannels, length, allocate));
}
AudioBus::AudioBus(unsigned numberOfChannels, size_t length, bool allocate)
@@ -65,7 +65,7 @@ AudioBus::AudioBus(unsigned numberOfChannels, size_t length, bool allocate)
for (unsigned i = 0; i < numberOfChannels; ++i) {
auto channel = allocate ? std::make_unique<AudioChannel>(length) : std::make_unique<AudioChannel>(nullptr, length);
- m_channels.append(std::move(channel));
+ m_channels.uncheckedAppend(WTFMove(channel));
}
m_layout = LayoutCanonical; // for now this is the only layout we define
@@ -167,7 +167,7 @@ bool AudioBus::topologyMatches(const AudioBus& bus) const
return true;
}
-PassRefPtr<AudioBus> AudioBus::createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
+RefPtr<AudioBus> AudioBus::createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame)
{
size_t numberOfSourceFrames = sourceBuffer->length();
unsigned numberOfChannels = sourceBuffer->numberOfChannels();
@@ -176,7 +176,7 @@ PassRefPtr<AudioBus> AudioBus::createBufferFromRange(const AudioBus* sourceBuffe
bool isRangeSafe = startFrame < endFrame && endFrame <= numberOfSourceFrames;
ASSERT(isRangeSafe);
if (!isRangeSafe)
- return 0;
+ return nullptr;
size_t rangeLength = endFrame - startFrame;
@@ -213,6 +213,33 @@ void AudioBus::scale(float scale)
channel(i)->scale(scale);
}
+void AudioBus::copyFromRange(const AudioBus& sourceBus, unsigned startFrame, unsigned endFrame)
+{
+ if (!topologyMatches(sourceBus)) {
+ ASSERT_NOT_REACHED();
+ zero();
+ return;
+ }
+
+ size_t numberOfSourceFrames = sourceBus.length();
+ bool isRangeSafe = startFrame < endFrame && endFrame <= numberOfSourceFrames;
+ ASSERT(isRangeSafe);
+ if (!isRangeSafe) {
+ zero();
+ return;
+ }
+
+ unsigned numberOfChannels = this->numberOfChannels();
+ ASSERT(numberOfChannels <= MaxBusChannels);
+ if (numberOfChannels > MaxBusChannels) {
+ zero();
+ return;
+ }
+
+ for (unsigned i = 0; i < numberOfChannels; ++i)
+ channel(i)->copyFromRange(sourceBus.channel(i), startFrame, endFrame);
+}
+
void AudioBus::copyFrom(const AudioBus& sourceBus, ChannelInterpretation channelInterpretation)
{
if (&sourceBus == this)
@@ -521,12 +548,12 @@ void AudioBus::copyWithSampleAccurateGainValuesFrom(const AudioBus &sourceBus, f
}
}
-PassRefPtr<AudioBus> AudioBus::createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate)
+RefPtr<AudioBus> AudioBus::createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate)
{
// sourceBus's sample-rate must be known.
ASSERT(sourceBus && sourceBus->sampleRate());
if (!sourceBus || !sourceBus->sampleRate())
- return 0;
+ return nullptr;
double sourceSampleRate = sourceBus->sampleRate();
double destinationSampleRate = newSampleRate;
@@ -584,7 +611,7 @@ PassRefPtr<AudioBus> AudioBus::createBySampleRateConverting(const AudioBus* sour
return destinationBus;
}
-PassRefPtr<AudioBus> AudioBus::createByMixingToMono(const AudioBus* sourceBus)
+RefPtr<AudioBus> AudioBus::createByMixingToMono(const AudioBus* sourceBus)
{
if (sourceBus->isSilent())
return create(1, sourceBus->length());
@@ -613,7 +640,7 @@ PassRefPtr<AudioBus> AudioBus::createByMixingToMono(const AudioBus* sourceBus)
}
ASSERT_NOT_REACHED();
- return 0;
+ return nullptr;
}
bool AudioBus::isSilent() const
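
[Note] Besides the PassRefPtr-to-RefPtr and nullptr cleanups, the AudioBus.cpp hunks add AudioBus::copyFromRange(), which mirrors createBufferFromRange() but writes into an already-allocated bus and zeroes the destination whenever the topology or the [startFrame, endFrame) range is invalid. A hedged sketch of that defensive guard on a single channel, using std::vector instead of WebCore's AudioChannel (half-open frame indices as in the diff):

#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch of the copyFromRange() guard logic: zero on invalid input, copy otherwise.
static void copyFromRange(std::vector<float>& destination,
                          const std::vector<float>& source,
                          size_t startFrame, size_t endFrame)
{
    bool isRangeSafe = startFrame < endFrame && endFrame <= source.size();
    if (!isRangeSafe || endFrame - startFrame > destination.size()) {
        std::fill(destination.begin(), destination.end(), 0.0f);
        return;
    }
    std::copy(source.begin() + startFrame, source.begin() + endFrame, destination.begin());
}

int main()
{
    std::vector<float> source { 1, 2, 3, 4, 5, 6, 7, 8 };
    std::vector<float> destination(4);
    copyFromRange(destination, source, 2, 6); // copies frames 2..5
    std::printf("%g %g %g %g\n", destination[0], destination[1], destination[2], destination[3]);
    return 0;
}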
diff --git a/Source/WebCore/platform/audio/AudioBus.h b/Source/WebCore/platform/audio/AudioBus.h
index 19cb69501..d18dbdd94 100644
--- a/Source/WebCore/platform/audio/AudioBus.h
+++ b/Source/WebCore/platform/audio/AudioBus.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -66,7 +66,7 @@ public:
// allocate indicates whether or not to initially have the AudioChannels created with managed storage.
// Normal usage is to pass true here, in which case the AudioChannels will memory-manage their own storage.
// If allocate is false then setChannelMemory() has to be called later on for each channel before the AudioBus is useable...
- static PassRefPtr<AudioBus> create(unsigned numberOfChannels, size_t length, bool allocate = true);
+ static RefPtr<AudioBus> create(unsigned numberOfChannels, size_t length, bool allocate = true);
// Tells the given channel to use an externally allocated buffer.
void setChannelMemory(unsigned channelIndex, float* storage, size_t length);
@@ -75,7 +75,7 @@ public:
unsigned numberOfChannels() const { return m_channels.size(); }
AudioChannel* channel(unsigned channel) { return m_channels[channel].get(); }
- const AudioChannel* channel(unsigned channel) const { return const_cast<AudioBus*>(this)->m_channels[channel].get(); }
+ const AudioChannel* channel(unsigned channel) const { return m_channels[channel].get(); }
AudioChannel* channelByType(unsigned type);
const AudioChannel* channelByType(unsigned type) const;
@@ -104,17 +104,17 @@ public:
// Creates a new buffer from a range in the source buffer.
// 0 may be returned if the range does not fit in the sourceBuffer
- static PassRefPtr<AudioBus> createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
+ static RefPtr<AudioBus> createBufferFromRange(const AudioBus* sourceBuffer, unsigned startFrame, unsigned endFrame);
// Creates a new AudioBus by sample-rate converting sourceBus to the newSampleRate.
// setSampleRate() must have been previously called on sourceBus.
// Note: sample-rate conversion is already handled in the file-reading code for the mac port, so we don't need this.
- static PassRefPtr<AudioBus> createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate);
+ static RefPtr<AudioBus> createBySampleRateConverting(const AudioBus* sourceBus, bool mixToMono, double newSampleRate);
// Creates a new AudioBus by mixing all the channels down to mono.
// If sourceBus is already mono, then the returned AudioBus will simply be a copy.
- static PassRefPtr<AudioBus> createByMixingToMono(const AudioBus* sourceBus);
+ static RefPtr<AudioBus> createByMixingToMono(const AudioBus* sourceBus);
// Scales all samples by the same amount.
void scale(float scale);
@@ -122,6 +122,9 @@ public:
void reset() { m_isFirstTime = true; } // for de-zippering
// Copies the samples from the source bus to this one.
+ void copyFromRange(const AudioBus& sourceBus, unsigned startFrame, unsigned endFrame);
+
+ // Copies the samples from the source bus to this one.
// This is just a simple per-channel copy if the number of channels match, otherwise an up-mix or down-mix is done.
void copyFrom(const AudioBus& sourceBus, ChannelInterpretation = Speakers);
diff --git a/Source/WebCore/platform/audio/AudioChannel.cpp b/Source/WebCore/platform/audio/AudioChannel.cpp
index dce7a3314..65765dff2 100644
--- a/Source/WebCore/platform/audio/AudioChannel.cpp
+++ b/Source/WebCore/platform/audio/AudioChannel.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/AudioChannel.h b/Source/WebCore/platform/audio/AudioChannel.h
index 50ce0b7ba..c47873a11 100644
--- a/Source/WebCore/platform/audio/AudioChannel.h
+++ b/Source/WebCore/platform/audio/AudioChannel.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
index 8b69cf3fd..14393d870 100644
--- a/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
+++ b/Source/WebCore/platform/audio/AudioDSPKernelProcessor.h
@@ -55,15 +55,15 @@ public:
virtual std::unique_ptr<AudioDSPKernel> createKernel() = 0;
// AudioProcessor methods
- virtual void initialize() override;
- virtual void uninitialize() override;
- virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
- virtual void reset() override;
- virtual void setNumberOfChannels(unsigned) override;
- virtual unsigned numberOfChannels() const override { return m_numberOfChannels; }
+ void initialize() override;
+ void uninitialize() override;
+ void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
+ void reset() override;
+ void setNumberOfChannels(unsigned) override;
+ unsigned numberOfChannels() const override { return m_numberOfChannels; }
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
protected:
Vector<std::unique_ptr<AudioDSPKernel>> m_kernels;
diff --git a/Source/WebCore/platform/audio/AudioDestination.h b/Source/WebCore/platform/audio/AudioDestination.h
index 6132d8eda..10639dad2 100644
--- a/Source/WebCore/platform/audio/AudioDestination.h
+++ b/Source/WebCore/platform/audio/AudioDestination.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/AudioDestinationConsumer.h b/Source/WebCore/platform/audio/AudioDestinationConsumer.h
new file mode 100644
index 000000000..4f1f8997b
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioDestinationConsumer.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * 3. Neither the name of Google Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AudioDestinationConsumer_h
+#define AudioDestinationConsumer_h
+
+#include <wtf/RefCounted.h>
+
+namespace WebCore {
+
+class AudioBus;
+
+class AudioDestinationConsumer : public RefCounted<AudioDestinationConsumer> {
+public:
+ virtual ~AudioDestinationConsumer() { }
+
+ virtual void setFormat(size_t numberOfChannels, float sampleRate) = 0;
+ virtual void consumeAudio(AudioBus*, size_t numberOfFrames) = 0;
+};
+
+} // WebCore
+
+#endif // AudioDestinationConsumer_h
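
[Note] AudioDestinationConsumer is a small push-style interface: the owner calls setFormat() once and then consumeAudio() per render quantum. A hedged sketch of what a consumer implementation looks like; FrameCountingConsumer and FakeAudioBus are invented stand-ins so the example compiles without wtf/RefCounted.h or the real AudioBus:

#include <cstddef>
#include <cstdio>

// Standalone stand-in for WebCore::AudioBus, just enough for the sketch.
struct FakeAudioBus { size_t frames = 0; };

// Mirrors the shape of AudioDestinationConsumer (setFormat + consumeAudio).
class FrameCountingConsumer {
public:
    virtual ~FrameCountingConsumer() = default;

    virtual void setFormat(size_t numberOfChannels, float sampleRate)
    {
        m_channels = numberOfChannels;
        m_sampleRate = sampleRate;
    }

    virtual void consumeAudio(FakeAudioBus*, size_t numberOfFrames)
    {
        m_totalFrames += numberOfFrames;
    }

    double secondsConsumed() const { return m_sampleRate ? m_totalFrames / m_sampleRate : 0; }

private:
    size_t m_channels = 0;
    float m_sampleRate = 0;
    double m_totalFrames = 0;
};

int main()
{
    FrameCountingConsumer consumer;
    consumer.setFormat(2, 44100);
    for (int i = 0; i < 100; ++i)
        consumer.consumeAudio(nullptr, 128); // 100 render quanta of 128 frames
    std::printf("%.3f seconds consumed\n", consumer.secondsConsumed());
    return 0;
}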
diff --git a/Source/WebCore/platform/audio/AudioFIFO.cpp b/Source/WebCore/platform/audio/AudioFIFO.cpp
new file mode 100644
index 000000000..0b3aa499f
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioFIFO.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "AudioFIFO.h"
+
+namespace WebCore {
+
+AudioFIFO::AudioFIFO(unsigned numberOfChannels, size_t fifoLength)
+ : m_fifoAudioBus(AudioBus::create(numberOfChannels, fifoLength))
+ , m_fifoLength(fifoLength)
+ , m_framesInFifo(0)
+ , m_readIndex(0)
+ , m_writeIndex(0)
+{
+}
+
+void AudioFIFO::consume(AudioBus* destination, size_t framesToConsume)
+{
+ bool isGood = destination && (framesToConsume <= m_fifoLength) && (framesToConsume <= m_framesInFifo) && (destination->length() >= framesToConsume);
+ ASSERT(isGood);
+ if (!isGood)
+ return;
+
+ // Copy the requested number of samples to the destination.
+
+ size_t part1Length;
+ size_t part2Length;
+ findWrapLengths(m_readIndex, framesToConsume, part1Length, part2Length);
+
+ size_t numberOfChannels = m_fifoAudioBus->numberOfChannels();
+
+ for (size_t channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
+ float* destinationData = destination->channel(channelIndex)->mutableData();
+ const float* sourceData = m_fifoAudioBus->channel(channelIndex)->data();
+
+ bool isCopyGood = ((m_readIndex < m_fifoLength)
+ && (m_readIndex + part1Length) <= m_fifoLength
+ && (part1Length <= destination->length())
+ && (part1Length + part2Length) <= destination->length());
+ ASSERT(isCopyGood);
+ if (!isCopyGood)
+ return;
+
+ memcpy(destinationData, sourceData + m_readIndex, part1Length * sizeof(*sourceData));
+ // Handle wrap around of the FIFO, if needed.
+ if (part2Length)
+ memcpy(destinationData + part1Length, sourceData, part2Length * sizeof(*sourceData));
+ }
+ m_readIndex = updateIndex(m_readIndex, framesToConsume);
+ ASSERT(m_framesInFifo >= framesToConsume);
+ m_framesInFifo -= framesToConsume;
+}
+
+void AudioFIFO::push(const AudioBus* sourceBus)
+{
+ // Copy the sourceBus into the FIFO buffer.
+
+ bool isGood = sourceBus && (m_framesInFifo + sourceBus->length() <= m_fifoLength);
+ if (!isGood)
+ return;
+
+ size_t sourceLength = sourceBus->length();
+ size_t part1Length;
+ size_t part2Length;
+ findWrapLengths(m_writeIndex, sourceLength, part1Length, part2Length);
+
+ size_t numberOfChannels = m_fifoAudioBus->numberOfChannels();
+
+ for (size_t channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
+ float* destination = m_fifoAudioBus->channel(channelIndex)->mutableData();
+ const float* source = sourceBus->channel(channelIndex)->data();
+
+ bool isCopyGood = ((m_writeIndex < m_fifoLength)
+ && (m_writeIndex + part1Length) <= m_fifoLength
+ && part2Length < m_fifoLength
+ && part1Length + part2Length <= sourceLength);
+ ASSERT(isCopyGood);
+ if (!isCopyGood)
+ return;
+
+ memcpy(destination + m_writeIndex, source, part1Length * sizeof(*destination));
+
+ // Handle wrap around of the FIFO, if needed.
+ if (part2Length)
+ memcpy(destination, source + part1Length, part2Length * sizeof(*destination));
+ }
+
+ m_framesInFifo += sourceLength;
+ ASSERT(m_framesInFifo <= m_fifoLength);
+ m_writeIndex = updateIndex(m_writeIndex, sourceLength);
+}
+
+void AudioFIFO::findWrapLengths(size_t index, size_t size, size_t& part1Length, size_t& part2Length)
+{
+ ASSERT_WITH_SECURITY_IMPLICATION(index < m_fifoLength && size <= m_fifoLength);
+ if (index < m_fifoLength && size <= m_fifoLength) {
+ if (index + size > m_fifoLength) {
+ // Need to wrap. Figure out the length of each piece.
+ part1Length = m_fifoLength - index;
+ part2Length = size - part1Length;
+ } else {
+ // No wrap needed.
+ part1Length = size;
+ part2Length = 0;
+ }
+ } else {
+ // Invalid values for index or size. Set the part lengths to zero so nothing is copied.
+ part1Length = 0;
+ part2Length = 0;
+ }
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
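
[Note] The heart of the new AudioFIFO is findWrapLengths(), which splits a read or write of `size` frames starting at `index` into at most two contiguous copies of a ring of m_fifoLength frames. A standalone restatement of that split (same arithmetic as the function above, none of the WebCore types):

#include <cstddef>
#include <cstdio>

// Same wrap computation as AudioFIFO::findWrapLengths(): part1 runs from
// `index` to the end of the ring, part2 is whatever spills back to the start.
static void findWrapLengths(size_t index, size_t size, size_t fifoLength,
                            size_t& part1Length, size_t& part2Length)
{
    if (index < fifoLength && size <= fifoLength) {
        if (index + size > fifoLength) {
            part1Length = fifoLength - index; // up to the end of the buffer
            part2Length = size - part1Length; // wrapped-around remainder
        } else {
            part1Length = size;
            part2Length = 0;
        }
    } else {
        // Invalid arguments: copy nothing, as the WebCore code does.
        part1Length = 0;
        part2Length = 0;
    }
}

int main()
{
    size_t part1 = 0, part2 = 0;
    findWrapLengths(/* index */ 96, /* size */ 64, /* fifoLength */ 128, part1, part2);
    std::printf("part1=%zu part2=%zu\n", part1, part2); // part1=32 part2=32
    return 0;
}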
diff --git a/Source/WebCore/platform/audio/AudioFIFO.h b/Source/WebCore/platform/audio/AudioFIFO.h
new file mode 100644
index 000000000..75c9abd74
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioFIFO.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AudioFIFO_h
+#define AudioFIFO_h
+
+#include "AudioBus.h"
+
+namespace WebCore {
+
+class AudioFIFO {
+public:
+ // Create a FIFO large enough to hold |fifoLength| frames of data of |numberOfChannels| channels.
+ AudioFIFO(unsigned numberOfChannels, size_t fifoLength);
+
+ // Push the data from the bus into the FIFO.
+ void push(const AudioBus*);
+
+ // Consume |framesToConsume| frames of data from the FIFO and put them in |destination|. The
+ // corresponding frames are removed from the FIFO.
+ void consume(AudioBus* destination, size_t framesToConsume);
+
+ // Number of frames of data that are currently in the FIFO.
+ size_t framesInFifo() const { return m_framesInFifo; }
+
+private:
+ // Update the FIFO index by the step, with appropriate wrapping around the endpoint.
+ int updateIndex(int index, int step) { return (index + step) % m_fifoLength; }
+
+ void findWrapLengths(size_t index, size_t providerSize, size_t& part1Length, size_t& part2Length);
+
+ // The FIFO itself. In reality, the FIFO is a circular buffer.
+ RefPtr<AudioBus> m_fifoAudioBus;
+
+ // The total available space in the FIFO.
+ size_t m_fifoLength;
+
+ // The number of actual elements in the FIFO
+ size_t m_framesInFifo;
+
+ // Where to start reading from the FIFO.
+ size_t m_readIndex;
+
+ // Where to start writing to the FIFO.
+ size_t m_writeIndex;
+};
+
+} // namespace WebCore
+
+#endif // AudioFIFO.h
diff --git a/Source/WebCore/platform/audio/AudioFileReader.h b/Source/WebCore/platform/audio/AudioFileReader.h
index 2130eecc1..9a801fbd4 100644
--- a/Source/WebCore/platform/audio/AudioFileReader.h
+++ b/Source/WebCore/platform/audio/AudioFileReader.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/MediaSession.cpp b/Source/WebCore/platform/audio/AudioHardwareListener.cpp
index a35d192c6..6d2df0011 100644
--- a/Source/WebCore/platform/audio/MediaSession.cpp
+++ b/Source/WebCore/platform/audio/AudioHardwareListener.cpp
@@ -24,51 +24,25 @@
*/
#include "config.h"
-#include "MediaSession.h"
-
-#include "HTMLMediaElement.h"
-#include "Logging.h"
-#include "MediaSessionManager.h"
+#include "AudioHardwareListener.h"
namespace WebCore {
-std::unique_ptr<MediaSession> MediaSession::create(MediaSessionClient& client)
+#if !PLATFORM(MAC)
+Ref<AudioHardwareListener> AudioHardwareListener::create(Client& client)
{
- return std::make_unique<MediaSession>(client);
+ return adoptRef(*new AudioHardwareListener(client));
}
+#endif
-MediaSession::MediaSession(MediaSessionClient& client)
+AudioHardwareListener::AudioHardwareListener(Client& client)
: m_client(client)
- , m_state(Running)
-{
- m_type = m_client.mediaType();
- ASSERT(m_type >= None && m_type <= WebAudio);
- MediaSessionManager::sharedManager().addSession(*this);
-}
-
-MediaSession::~MediaSession()
-{
- MediaSessionManager::sharedManager().removeSession(*this);
-}
-
-void MediaSession::beginInterruption()
-{
- LOG(Media, "MediaSession::beginInterruption");
- m_state = Interrupted;
- m_client.beginInterruption();
-}
-
-void MediaSession::endInterruption(EndInterruptionFlags flags)
-{
- LOG(Media, "MediaSession::endInterruption");
- m_state = Running;
- m_client.endInterruption(flags);
-}
-
-void MediaSession::pauseSession()
+ , m_activity(AudioHardwareActivityType::Unknown)
+ , m_outputDeviceSupportsLowPowerMode(false)
{
- LOG(Media, "MediaSession::pauseSession");
- m_client.pausePlayback();
+#if PLATFORM(IOS)
+ m_outputDeviceSupportsLowPowerMode = true;
+#endif
}
}
diff --git a/Source/WebCore/platform/audio/MediaSession.h b/Source/WebCore/platform/audio/AudioHardwareListener.h
index b53fead09..a59bc20f0 100644
--- a/Source/WebCore/platform/audio/MediaSession.h
+++ b/Source/WebCore/platform/audio/AudioHardwareListener.h
@@ -23,72 +23,48 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaSession_h
-#define MediaSession_h
+#ifndef AudioHardwareListener_h
+#define AudioHardwareListener_h
-#include <wtf/Noncopyable.h>
+#include "PlatformExportMacros.h"
+#include <wtf/Ref.h>
+#include <wtf/RefCounted.h>
namespace WebCore {
-
-class MediaSessionClient;
-
-class MediaSession {
-public:
- static std::unique_ptr<MediaSession> create(MediaSessionClient&);
-
- MediaSession(MediaSessionClient&);
- virtual ~MediaSession();
-
- enum MediaType {
- None = 0,
- Video,
- Audio,
- WebAudio,
- };
- MediaType mediaType() const { return m_type; }
-
- enum State {
- Running,
- Interrupted,
- };
- State state() const { return m_state; }
- void setState(State state) { m_state = state; }
+enum class AudioHardwareActivityType {
+ Unknown,
+ IsActive,
+ IsInactive
+};
- enum EndInterruptionFlags {
- NoFlags = 0,
- MayResumePlaying = 1 << 0,
+class AudioHardwareListener : public RefCounted<AudioHardwareListener> {
+public:
+ class Client {
+ public:
+ virtual ~Client() { }
+ virtual void audioHardwareDidBecomeActive() = 0;
+ virtual void audioHardwareDidBecomeInactive() = 0;
+ virtual void audioOutputDeviceChanged() = 0;
};
- void beginInterruption();
- void endInterruption(EndInterruptionFlags);
- void pauseSession();
+ WEBCORE_EXPORT static Ref<AudioHardwareListener> create(Client&);
+ virtual ~AudioHardwareListener() { }
+
+ AudioHardwareActivityType hardwareActivity() const { return m_activity; }
+ bool outputDeviceSupportsLowPowerMode() const { return m_outputDeviceSupportsLowPowerMode; }
protected:
- MediaSessionClient& client() const { return m_client; }
+ AudioHardwareListener(Client&);
-private:
- MediaSessionClient& m_client;
- MediaType m_type;
- State m_state;
-};
+ void setHardwareActivity(AudioHardwareActivityType activity) { m_activity = activity; }
+ void setOutputDeviceSupportsLowPowerMode(bool support) { m_outputDeviceSupportsLowPowerMode = support; }
-class MediaSessionClient {
- WTF_MAKE_NONCOPYABLE(MediaSessionClient);
-public:
- MediaSessionClient() { }
-
- virtual MediaSession::MediaType mediaType() const = 0;
-
- virtual void beginInterruption() { }
- virtual void endInterruption(MediaSession::EndInterruptionFlags) { }
-
- virtual void pausePlayback() = 0;
-
-protected:
- virtual ~MediaSessionClient() { }
+ Client& m_client;
+ AudioHardwareActivityType m_activity;
+ bool m_outputDeviceSupportsLowPowerMode;
};
}
-#endif // MediaSession_h
+#endif // AudioHardwareListener_h
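
[Note] The MediaSession/MediaSessionManager pair leaves this directory (it reappears below as PlatformMediaSession/PlatformMediaSessionManager), and what remains in these files is AudioHardwareListener: a ref-counted observer whose Client is told when audio hardware becomes active or inactive, or when the output device changes. A hedged sketch of implementing that Client interface; the pure-virtual signatures are the ones added in the header above, while LoggingClient is an invented name and the interface is re-declared locally so the example stands alone:

#include <cstdio>

// Local re-declaration of the Client interface from AudioHardwareListener.h.
class Client {
public:
    virtual ~Client() = default;
    virtual void audioHardwareDidBecomeActive() = 0;
    virtual void audioHardwareDidBecomeInactive() = 0;
    virtual void audioOutputDeviceChanged() = 0;
};

// Hypothetical client that just logs the callbacks.
class LoggingClient final : public Client {
public:
    void audioHardwareDidBecomeActive() override { std::puts("audio hardware active"); }
    void audioHardwareDidBecomeInactive() override { std::puts("audio hardware inactive"); }
    void audioOutputDeviceChanged() override { std::puts("output device changed"); }
};

int main()
{
    LoggingClient client;
    // In WebCore this would be handed to AudioHardwareListener::create(client).
    client.audioHardwareDidBecomeActive();
    client.audioOutputDeviceChanged();
    return 0;
}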
diff --git a/Source/WebCore/platform/audio/AudioIOCallback.h b/Source/WebCore/platform/audio/AudioIOCallback.h
index 2cc3a9df2..ad23cef6a 100644
--- a/Source/WebCore/platform/audio/AudioIOCallback.h
+++ b/Source/WebCore/platform/audio/AudioIOCallback.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -40,6 +40,8 @@ public:
// Optional audio input is given in sourceBus (if it's not 0).
virtual void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t framesToProcess) = 0;
+ virtual void isPlayingDidChange() = 0;
+
virtual ~AudioIOCallback() { }
};
diff --git a/Source/WebCore/platform/audio/AudioPullFIFO.cpp b/Source/WebCore/platform/audio/AudioPullFIFO.cpp
new file mode 100644
index 000000000..2b1a62226
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioPullFIFO.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "AudioPullFIFO.h"
+
+namespace WebCore {
+
+AudioPullFIFO::AudioPullFIFO(AudioSourceProvider& audioProvider, unsigned numberOfChannels, size_t fifoLength, size_t providerSize)
+ : m_provider(audioProvider)
+ , m_fifo(numberOfChannels, fifoLength)
+ , m_providerSize(providerSize)
+ , m_tempBus(AudioBus::create(numberOfChannels, providerSize))
+{
+}
+
+void AudioPullFIFO::consume(AudioBus* destination, size_t framesToConsume)
+{
+ if (!destination)
+ return;
+
+ if (framesToConsume > m_fifo.framesInFifo()) {
+ // We don't have enough data in the FIFO to fulfill the request. Ask for more data.
+ fillBuffer(framesToConsume - m_fifo.framesInFifo());
+ }
+
+ m_fifo.consume(destination, framesToConsume);
+}
+
+void AudioPullFIFO::fillBuffer(size_t numberOfFrames)
+{
+ // Keep asking the provider to give us data until we have received at least |numberOfFrames| of
+ // data. Stuff the data into the FIFO.
+ size_t framesProvided = 0;
+
+ while (framesProvided < numberOfFrames) {
+ m_provider.provideInput(m_tempBus.get(), m_providerSize);
+
+ m_fifo.push(m_tempBus.get());
+
+ framesProvided += m_providerSize;
+ }
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/platform/audio/AudioPullFIFO.h b/Source/WebCore/platform/audio/AudioPullFIFO.h
new file mode 100644
index 000000000..bfbe983a5
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioPullFIFO.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AudioPullFIFO_h
+#define AudioPullFIFO_h
+
+#include "AudioBus.h"
+#include "AudioFIFO.h"
+#include "AudioSourceProvider.h"
+
+namespace WebCore {
+
+// A FIFO (First In First Out) buffer to handle mismatches in buffer sizes between a provider and
+// receiver. The receiver will "pull" data from this FIFO. If data is already available in the
+// FIFO, it is provided to the receiver. If insufficient data is available to satisfy the request,
+// the FIFO will ask the provider for more data when necessary to fulfill a request. Contrast this
+// with a "push" FIFO, where the sender pushes data to the FIFO which will itself push the data to
+// the receiver when the FIFO is full.
+class AudioPullFIFO {
+public:
+ // Create a FIFO that gets data from |provider|. The FIFO will be large enough to hold
+ // |fifoLength| frames of data of |numberOfChannels| channels. The AudioSourceProvider will be
+ // asked to produce |providerSize| frames when the FIFO needs more data.
+ AudioPullFIFO(AudioSourceProvider& audioProvider, unsigned numberOfChannels, size_t fifoLength, size_t providerSize);
+
+ // Read |framesToConsume| frames from the FIFO into the destination. If the FIFO does not have
+ // enough data, we ask the |provider| to get more data to fulfill the request.
+ void consume(AudioBus* destination, size_t framesToConsume);
+
+private:
+ // Fill the FIFO buffer with at least |numberOfFrames| more data.
+ void fillBuffer(size_t numberOfFrames);
+
+ // The provider of the data in our FIFO.
+ AudioSourceProvider& m_provider;
+
+ // The actual FIFO
+ AudioFIFO m_fifo;
+
+ // Number of frames of data that the provider will produce per call.
+ unsigned int m_providerSize;
+
+ // Temporary workspace to hold the data from the provider.
+ RefPtr<AudioBus> m_tempBus;
+};
+
+} // namespace WebCore
+
+#endif // AudioPullFIFO.h
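
[Note] AudioPullFIFO bridges a provider that produces fixed-size blocks (providerSize frames per provideInput() call) and a consumer that may request arbitrary amounts: if the ring does not hold enough frames, fillBuffer() keeps pulling whole provider blocks until the request is covered. A hedged, self-contained sketch of that pull policy with plain containers; ToyPullFIFO and BlockProvider are invented names, and only the "pull whole blocks until the request is covered" behaviour is taken from the code above:

#include <cstdio>
#include <deque>
#include <vector>

// Hypothetical fixed-block provider: always delivers `blockSize` frames.
struct BlockProvider {
    size_t blockSize;
    float next = 0;
    std::vector<float> provideInput()
    {
        std::vector<float> block(blockSize);
        for (float& sample : block)
            sample = next++;
        return block;
    }
};

// Toy version of the AudioPullFIFO policy: top up from the provider in whole
// blocks whenever a consume() request exceeds what is buffered.
struct ToyPullFIFO {
    BlockProvider& provider;
    std::deque<float> fifo;

    std::vector<float> consume(size_t framesToConsume)
    {
        while (fifo.size() < framesToConsume) {
            for (float sample : provider.provideInput())
                fifo.push_back(sample);
        }
        std::vector<float> out(fifo.begin(), fifo.begin() + framesToConsume);
        fifo.erase(fifo.begin(), fifo.begin() + framesToConsume);
        return out;
    }
};

int main()
{
    BlockProvider provider { /* blockSize */ 128 };
    ToyPullFIFO fifo { provider };
    auto chunk = fifo.consume(300); // needs three 128-frame pulls
    std::printf("got %zu frames, first=%g last=%g\n", chunk.size(), chunk.front(), chunk.back());
    return 0;
}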
diff --git a/Source/WebCore/platform/audio/AudioSession.cpp b/Source/WebCore/platform/audio/AudioSession.cpp
index 7529a1bd9..3c2daca01 100644
--- a/Source/WebCore/platform/audio/AudioSession.cpp
+++ b/Source/WebCore/platform/audio/AudioSession.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,40 +28,18 @@
#if USE(AUDIO_SESSION)
-#include "AudioSessionListener.h"
#include "NotImplemented.h"
+#include <wtf/NeverDestroyed.h>
namespace WebCore {
AudioSession& AudioSession::sharedSession()
{
- DEFINE_STATIC_LOCAL(AudioSession, session, ());
+ static NeverDestroyed<AudioSession> session;
return session;
}
-void AudioSession::addListener(AudioSessionListener* listener)
-{
- m_listeners.add(listener);
-}
-
-void AudioSession::removeListener(AudioSessionListener* listener)
-{
- m_listeners.remove(listener);
-}
-
-void AudioSession::beganAudioInterruption()
-{
- for (HashSet<AudioSessionListener*>::iterator i = m_listeners.begin(); i != m_listeners.end(); ++i)
- (*i)->beganAudioInterruption();
-}
-
-void AudioSession::endedAudioInterruption()
-{
- for (HashSet<AudioSessionListener*>::iterator i = m_listeners.begin(); i != m_listeners.end(); ++i)
- (*i)->endedAudioInterruption();
-}
-
-#if !PLATFORM(IOS) && !PLATFORM(MAC)
+#if !PLATFORM(COCOA)
class AudioSessionPrivate {
};
@@ -109,9 +87,10 @@ size_t AudioSession::numberOfOutputChannels() const
return 0;
}
-void AudioSession::setActive(bool)
+bool AudioSession::tryToSetActive(bool)
{
notImplemented();
+ return true;
}
size_t AudioSession::preferredBufferSize() const
@@ -124,7 +103,7 @@ void AudioSession::setPreferredBufferSize(size_t)
{
notImplemented();
}
-#endif // !PLATFORM(IOS)
+#endif // !PLATFORM(COCOA)
}
diff --git a/Source/WebCore/platform/audio/AudioSession.h b/Source/WebCore/platform/audio/AudioSession.h
index 4fff5a668..e51a72173 100644
--- a/Source/WebCore/platform/audio/AudioSession.h
+++ b/Source/WebCore/platform/audio/AudioSession.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,21 +26,22 @@
#ifndef AudioSession_h
#define AudioSession_h
+#include "PlatformExportMacros.h"
+
#if USE(AUDIO_SESSION)
#include <memory>
-#include <wtf/HashSet.h>
+#include <wtf/NeverDestroyed.h>
#include <wtf/Noncopyable.h>
namespace WebCore {
-class AudioSessionListener;
class AudioSessionPrivate;
class AudioSession {
WTF_MAKE_NONCOPYABLE(AudioSession);
public:
- static AudioSession& sharedSession();
+ WEBCORE_EXPORT static AudioSession& sharedSession();
enum CategoryType {
None,
@@ -51,32 +52,26 @@ public:
PlayAndRecord,
AudioProcessing,
};
- void setCategory(CategoryType);
+ WEBCORE_EXPORT void setCategory(CategoryType);
CategoryType category() const;
void setCategoryOverride(CategoryType);
CategoryType categoryOverride() const;
- void addListener(AudioSessionListener*);
- void removeListener(AudioSessionListener*);
-
float sampleRate() const;
size_t numberOfOutputChannels() const;
- void setActive(bool);
+ bool tryToSetActive(bool);
size_t preferredBufferSize() const;
void setPreferredBufferSize(size_t);
- void beganAudioInterruption();
- void endedAudioInterruption();
-
private:
+ friend class NeverDestroyed<AudioSession>;
AudioSession();
~AudioSession();
std::unique_ptr<AudioSessionPrivate> m_private;
- HashSet<AudioSessionListener*> m_listeners;
};
}
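
[Note] AudioSession loses its listener set (interruption handling moves to the media-session classes), setActive() becomes tryToSetActive() so callers can observe failure, and the shared instance switches to NeverDestroyed, with `friend class NeverDestroyed<AudioSession>` granting the wrapper access to the private constructor. A hedged sketch of that singleton pattern using a minimal local stand-in for WTF::NeverDestroyed; the Session class is illustrative, not WebCore's:

#include <cstdio>
#include <new>

// Minimal stand-in for WTF::NeverDestroyed: constructs T in static storage
// and intentionally never runs its destructor.
template<typename T>
class NeverDestroyed {
public:
    template<typename... Args>
    NeverDestroyed(Args&&... args) { new (m_storage) T(static_cast<Args&&>(args)...); }
    operator T&() { return *reinterpret_cast<T*>(m_storage); }
private:
    alignas(T) unsigned char m_storage[sizeof(T)];
};

// Illustrative session type following the AudioSession pattern above:
// private constructor, NeverDestroyed as a friend, shared accessor.
class Session {
public:
    static Session& sharedSession()
    {
        static NeverDestroyed<Session> session;
        return session;
    }
    bool tryToSetActive(bool active) { m_active = active; return true; }
    bool isActive() const { return m_active; }

private:
    friend class NeverDestroyed<Session>;
    Session() = default;
    ~Session() = default;
    bool m_active { false };
};

int main()
{
    Session::sharedSession().tryToSetActive(true);
    std::printf("active=%d\n", Session::sharedSession().isActive());
    return 0;
}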
diff --git a/Source/WebCore/platform/audio/AudioSourceProvider.h b/Source/WebCore/platform/audio/AudioSourceProvider.h
index d891b3911..7708b5264 100644
--- a/Source/WebCore/platform/audio/AudioSourceProvider.h
+++ b/Source/WebCore/platform/audio/AudioSourceProvider.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/AudioStreamDescription.h b/Source/WebCore/platform/audio/AudioStreamDescription.h
new file mode 100644
index 000000000..e440dc90a
--- /dev/null
+++ b/Source/WebCore/platform/audio/AudioStreamDescription.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/Variant.h>
+
+typedef struct AudioStreamBasicDescription AudioStreamBasicDescription;
+
+namespace WebCore {
+
+struct PlatformDescription {
+ enum {
+ None,
+ CAAudioStreamBasicType,
+ } type;
+ Variant<std::nullptr_t, const AudioStreamBasicDescription*> description;
+};
+
+class AudioStreamDescription {
+public:
+ virtual ~AudioStreamDescription() = default;
+
+ virtual const PlatformDescription& platformDescription() const = 0;
+
+ enum PCMFormat {
+ None,
+ Int16,
+ Int32,
+ Float32,
+ Float64
+ };
+ virtual PCMFormat format() const = 0;
+
+ virtual double sampleRate() const = 0;
+
+ virtual bool isPCM() const { return format() != None; }
+ virtual bool isInterleaved() const = 0;
+ virtual bool isSignedInteger() const = 0;
+ virtual bool isFloat() const = 0;
+ virtual bool isNativeEndian() const = 0;
+
+ virtual uint32_t numberOfInterleavedChannels() const = 0;
+ virtual uint32_t numberOfChannelStreams() const = 0;
+ virtual uint32_t numberOfChannels() const = 0;
+ virtual uint32_t sampleWordSize() const = 0;
+};
+
+}
diff --git a/Source/WebCore/platform/audio/Biquad.cpp b/Source/WebCore/platform/audio/Biquad.cpp
index 97ebb31fb..765386896 100644
--- a/Source/WebCore/platform/audio/Biquad.cpp
+++ b/Source/WebCore/platform/audio/Biquad.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -37,7 +37,7 @@
#include <stdio.h>
#include <wtf/MathExtras.h>
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// Work around a bug where VForce.h forward declares std::complex in a way that's incompatible with libc++ complex.
#define __VFORCE_H
#include <Accelerate/Accelerate.h>
@@ -45,22 +45,18 @@
namespace WebCore {
+#if USE(ACCELERATE)
const int kBufferSize = 1024;
+#endif
Biquad::Biquad()
{
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// Allocate two samples more for filter history
m_inputBuffer.allocate(kBufferSize + 2);
m_outputBuffer.allocate(kBufferSize + 2);
#endif
-#if USE(WEBAUDIO_IPP)
- int bufferSize;
- ippsIIRGetStateSize64f_BiQuad_32f(1, &bufferSize);
- m_ippInternalBuffer = ippsMalloc_8u(bufferSize);
-#endif // USE(WEBAUDIO_IPP)
-
// Initialize as pass-thru (straight-wire, no filter effect)
setNormalizedCoefficients(1, 0, 0, 1, 0, 0);
@@ -69,20 +65,15 @@ Biquad::Biquad()
Biquad::~Biquad()
{
-#if USE(WEBAUDIO_IPP)
- ippsFree(m_ippInternalBuffer);
-#endif // USE(WEBAUDIO_IPP)
}
void Biquad::process(const float* sourceP, float* destP, size_t framesToProcess)
{
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// Use vecLib if available
processFast(sourceP, destP, framesToProcess);
-#elif USE(WEBAUDIO_IPP)
- ippsIIR64f_32f(sourceP, destP, static_cast<int>(framesToProcess), m_biquadState);
-#else // USE(WEBAUDIO_IPP)
+#else
int n = framesToProcess;
@@ -127,7 +118,7 @@ void Biquad::process(const float* sourceP, float* destP, size_t framesToProcess)
#endif
}
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// Here we have optimized version using Accelerate.framework
@@ -181,12 +172,12 @@ void Biquad::processSliceFast(double* sourceP, double* destP, double* coefficien
destP[1] = destP[framesToProcess - 1 + 2];
}
-#endif // OS(DARWIN)
+#endif // USE(ACCELERATE)
void Biquad::reset()
{
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// Two extra samples for filter history
double* inputP = m_inputBuffer.data();
inputP[0] = 0;
@@ -196,11 +187,6 @@ void Biquad::reset()
outputP[0] = 0;
outputP[1] = 0;
-#elif USE(WEBAUDIO_IPP)
- int bufferSize;
- ippsIIRGetStateSize64f_BiQuad_32f(1, &bufferSize);
- ippsZero_8u(m_ippInternalBuffer, bufferSize);
-
#else
m_x1 = m_x2 = m_y1 = m_y2 = 0;
#endif
@@ -289,19 +275,6 @@ void Biquad::setNormalizedCoefficients(double b0, double b1, double b2, double a
m_b2 = b2 * a0Inverse;
m_a1 = a1 * a0Inverse;
m_a2 = a2 * a0Inverse;
-
-#if USE(WEBAUDIO_IPP)
- Ipp64f taps[6];
- taps[0] = m_b0;
- taps[1] = m_b1;
- taps[2] = m_b2;
- taps[3] = 1;
- taps[4] = m_a1;
- taps[5] = m_a2;
- m_biquadState = 0;
-
- ippsIIRInit64f_BiQuad_32f(&m_biquadState, taps, 1, 0, m_ippInternalBuffer);
-#endif // USE(WEBAUDIO_IPP)
}
void Biquad::setLowShelfParams(double frequency, double dbGain)
diff --git a/Source/WebCore/platform/audio/Biquad.h b/Source/WebCore/platform/audio/Biquad.h
index e3e612706..bb721a23a 100644
--- a/Source/WebCore/platform/audio/Biquad.h
+++ b/Source/WebCore/platform/audio/Biquad.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,10 +32,6 @@
#include "AudioArray.h"
#include <complex>
#include <sys/types.h>
-
-#if USE(WEBAUDIO_IPP)
-#include <ipps.h>
-#endif // USE(WEBAUDIO_IPP)
namespace WebCore {
@@ -44,10 +40,10 @@ namespace WebCore {
// It can be configured to a number of common and very useful filters:
// lowpass, highpass, shelving, parameteric, notch, allpass, ...
-class Biquad {
+class Biquad final {
public:
Biquad();
- virtual ~Biquad();
+ ~Biquad();
void process(const float* sourceP, float* destP, size_t framesToProcess);
@@ -92,17 +88,13 @@ private:
double m_a1;
double m_a2;
-#if OS(DARWIN)
+#if USE(ACCELERATE)
void processFast(const float* sourceP, float* destP, size_t framesToProcess);
void processSliceFast(double* sourceP, double* destP, double* coefficientsP, size_t framesToProcess);
AudioDoubleArray m_inputBuffer;
AudioDoubleArray m_outputBuffer;
-#elif USE(WEBAUDIO_IPP)
- IppsIIRState64f_32f* m_biquadState;
- Ipp8u* m_ippInternalBuffer;
-
#else
// Filter memory
double m_x1; // input delayed by 1 sample
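
[Note] With the IPP backend removed, Biquad keeps two paths: the Accelerate/vDSP path behind USE(ACCELERATE) and a portable Direct Form 1 loop using the m_x1/m_x2/m_y1/m_y2 state shown in the header. Per sample it computes y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]. A hedged, standalone version of that portable loop; the default coefficients below match the pass-through initialization in Biquad::Biquad(), everything else is illustrative:

#include <cstdio>
#include <vector>

// Direct Form 1 biquad: the same recurrence the non-Accelerate branch of
// Biquad::process() implements, with x1/x2/y1/y2 as filter memory.
struct Biquad {
    double b0 = 1, b1 = 0, b2 = 0, a1 = 0, a2 = 0; // normalized (a0 == 1)
    double x1 = 0, x2 = 0, y1 = 0, y2 = 0;         // filter state

    void process(const float* source, float* dest, size_t frames)
    {
        for (size_t n = 0; n < frames; ++n) {
            double x = source[n];
            double y = b0 * x + b1 * x1 + b2 * x2 - a1 * y1 - a2 * y2;
            dest[n] = static_cast<float>(y);
            x2 = x1; x1 = x; // shift input history
            y2 = y1; y1 = y; // shift output history
        }
    }
};

int main()
{
    Biquad filter; // default coefficients: pass-through
    std::vector<float> in { 1, 0, 0, 0 }, out(4);
    filter.process(in.data(), out.data(), in.size());
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
    return 0;
}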
diff --git a/Source/WebCore/platform/audio/Cone.cpp b/Source/WebCore/platform/audio/Cone.cpp
index f514cdeb5..c87dc7b4b 100644
--- a/Source/WebCore/platform/audio/Cone.cpp
+++ b/Source/WebCore/platform/audio/Cone.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/Cone.h b/Source/WebCore/platform/audio/Cone.h
index f5660182d..d81c05dc6 100644
--- a/Source/WebCore/platform/audio/Cone.h
+++ b/Source/WebCore/platform/audio/Cone.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DirectConvolver.cpp b/Source/WebCore/platform/audio/DirectConvolver.cpp
index 62c4e68af..2c638e27f 100644
--- a/Source/WebCore/platform/audio/DirectConvolver.cpp
+++ b/Source/WebCore/platform/audio/DirectConvolver.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -44,9 +44,6 @@ using namespace VectorMath;
DirectConvolver::DirectConvolver(size_t inputBlockSize)
: m_inputBlockSize(inputBlockSize)
-#if USE(WEBAUDIO_IPP)
- , m_overlayBuffer(inputBlockSize)
-#endif // USE(WEBAUDIO_IPP)
, m_buffer(inputBlockSize * 2)
{
}
@@ -71,27 +68,17 @@ void DirectConvolver::process(AudioFloatArray* convolutionKernel, const float* s
if (!isCopyGood)
return;
-#if USE(WEBAUDIO_IPP)
- float* outputBuffer = m_buffer.data();
- float* overlayBuffer = m_overlayBuffer.data();
- bool isCopyGood2 = overlayBuffer && m_overlayBuffer.size() >= kernelSize && m_buffer.size() == m_inputBlockSize * 2;
- ASSERT(isCopyGood2);
- if (!isCopyGood2)
- return;
-
- ippsConv_32f(static_cast<const Ipp32f*>(sourceP), framesToProcess, static_cast<Ipp32f*>(kernelP), kernelSize, static_cast<Ipp32f*>(outputBuffer));
-
- vadd(outputBuffer, 1, overlayBuffer, 1, destP, 1, framesToProcess);
- memcpy(overlayBuffer, outputBuffer + m_inputBlockSize, sizeof(float) * kernelSize);
-#else
float* inputP = m_buffer.data() + m_inputBlockSize;
// Copy samples to 2nd half of input buffer.
memcpy(inputP, sourceP, sizeof(float) * framesToProcess);
-#if OS(DARWIN)
+#if USE(ACCELERATE)
#if defined(__ppc__) || defined(__i386__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
conv(inputP - kernelSize + 1, 1, kernelP + kernelSize - 1, -1, destP, 1, framesToProcess, kernelSize);
+#pragma clang diagnostic pop
#else
vDSP_conv(inputP - kernelSize + 1, 1, kernelP + kernelSize - 1, -1, destP, 1, framesToProcess, kernelSize);
#endif // defined(__ppc__) || defined(__i386__)
@@ -365,19 +352,15 @@ void DirectConvolver::process(AudioFloatArray* convolutionKernel, const float* s
}
destP[i++] = sum;
}
-#endif // OS(DARWIN)
+#endif // USE(ACCELERATE)
// Copy 2nd half of input buffer to 1st half.
memcpy(m_buffer.data(), inputP, sizeof(float) * framesToProcess);
-#endif
}
void DirectConvolver::reset()
{
m_buffer.zero();
-#if USE(WEBAUDIO_IPP)
- m_overlayBuffer.zero();
-#endif // USE(WEBAUDIO_IPP)
}
} // namespace WebCore
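With the IPP path gone, the convolver keeps a single buffer of 2 * inputBlockSize samples: the previous block occupies the first half so that vDSP_conv (or the hand-written loop on other platforms) can reach back kernelSize - 1 samples of history. A standalone sketch of the equivalent scalar direct convolution, illustrative rather than the code in this file:

    #include <cstddef>

    // 'input' points at the start of the current block; the kernelSize - 1
    // samples immediately before it must hold valid history (zeros at first).
    void directConvolve(const float* input, const float* kernel, ptrdiff_t kernelSize,
                        float* dest, ptrdiff_t frames)
    {
        for (ptrdiff_t i = 0; i < frames; ++i) {
            float sum = 0;
            for (ptrdiff_t k = 0; k < kernelSize; ++k)
                sum += kernel[k] * input[i - k]; // reads history whenever k > i
            dest[i] = sum;
        }
    }
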
diff --git a/Source/WebCore/platform/audio/DirectConvolver.h b/Source/WebCore/platform/audio/DirectConvolver.h
index 1cb8c672d..7cd879d45 100644
--- a/Source/WebCore/platform/audio/DirectConvolver.h
+++ b/Source/WebCore/platform/audio/DirectConvolver.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,10 +31,6 @@
#include "AudioArray.h"
-#if USE(WEBAUDIO_IPP)
-#include <ipps.h>
-#endif // USE(WEBAUDIO_IPP)
-
namespace WebCore {
class DirectConvolver {
@@ -48,9 +44,6 @@ public:
private:
size_t m_inputBlockSize;
-#if USE(WEBAUDIO_IPP)
- AudioFloatArray m_overlayBuffer;
-#endif // USE(WEBAUDIO_IPP)
AudioFloatArray m_buffer;
};
diff --git a/Source/WebCore/platform/audio/Distance.cpp b/Source/WebCore/platform/audio/Distance.cpp
index c7a64574b..fae212cf4 100644
--- a/Source/WebCore/platform/audio/Distance.cpp
+++ b/Source/WebCore/platform/audio/Distance.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -38,7 +38,7 @@
namespace WebCore {
DistanceEffect::DistanceEffect()
- : m_model(ModelInverse)
+ : m_model(DistanceModelType::Inverse)
, m_isClamped(true)
, m_refDistance(1.0)
, m_maxDistance(10000.0)
@@ -56,11 +56,11 @@ double DistanceEffect::gain(double distance)
distance = std::max(distance, m_refDistance);
switch (m_model) {
- case ModelLinear:
+ case DistanceModelType::Linear:
return linearGain(distance);
- case ModelInverse:
+ case DistanceModelType::Inverse:
return inverseGain(distance);
- case ModelExponential:
+ case DistanceModelType::Exponential:
return exponentialGain(distance);
}
ASSERT_NOT_REACHED();
diff --git a/Source/WebCore/platform/audio/Distance.h b/Source/WebCore/platform/audio/Distance.h
index c5b6cc949..91d349b97 100644
--- a/Source/WebCore/platform/audio/Distance.h
+++ b/Source/WebCore/platform/audio/Distance.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,22 +34,22 @@ namespace WebCore {
// Distance models are defined according to the OpenAL specification:
// http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.htm.
+enum class DistanceModelType {
+ Linear,
+ Inverse,
+ Exponential
+};
+
class DistanceEffect {
public:
- enum ModelType {
- ModelLinear = 0,
- ModelInverse = 1,
- ModelExponential = 2
- };
-
DistanceEffect();
// Returns scalar gain for the given distance the current distance model is used
double gain(double distance);
- ModelType model() { return m_model; }
+ DistanceModelType model() { return m_model; }
- void setModel(ModelType model, bool clamped)
+ void setModel(DistanceModelType model, bool clamped)
{
m_model = model;
m_isClamped = clamped;
@@ -69,7 +69,7 @@ protected:
double inverseGain(double distance);
double exponentialGain(double distance);
- ModelType m_model;
+ DistanceModelType m_model;
bool m_isClamped;
double m_refDistance;
double m_maxDistance;
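The three DistanceModelType values correspond to the OpenAL gain curves referenced above. A hedged standalone sketch of those formulas; the rolloff member name is an assumption, while the reference and maximum distance defaults match the constructor in Distance.cpp:

    #include <cmath>

    struct DistanceGains {
        double refDistance = 1.0;
        double maxDistance = 10000.0;
        double rolloff = 1.0; // assumed name for the rolloff factor

        double linear(double d) const      { return 1.0 - rolloff * (d - refDistance) / (maxDistance - refDistance); }
        double inverse(double d) const     { return refDistance / (refDistance + rolloff * (d - refDistance)); }
        double exponential(double d) const { return std::pow(d / refDistance, -rolloff); }
    };
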
diff --git a/Source/WebCore/platform/audio/DownSampler.cpp b/Source/WebCore/platform/audio/DownSampler.cpp
index e5c283269..9a9546d98 100644
--- a/Source/WebCore/platform/audio/DownSampler.cpp
+++ b/Source/WebCore/platform/audio/DownSampler.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DownSampler.h b/Source/WebCore/platform/audio/DownSampler.h
index b2604359f..7a556ac76 100644
--- a/Source/WebCore/platform/audio/DownSampler.h
+++ b/Source/WebCore/platform/audio/DownSampler.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DynamicsCompressor.cpp b/Source/WebCore/platform/audio/DynamicsCompressor.cpp
index 8942b3125..30125aeb3 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressor.cpp
+++ b/Source/WebCore/platform/audio/DynamicsCompressor.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DynamicsCompressor.h b/Source/WebCore/platform/audio/DynamicsCompressor.h
index f32d6d04e..04133ee53 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressor.h
+++ b/Source/WebCore/platform/audio/DynamicsCompressor.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp b/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
index 3d9929452..734b066cd 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
+++ b/Source/WebCore/platform/audio/DynamicsCompressorKernel.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/DynamicsCompressorKernel.h b/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
index 18834afd9..bf1556884 100644
--- a/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
+++ b/Source/WebCore/platform/audio/DynamicsCompressorKernel.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/EqualPowerPanner.cpp b/Source/WebCore/platform/audio/EqualPowerPanner.cpp
index 986a9bdbe..5640282c2 100644
--- a/Source/WebCore/platform/audio/EqualPowerPanner.cpp
+++ b/Source/WebCore/platform/audio/EqualPowerPanner.cpp
@@ -39,7 +39,7 @@ const float SmoothingTimeConstant = 0.050f;
namespace WebCore {
EqualPowerPanner::EqualPowerPanner(float sampleRate)
- : Panner(PanningModelEqualPower)
+ : Panner(PanningModelType::Equalpower)
, m_isFirstRender(true)
, m_gainL(0.0)
, m_gainR(0.0)
diff --git a/Source/WebCore/platform/audio/EqualPowerPanner.h b/Source/WebCore/platform/audio/EqualPowerPanner.h
index 173333de5..634f17c75 100644
--- a/Source/WebCore/platform/audio/EqualPowerPanner.h
+++ b/Source/WebCore/platform/audio/EqualPowerPanner.h
@@ -35,12 +35,12 @@ class EqualPowerPanner : public Panner {
public:
EqualPowerPanner(float sampleRate);
- virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess);
+ void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBuf, size_t framesToProcess) override;
- virtual void reset() { m_isFirstRender = true; }
+ void reset() override { m_isFirstRender = true; }
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
private:
// For smoothing / de-zippering
diff --git a/Source/WebCore/platform/audio/FFTConvolver.cpp b/Source/WebCore/platform/audio/FFTConvolver.cpp
index fa158696e..f4b5ccbe4 100644
--- a/Source/WebCore/platform/audio/FFTConvolver.cpp
+++ b/Source/WebCore/platform/audio/FFTConvolver.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/FFTConvolver.h b/Source/WebCore/platform/audio/FFTConvolver.h
index 375bf2c65..4542b6fb2 100644
--- a/Source/WebCore/platform/audio/FFTConvolver.h
+++ b/Source/WebCore/platform/audio/FFTConvolver.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/FFTFrame.cpp b/Source/WebCore/platform/audio/FFTFrame.cpp
index 3247b2b3b..4c02b6992 100644
--- a/Source/WebCore/platform/audio/FFTFrame.cpp
+++ b/Source/WebCore/platform/audio/FFTFrame.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/FFTFrame.h b/Source/WebCore/platform/audio/FFTFrame.h
index cd0483056..804d552fa 100644
--- a/Source/WebCore/platform/audio/FFTFrame.h
+++ b/Source/WebCore/platform/audio/FFTFrame.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,22 +31,6 @@
#include "AudioArray.h"
-#if OS(DARWIN) && !USE(WEBAUDIO_FFMPEG)
-#define USE_ACCELERATE_FFT 1
-#else
-#define USE_ACCELERATE_FFT 0
-#endif
-
-#if USE_ACCELERATE_FFT
-#include <Accelerate/Accelerate.h>
-#endif
-
-#if !USE_ACCELERATE_FFT
-
-#if USE(WEBAUDIO_MKL)
-#include "mkl_dfti.h"
-#endif // USE(WEBAUDIO_MKL)
-
#if USE(WEBAUDIO_GSTREAMER)
#include <glib.h>
G_BEGIN_DECLS
@@ -54,19 +38,10 @@ G_BEGIN_DECLS
G_END_DECLS
#endif // USE(WEBAUDIO_GSTREAMER)
-#if USE(WEBAUDIO_OPENMAX_DL_FFT)
-#include "dl/sp/api/armSP.h"
-#include "dl/sp/api/omxSP.h"
-#elif USE(WEBAUDIO_FFMPEG)
-struct RDFTContext;
+#if USE(ACCELERATE)
+#include <Accelerate/Accelerate.h>
#endif
-#endif // !USE_ACCELERATE_FFT
-
-#if USE(WEBAUDIO_IPP)
-#include <ipps.h>
-#endif // USE(WEBAUDIO_IPP)
-
#include <memory>
#include <wtf/Forward.h>
#include <wtf/Threading.h>
@@ -115,7 +90,7 @@ private:
void interpolateFrequencyComponents(const FFTFrame& frame1, const FFTFrame& frame2, double x);
-#if USE_ACCELERATE_FFT
+#if USE(ACCELERATE)
DSPSplitComplex& dspSplitComplex() { return m_frame; }
DSPSplitComplex dspSplitComplex() const { return m_frame; }
@@ -128,37 +103,7 @@ private:
DSPSplitComplex m_frame;
AudioFloatArray m_realData;
AudioFloatArray m_imagData;
-#else // !USE_ACCELERATE_FFT
-
-#if USE(WEBAUDIO_MKL)
- // Interleaves the planar real and imaginary data and returns a
- // pointer to the resulting storage which can be used for in-place
- // or out-of-place operations. FIXME: ideally all of the MKL
- // routines would operate on planar data and this method would be
- // removed.
- float* getUpToDateComplexData();
-
- static DFTI_DESCRIPTOR_HANDLE descriptorHandleForSize(unsigned fftSize);
-
- static DFTI_DESCRIPTOR_HANDLE* descriptorHandles;
-
- DFTI_DESCRIPTOR_HANDLE m_handle;
- AudioFloatArray m_complexData;
- AudioFloatArray m_realData;
- AudioFloatArray m_imagData;
-#endif // USE(WEBAUDIO_MKL)
-
-#if USE(WEBAUDIO_FFMPEG)
- static RDFTContext* contextForSize(unsigned fftSize, int trans);
-
- RDFTContext* m_forwardContext;
- RDFTContext* m_inverseContext;
-
- float* getUpToDateComplexData();
- AudioFloatArray m_complexData;
- AudioFloatArray m_realData;
- AudioFloatArray m_imagData;
-#endif // USE(WEBAUDIO_FFMPEG)
+#endif
#if USE(WEBAUDIO_GSTREAMER)
GstFFTF32* m_fft;
@@ -167,29 +112,6 @@ private:
AudioFloatArray m_realData;
AudioFloatArray m_imagData;
#endif // USE(WEBAUDIO_GSTREAMER)
-
-#if USE(WEBAUDIO_IPP)
- Ipp8u* m_buffer;
- IppsDFTSpec_R_32f* m_DFTSpec;
-
- float* getUpToDateComplexData();
- AudioFloatArray m_complexData;
- AudioFloatArray m_realData;
- AudioFloatArray m_imagData;
-#endif // USE(WEBAUDIO_IPP)
-
-#if USE(WEBAUDIO_OPENMAX_DL_FFT)
- static OMXFFTSpec_R_F32* contextForSize(unsigned log2FFTSize);
-
- OMXFFTSpec_R_F32* m_forwardContext;
- OMXFFTSpec_R_F32* m_inverseContext;
-
- AudioFloatArray m_complexData;
- AudioFloatArray m_realData;
- AudioFloatArray m_imagData;
-#endif
-
-#endif // !USE_ACCELERATE_FFT
};
} // namespace WebCore
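After this cleanup FFTFrame.h carries only two real backends plus the stub. An illustrative summary, not text from the header itself:

    // Which FFTFrame backend ends up compiled, roughly:
    #if USE(ACCELERATE)
    // vDSP split-complex data: DSPSplitComplex m_frame, m_realData, m_imagData
    #endif
    #if USE(WEBAUDIO_GSTREAMER)
    // GStreamer FFT data: GstFFTF32* m_fft, m_realData, m_imagData
    #endif
    // With neither of these (and not OS(DARWIN)), FFTFrameStub.cpp supplies no-op stubs.
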
diff --git a/Source/WebCore/platform/audio/FFTFrameStub.cpp b/Source/WebCore/platform/audio/FFTFrameStub.cpp
index 3ad3284c3..3e4257a02 100644
--- a/Source/WebCore/platform/audio/FFTFrameStub.cpp
+++ b/Source/WebCore/platform/audio/FFTFrameStub.cpp
@@ -29,7 +29,7 @@
#if ENABLE(WEB_AUDIO)
-#if !OS(DARWIN) && !USE(WEBAUDIO_MKL) && !USE(WEBAUDIO_FFMPEG) && !USE(WEBAUDIO_GSTREAMER) && !USE(WEBAUDIO_IPP) && !USE(WEBAUDIO_OPENMAX_DL_FFT)
+#if !OS(DARWIN) && !USE(WEBAUDIO_GSTREAMER)
#include "FFTFrame.h"
@@ -102,6 +102,6 @@ float* FFTFrame::imagData() const
} // namespace WebCore
-#endif // !OS(DARWIN) && !USE(WEBAUDIO_MKL) && !USE(WEBAUDIO_FFMPEG) && !USE(WEBAUDIO_GSTREAMER) && !USE(WEBAUDIO_IPP)
+#endif // !OS(DARWIN) && !USE(WEBAUDIO_GSTREAMER)
#endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/platform/audio/HRTFDatabase.cpp b/Source/WebCore/platform/audio/HRTFDatabase.cpp
index 6695b5bd6..c03dd3463 100644
--- a/Source/WebCore/platform/audio/HRTFDatabase.cpp
+++ b/Source/WebCore/platform/audio/HRTFDatabase.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -54,7 +54,7 @@ HRTFDatabase::HRTFDatabase(float sampleRate)
if (!hrtfElevation.get())
return;
- m_elevations[elevationIndex] = std::move(hrtfElevation);
+ m_elevations[elevationIndex] = WTFMove(hrtfElevation);
elevationIndex += InterpolationFactor;
}
diff --git a/Source/WebCore/platform/audio/HRTFDatabase.h b/Source/WebCore/platform/audio/HRTFDatabase.h
index 93bd8f385..209b6c42b 100644
--- a/Source/WebCore/platform/audio/HRTFDatabase.h
+++ b/Source/WebCore/platform/audio/HRTFDatabase.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,7 +33,6 @@
#include <memory>
#include <wtf/Forward.h>
#include <wtf/Noncopyable.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/Vector.h>
namespace WebCore {
diff --git a/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp b/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
index af8d6958f..f71333c5a 100644
--- a/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
+++ b/Source/WebCore/platform/audio/HRTFDatabaseLoader.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -49,9 +49,7 @@ PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIf
{
ASSERT(isMainThread());
- RefPtr<HRTFDatabaseLoader> loader;
-
- loader = loaderMap().get(sampleRate);
+ RefPtr<HRTFDatabaseLoader> loader = loaderMap().get(sampleRate);
if (loader) {
ASSERT(sampleRate == loader->databaseSampleRate());
return loader;
@@ -104,7 +102,7 @@ void HRTFDatabaseLoader::loadAsynchronously()
{
ASSERT(isMainThread());
- MutexLocker locker(m_threadLock);
+ LockHolder locker(m_threadLock);
if (!m_hrtfDatabase.get() && !m_databaseLoaderThread) {
// Start the asynchronous database loading process.
@@ -119,7 +117,7 @@ bool HRTFDatabaseLoader::isLoaded() const
void HRTFDatabaseLoader::waitForLoaderThreadCompletion()
{
- MutexLocker locker(m_threadLock);
+ LockHolder locker(m_threadLock);
// waitForThreadCompletion() should not be called twice for the same thread.
if (m_databaseLoaderThread)
diff --git a/Source/WebCore/platform/audio/HRTFDatabaseLoader.h b/Source/WebCore/platform/audio/HRTFDatabaseLoader.h
index 27405c290..c869d137a 100644
--- a/Source/WebCore/platform/audio/HRTFDatabaseLoader.h
+++ b/Source/WebCore/platform/audio/HRTFDatabaseLoader.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,9 +32,9 @@
#include "HRTFDatabase.h"
#include <memory>
#include <wtf/HashMap.h>
+#include <wtf/Lock.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
#include <wtf/Threading.h>
namespace WebCore {
@@ -76,7 +76,7 @@ private:
std::unique_ptr<HRTFDatabase> m_hrtfDatabase;
// Holding a m_threadLock is required when accessing m_databaseLoaderThread.
- Mutex m_threadLock;
+ Lock m_threadLock;
ThreadIdentifier m_databaseLoaderThread;
float m_databaseSampleRate;
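The loader now uses WTF's Lock/LockHolder pair in place of Mutex/MutexLocker. A minimal sketch of the pattern, assuming only the wtf/Lock.h API used above:

    #include <wtf/Lock.h>

    class GuardedThing {
    public:
        void doWork()
        {
            LockHolder locker(m_lock); // RAII: released when 'locker' leaves scope
            // ... touch state guarded by m_lock ...
        }

    private:
        Lock m_lock;
    };
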
diff --git a/Source/WebCore/platform/audio/HRTFElevation.cpp b/Source/WebCore/platform/audio/HRTFElevation.cpp
index 982d8d48f..f65cde3c1 100644
--- a/Source/WebCore/platform/audio/HRTFElevation.cpp
+++ b/Source/WebCore/platform/audio/HRTFElevation.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -40,6 +40,7 @@
#include "HRTFPanner.h"
#include <algorithm>
#include <math.h>
+#include <wtf/NeverDestroyed.h>
namespace WebCore {
@@ -58,7 +59,7 @@ const size_t ResponseFrameSize = 256;
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
const float ResponseSampleRate = 44100;
-#if PLATFORM(MAC) || USE(WEBAUDIO_GSTREAMER)
+#if PLATFORM(COCOA) || USE(WEBAUDIO_GSTREAMER)
#define USE_CONCATENATED_IMPULSE_RESPONSES
#endif
@@ -68,18 +69,18 @@ const float ResponseSampleRate = 44100;
static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName)
{
typedef HashMap<String, AudioBus*> AudioBusMap;
- DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());
+ static NeverDestroyed<AudioBusMap> audioBusMap;
AudioBus* bus;
- AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
- if (iterator == audioBusMap.end()) {
- RefPtr<AudioBus> concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
+ AudioBusMap::iterator iterator = audioBusMap.get().find(subjectName);
+ if (iterator == audioBusMap.get().end()) {
+ auto concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
ASSERT(concatenatedImpulseResponses);
if (!concatenatedImpulseResponses)
return 0;
- bus = concatenatedImpulseResponses.release().leakRef();
- audioBusMap.set(subjectName, bus);
+ bus = concatenatedImpulseResponses.leakRef();
+ audioBusMap.get().set(subjectName, bus);
} else
bus = iterator->value;
@@ -207,7 +208,7 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati
// The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45.
//
// Here's how it goes:
-static int maxElevations[] = {
+static const int maxElevations[] = {
// Azimuth
//
90, // 0
@@ -273,7 +274,7 @@ std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(const String& sub
}
}
- return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), elevation, sampleRate);
+ return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), elevation, sampleRate);
}
std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
@@ -301,7 +302,7 @@ std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFEl
// Interpolate elevation angle.
double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();
- return std::make_unique<HRTFElevation>(std::move(kernelListL), std::move(kernelListR), static_cast<int>(angle), sampleRate);
+ return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), static_cast<int>(angle), sampleRate);
}
void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR)
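DEFINE_STATIC_LOCAL gives way to NeverDestroyed for the function-local impulse-response cache. A small sketch of the same pattern, assuming only the wtf/NeverDestroyed.h and wtf/HashMap.h APIs used above; the cached value type here is illustrative:

    #include <wtf/HashMap.h>
    #include <wtf/NeverDestroyed.h>
    #include <wtf/text/WTFString.h>

    static HashMap<String, int>& exampleCache()
    {
        static NeverDestroyed<HashMap<String, int>> map; // constructed once, never torn down at exit
        return map.get();
    }
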
diff --git a/Source/WebCore/platform/audio/HRTFElevation.h b/Source/WebCore/platform/audio/HRTFElevation.h
index 18dc82604..4ef7c108f 100644
--- a/Source/WebCore/platform/audio/HRTFElevation.h
+++ b/Source/WebCore/platform/audio/HRTFElevation.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,7 +32,6 @@
#include "HRTFKernel.h"
#include <memory>
#include <wtf/Noncopyable.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/text/CString.h>
@@ -46,8 +45,8 @@ class HRTFElevation {
WTF_MAKE_NONCOPYABLE(HRTFElevation);
public:
HRTFElevation(std::unique_ptr<HRTFKernelList> kernelListL, std::unique_ptr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
- : m_kernelListL(std::move(kernelListL))
- , m_kernelListR(std::move(kernelListR))
+ : m_kernelListL(WTFMove(kernelListL))
+ , m_kernelListR(WTFMove(kernelListR))
, m_elevationAngle(elevation)
, m_sampleRate(sampleRate)
{
diff --git a/Source/WebCore/platform/audio/HRTFKernel.cpp b/Source/WebCore/platform/audio/HRTFKernel.cpp
index 54a517d1a..ad22833cb 100644
--- a/Source/WebCore/platform/audio/HRTFKernel.cpp
+++ b/Source/WebCore/platform/audio/HRTFKernel.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -132,7 +132,7 @@ PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1,
float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
std::unique_ptr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
- return HRTFKernel::create(std::move(interpolatedFrame), frameDelay, sampleRate1);
+ return HRTFKernel::create(WTFMove(interpolatedFrame), frameDelay, sampleRate1);
}
} // namespace WebCore
diff --git a/Source/WebCore/platform/audio/HRTFKernel.h b/Source/WebCore/platform/audio/HRTFKernel.h
index c22698360..d998f50d0 100644
--- a/Source/WebCore/platform/audio/HRTFKernel.h
+++ b/Source/WebCore/platform/audio/HRTFKernel.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -57,7 +57,7 @@ public:
static PassRefPtr<HRTFKernel> create(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
{
- return adoptRef(new HRTFKernel(std::move(fftFrame), frameDelay, sampleRate));
+ return adoptRef(new HRTFKernel(WTFMove(fftFrame), frameDelay, sampleRate));
}
// Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel.
@@ -79,7 +79,7 @@ private:
HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate);
HRTFKernel(std::unique_ptr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
- : m_fftFrame(std::move(fftFrame))
+ : m_fftFrame(WTFMove(fftFrame))
, m_frameDelay(frameDelay)
, m_sampleRate(sampleRate)
{
diff --git a/Source/WebCore/platform/audio/HRTFPanner.cpp b/Source/WebCore/platform/audio/HRTFPanner.cpp
index f2825475d..cd304c678 100644
--- a/Source/WebCore/platform/audio/HRTFPanner.cpp
+++ b/Source/WebCore/platform/audio/HRTFPanner.cpp
@@ -34,7 +34,6 @@
#include "HRTFDatabaseLoader.h"
#include <algorithm>
#include <wtf/MathExtras.h>
-#include <wtf/RefPtr.h>
namespace WebCore {
@@ -46,7 +45,7 @@ const int UninitializedAzimuth = -1;
const unsigned RenderingQuantum = 128;
HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
- : Panner(PanningModelHRTF)
+ : Panner(PanningModelType::HRTF)
, m_databaseLoader(databaseLoader)
, m_sampleRate(sampleRate)
, m_crossfadeSelection(CrossfadeSelection1)
diff --git a/Source/WebCore/platform/audio/HRTFPanner.h b/Source/WebCore/platform/audio/HRTFPanner.h
index d96a76fe2..bf5eefaa5 100644
--- a/Source/WebCore/platform/audio/HRTFPanner.h
+++ b/Source/WebCore/platform/audio/HRTFPanner.h
@@ -38,16 +38,16 @@ public:
virtual ~HRTFPanner();
// Panner
- virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) override;
- virtual void reset() override;
+ void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) override;
+ void reset() override;
size_t fftSize() const { return fftSizeForSampleRate(m_sampleRate); }
static size_t fftSizeForSampleRate(float sampleRate);
float sampleRate() const { return m_sampleRate; }
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
private:
// Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
diff --git a/Source/WebCore/platform/audio/MediaSessionManager.cpp b/Source/WebCore/platform/audio/MediaSessionManager.cpp
deleted file mode 100644
index f049c6787..000000000
--- a/Source/WebCore/platform/audio/MediaSessionManager.cpp
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "MediaSessionManager.h"
-
-#include "MediaSession.h"
-
-namespace WebCore {
-
-#if !PLATFORM(IOS)
-MediaSessionManager& MediaSessionManager::sharedManager()
-{
- DEFINE_STATIC_LOCAL(MediaSessionManager, manager, ());
- return manager;
-}
-#endif
-
-MediaSessionManager::MediaSessionManager()
- : m_interruptions(0)
-{
- resetRestrictions();
-}
-
-void MediaSessionManager::resetRestrictions()
-{
- m_restrictions[MediaSession::Video] = NoRestrictions;
- m_restrictions[MediaSession::Audio] = NoRestrictions;
- m_restrictions[MediaSession::WebAudio] = NoRestrictions;
-}
-
-bool MediaSessionManager::has(MediaSession::MediaType type) const
-{
- ASSERT(type >= MediaSession::None && type <= MediaSession::WebAudio);
-
- for (auto* session : m_sessions) {
- if (session->mediaType() == type)
- return true;
- }
-
- return false;
-}
-
-int MediaSessionManager::count(MediaSession::MediaType type) const
-{
- ASSERT(type >= MediaSession::None && type <= MediaSession::WebAudio);
-
- int count = 0;
- for (auto* session : m_sessions) {
- if (session->mediaType() == type)
- ++count;
- }
-
- return count;
-}
-
-void MediaSessionManager::beginInterruption()
-{
- if (++m_interruptions > 1)
- return;
-
- for (auto* session : m_sessions)
- session->beginInterruption();
-}
-
-void MediaSessionManager::endInterruption(MediaSession::EndInterruptionFlags flags)
-{
- ASSERT(m_interruptions > 0);
- if (--m_interruptions)
- return;
-
- for (auto* session : m_sessions)
- session->endInterruption(flags);
-}
-
-void MediaSessionManager::addSession(MediaSession& session)
-{
- m_sessions.append(&session);
- session.setState(m_interruptions ? MediaSession::Interrupted : MediaSession::Running);
- updateSessionState();
-}
-
-void MediaSessionManager::removeSession(MediaSession& session)
-{
- size_t index = m_sessions.find(&session);
- ASSERT(index != notFound);
- if (index == notFound)
- return;
-
- m_sessions.remove(index);
- updateSessionState();
-}
-
-void MediaSessionManager::addRestriction(MediaSession::MediaType type, SessionRestrictions restriction)
-{
- ASSERT(type > MediaSession::None && type <= MediaSession::WebAudio);
- m_restrictions[type] |= restriction;
-}
-
-void MediaSessionManager::removeRestriction(MediaSession::MediaType type, SessionRestrictions restriction)
-{
- ASSERT(type > MediaSession::None && type <= MediaSession::WebAudio);
- m_restrictions[type] &= ~restriction;
-}
-
-MediaSessionManager::SessionRestrictions MediaSessionManager::restrictions(MediaSession::MediaType type)
-{
- ASSERT(type > MediaSession::None && type <= MediaSession::WebAudio);
- return m_restrictions[type];
-}
-
-void MediaSessionManager::sessionWillBeginPlayback(const MediaSession& session) const
-{
- MediaSession::MediaType sessionType = session.mediaType();
- SessionRestrictions restrictions = m_restrictions[sessionType];
- if (!restrictions & ConcurrentPlaybackNotPermitted)
- return;
-
- for (auto* oneSession : m_sessions) {
- if (oneSession == &session)
- continue;
- if (oneSession->mediaType() != sessionType)
- continue;
- if (restrictions & ConcurrentPlaybackNotPermitted)
- oneSession->pauseSession();
- }
-}
-
-bool MediaSessionManager::sessionRestrictsInlineVideoPlayback(const MediaSession& session) const
-{
- MediaSession::MediaType sessionType = session.mediaType();
- if (sessionType != MediaSession::Video)
- return false;
-
- return m_restrictions[sessionType] & InlineVideoPlaybackRestricted;
-}
-
-#if !PLATFORM(MAC)
-void MediaSessionManager::updateSessionState()
-{
-}
-#endif
-
-}
diff --git a/Source/WebCore/platform/audio/MediaSessionManager.h b/Source/WebCore/platform/audio/MediaSessionManager.h
deleted file mode 100644
index 227bf1b3e..000000000
--- a/Source/WebCore/platform/audio/MediaSessionManager.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef MediaSessionManager_h
-#define MediaSessionManager_h
-
-#include "MediaSession.h"
-#include "Settings.h"
-#include <map>
-#include <wtf/Vector.h>
-
-namespace WebCore {
-
-class HTMLMediaElement;
-class MediaSession;
-
-class MediaSessionManager {
-public:
- static MediaSessionManager& sharedManager();
- virtual ~MediaSessionManager() { }
-
- bool has(MediaSession::MediaType) const;
- int count(MediaSession::MediaType) const;
-
- void beginInterruption();
- void endInterruption(MediaSession::EndInterruptionFlags);
-
- enum SessionRestrictionFlags {
- NoRestrictions = 0,
- ConcurrentPlaybackNotPermitted = 1 << 0,
- InlineVideoPlaybackRestricted = 1 << 1,
- MetadataPreloadingNotPermitted = 1 << 2,
- AutoPreloadingNotPermitted = 1 << 3,
- };
- typedef unsigned SessionRestrictions;
-
- void addRestriction(MediaSession::MediaType, SessionRestrictions);
- void removeRestriction(MediaSession::MediaType, SessionRestrictions);
- SessionRestrictions restrictions(MediaSession::MediaType);
- virtual void resetRestrictions();
-
- void sessionWillBeginPlayback(const MediaSession&) const;
- bool sessionRestrictsInlineVideoPlayback(const MediaSession&) const;
-
-protected:
- friend class MediaSession;
- explicit MediaSessionManager();
-
- void addSession(MediaSession&);
- void removeSession(MediaSession&);
-
-private:
- void updateSessionState();
-
- SessionRestrictions m_restrictions[MediaSession::WebAudio + 1];
-
- Vector<MediaSession*> m_sessions;
- int m_interruptions;
-};
-
-}
-
-#endif // MediaSessionManager_h
diff --git a/Source/WebCore/platform/audio/MultiChannelResampler.cpp b/Source/WebCore/platform/audio/MultiChannelResampler.cpp
index e93b861cd..899a81405 100644
--- a/Source/WebCore/platform/audio/MultiChannelResampler.cpp
+++ b/Source/WebCore/platform/audio/MultiChannelResampler.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -53,7 +53,7 @@ public:
// provideInput() will be called once for each channel, starting with the first channel.
// Each time it's called, it will provide the next channel of data.
- virtual void provideInput(AudioBus* bus, size_t framesToProcess)
+ void provideInput(AudioBus* bus, size_t framesToProcess) override
{
bool isBusGood = bus && bus->numberOfChannels() == 1;
ASSERT(isBusGood);
diff --git a/Source/WebCore/platform/audio/MultiChannelResampler.h b/Source/WebCore/platform/audio/MultiChannelResampler.h
index e2057d978..1f8d8a522 100644
--- a/Source/WebCore/platform/audio/MultiChannelResampler.h
+++ b/Source/WebCore/platform/audio/MultiChannelResampler.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/Panner.cpp b/Source/WebCore/platform/audio/Panner.cpp
index 9c9987560..ac7286ac9 100644
--- a/Source/WebCore/platform/audio/Panner.cpp
+++ b/Source/WebCore/platform/audio/Panner.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -37,24 +37,18 @@
namespace WebCore {
-std::unique_ptr<Panner> Panner::create(PanningModel model, float sampleRate, HRTFDatabaseLoader* databaseLoader)
+std::unique_ptr<Panner> Panner::create(PanningModelType model, float sampleRate, HRTFDatabaseLoader* databaseLoader)
{
std::unique_ptr<Panner> panner;
switch (model) {
- case PanningModelEqualPower:
+ case PanningModelType::Equalpower:
panner = std::make_unique<EqualPowerPanner>(sampleRate);
break;
- case PanningModelHRTF:
+ case PanningModelType::HRTF:
panner = std::make_unique<HRTFPanner>(sampleRate, databaseLoader);
break;
-
- // FIXME: sound field panning is not yet implemented...
- case PanningModelSoundField:
- default:
- ASSERT_NOT_REACHED();
- return nullptr;
}
return panner;
diff --git a/Source/WebCore/platform/audio/Panner.h b/Source/WebCore/platform/audio/Panner.h
index fe298ccba..2700c4c61 100644
--- a/Source/WebCore/platform/audio/Panner.h
+++ b/Source/WebCore/platform/audio/Panner.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -36,23 +36,20 @@ namespace WebCore {
class AudioBus;
class HRTFDatabaseLoader;
+enum class PanningModelType {
+ Equalpower,
+ HRTF
+};
+
// Abstract base class for panning a mono or stereo source.
class Panner {
public:
- enum {
- PanningModelEqualPower = 0,
- PanningModelHRTF = 1,
- PanningModelSoundField = 2
- };
-
- typedef unsigned PanningModel;
-
- static std::unique_ptr<Panner> create(PanningModel, float sampleRate, HRTFDatabaseLoader*);
+ static std::unique_ptr<Panner> create(PanningModelType, float sampleRate, HRTFDatabaseLoader*);
virtual ~Panner() { };
- PanningModel panningModel() const { return m_panningModel; }
+ PanningModelType panningModel() const { return m_panningModel; }
virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess) = 0;
@@ -62,9 +59,12 @@ public:
virtual double latencyTime() const = 0;
protected:
- Panner(PanningModel model) : m_panningModel(model) { }
+ Panner(PanningModelType model)
+ : m_panningModel(model)
+ {
+ }
- PanningModel m_panningModel;
+ PanningModelType m_panningModel;
};
} // namespace WebCore
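With the scoped PanningModelType enum, callers select an implementation through Panner::create. A hypothetical caller sketch; sampleRate, databaseLoader, the buses and the remaining locals are assumptions, not code from this patch:

    auto panner = Panner::create(PanningModelType::HRTF, sampleRate, databaseLoader);
    if (panner)
        panner->pan(azimuth, elevation, inputBus, outputBus, framesToProcess);
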
diff --git a/Source/WebCore/platform/audio/AudioSessionListener.h b/Source/WebCore/platform/audio/PlatformAudioData.h
index f25551a7f..e4544f632 100644
--- a/Source/WebCore/platform/audio/AudioSessionListener.h
+++ b/Source/WebCore/platform/audio/PlatformAudioData.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,27 +23,23 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioSessionListener_h
-#define AudioSessionListener_h
-
-#if USE(AUDIO_SESSION)
-
-#include <wtf/Noncopyable.h>
+#pragma once
namespace WebCore {
-class AudioSessionListener {
- WTF_MAKE_NONCOPYABLE(AudioSessionListener);
+class PlatformAudioData {
public:
- virtual void beganAudioInterruption() = 0;
- virtual void endedAudioInterruption() = 0;
+ virtual ~PlatformAudioData() = default;
+
+ enum class Kind {
+ None,
+ WebAudioBufferList,
+ };
+
+ virtual Kind kind() const { return Kind::None; }
+
protected:
- AudioSessionListener() { }
- virtual ~AudioSessionListener() { }
+ PlatformAudioData() = default;
};
}
-
-#endif // USE(AUDIO_SESSION)
-
-#endif // AudioSessionListener_h
diff --git a/Source/WebCore/platform/audio/PlatformMediaSession.cpp b/Source/WebCore/platform/audio/PlatformMediaSession.cpp
new file mode 100644
index 000000000..569a5b9e0
--- /dev/null
+++ b/Source/WebCore/platform/audio/PlatformMediaSession.cpp
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PlatformMediaSession.h"
+
+#if ENABLE(VIDEO) || ENABLE(WEB_AUDIO)
+#include "HTMLMediaElement.h"
+#include "Logging.h"
+#include "MediaPlayer.h"
+#include "PlatformMediaSessionManager.h"
+
+namespace WebCore {
+
+const double kClientDataBufferingTimerThrottleDelay = 0.1;
+
+#if !LOG_DISABLED
+static const char* stateName(PlatformMediaSession::State state)
+{
+#define STATE_CASE(state) case PlatformMediaSession::state: return #state
+ switch (state) {
+ STATE_CASE(Idle);
+ STATE_CASE(Autoplaying);
+ STATE_CASE(Playing);
+ STATE_CASE(Paused);
+ STATE_CASE(Interrupted);
+ }
+
+ ASSERT_NOT_REACHED();
+ return "";
+}
+
+static const char* interruptionName(PlatformMediaSession::InterruptionType type)
+{
+#define INTERRUPTION_CASE(type) case PlatformMediaSession::type: return #type
+ switch (type) {
+ INTERRUPTION_CASE(NoInterruption);
+ INTERRUPTION_CASE(SystemSleep);
+ INTERRUPTION_CASE(EnteringBackground);
+ INTERRUPTION_CASE(SystemInterruption);
+ INTERRUPTION_CASE(SuspendedUnderLock);
+ INTERRUPTION_CASE(InvisibleAutoplay);
+ }
+
+ ASSERT_NOT_REACHED();
+ return "";
+}
+#endif
+
+std::unique_ptr<PlatformMediaSession> PlatformMediaSession::create(PlatformMediaSessionClient& client)
+{
+ return std::make_unique<PlatformMediaSession>(client);
+}
+
+PlatformMediaSession::PlatformMediaSession(PlatformMediaSessionClient& client)
+ : m_client(client)
+ , m_clientDataBufferingTimer(*this, &PlatformMediaSession::clientDataBufferingTimerFired)
+ , m_state(Idle)
+ , m_stateToRestore(Idle)
+ , m_notifyingClient(false)
+{
+ ASSERT(m_client.mediaType() >= None && m_client.mediaType() <= WebAudio);
+ PlatformMediaSessionManager::sharedManager().addSession(*this);
+}
+
+PlatformMediaSession::~PlatformMediaSession()
+{
+ PlatformMediaSessionManager::sharedManager().removeSession(*this);
+}
+
+void PlatformMediaSession::setState(State state)
+{
+ LOG(Media, "PlatformMediaSession::setState(%p) - %s", this, stateName(state));
+ m_state = state;
+}
+
+void PlatformMediaSession::beginInterruption(InterruptionType type)
+{
+ LOG(Media, "PlatformMediaSession::beginInterruption(%p), state = %s, interruption type = %s, interruption count = %i", this, stateName(m_state), interruptionName(type), m_interruptionCount);
+
+ // When interruptions are overridden, m_interruptionType doesn't get set.
+ // Give nested interruptions a chance when the previous interruptions were overridden.
+ if (++m_interruptionCount > 1 && m_interruptionType != NoInterruption)
+ return;
+
+ if (client().shouldOverrideBackgroundPlaybackRestriction(type)) {
+ LOG(Media, "PlatformMediaSession::beginInterruption(%p), returning early because client says to override interruption", this);
+ return;
+ }
+
+ m_stateToRestore = state();
+ m_notifyingClient = true;
+ setState(Interrupted);
+ m_interruptionType = type;
+ client().suspendPlayback();
+ m_notifyingClient = false;
+}
+
+void PlatformMediaSession::endInterruption(EndInterruptionFlags flags)
+{
+ LOG(Media, "PlatformMediaSession::endInterruption(%p) - flags = %i, stateToRestore = %s, interruption count = %i", this, (int)flags, stateName(m_stateToRestore), m_interruptionCount);
+
+ if (!m_interruptionCount) {
+ LOG(Media, "PlatformMediaSession::endInterruption(%p) - !! ignoring spurious interruption end !!", this);
+ return;
+ }
+
+ if (--m_interruptionCount)
+ return;
+
+ State stateToRestore = m_stateToRestore;
+ m_stateToRestore = Idle;
+ m_interruptionType = NoInterruption;
+ setState(stateToRestore);
+
+ if (stateToRestore == Autoplaying)
+ client().resumeAutoplaying();
+
+ bool shouldResume = flags & MayResumePlaying && stateToRestore == Playing;
+ client().mayResumePlayback(shouldResume);
+}
+
+void PlatformMediaSession::clientWillBeginAutoplaying()
+{
+ if (m_notifyingClient)
+ return;
+
+ LOG(Media, "PlatformMediaSession::clientWillBeginAutoplaying(%p)- state = %s", this, stateName(m_state));
+ if (state() == Interrupted) {
+ m_stateToRestore = Autoplaying;
+ LOG(Media, " setting stateToRestore to \"Autoplaying\"");
+ return;
+ }
+
+ setState(Autoplaying);
+ updateClientDataBuffering();
+}
+
+bool PlatformMediaSession::clientWillBeginPlayback()
+{
+ if (m_notifyingClient)
+ return true;
+
+ if (!PlatformMediaSessionManager::sharedManager().sessionWillBeginPlayback(*this)) {
+ if (state() == Interrupted)
+ m_stateToRestore = Playing;
+ return false;
+ }
+
+ setState(Playing);
+ updateClientDataBuffering();
+ return true;
+}
+
+bool PlatformMediaSession::clientWillPausePlayback()
+{
+ if (m_notifyingClient)
+ return true;
+
+ LOG(Media, "PlatformMediaSession::clientWillPausePlayback(%p)- state = %s", this, stateName(m_state));
+ if (state() == Interrupted) {
+ m_stateToRestore = Paused;
+ LOG(Media, " setting stateToRestore to \"Paused\"");
+ return false;
+ }
+
+ setState(Paused);
+ PlatformMediaSessionManager::sharedManager().sessionWillEndPlayback(*this);
+ scheduleClientDataBufferingCheck();
+ return true;
+}
+
+void PlatformMediaSession::pauseSession()
+{
+ LOG(Media, "PlatformMediaSession::pauseSession(%p)", this);
+ m_client.suspendPlayback();
+}
+
+void PlatformMediaSession::stopSession()
+{
+ LOG(Media, "PlatformMediaSession::stopSession(%p)", this);
+ m_client.suspendPlayback();
+ PlatformMediaSessionManager::sharedManager().removeSession(*this);
+}
+
+PlatformMediaSession::MediaType PlatformMediaSession::mediaType() const
+{
+ return m_client.mediaType();
+}
+
+PlatformMediaSession::MediaType PlatformMediaSession::presentationType() const
+{
+ return m_client.presentationType();
+}
+
+PlatformMediaSession::CharacteristicsFlags PlatformMediaSession::characteristics() const
+{
+ return m_client.characteristics();
+}
+
+#if ENABLE(VIDEO)
+String PlatformMediaSession::title() const
+{
+ return m_client.mediaSessionTitle();
+}
+
+double PlatformMediaSession::duration() const
+{
+ return m_client.mediaSessionDuration();
+}
+
+double PlatformMediaSession::currentTime() const
+{
+ return m_client.mediaSessionCurrentTime();
+}
+#endif
+
+bool PlatformMediaSession::canReceiveRemoteControlCommands() const
+{
+ return m_client.canReceiveRemoteControlCommands();
+}
+
+void PlatformMediaSession::didReceiveRemoteControlCommand(RemoteControlCommandType command, const PlatformMediaSession::RemoteCommandArgument* argument)
+{
+ m_client.didReceiveRemoteControlCommand(command, argument);
+}
+
+bool PlatformMediaSession::supportsSeeking() const
+{
+ return m_client.supportsSeeking();
+}
+
+void PlatformMediaSession::visibilityChanged()
+{
+ scheduleClientDataBufferingCheck();
+}
+
+void PlatformMediaSession::scheduleClientDataBufferingCheck()
+{
+ if (!m_clientDataBufferingTimer.isActive())
+ m_clientDataBufferingTimer.startOneShot(kClientDataBufferingTimerThrottleDelay);
+}
+
+void PlatformMediaSession::clientDataBufferingTimerFired()
+{
+ LOG(Media, "PlatformMediaSession::clientDataBufferingTimerFired(%p)- visible = %s", this, m_client.elementIsHidden() ? "false" : "true");
+
+ updateClientDataBuffering();
+
+#if PLATFORM(IOS)
+ PlatformMediaSessionManager::sharedManager().configureWireLessTargetMonitoring();
+#endif
+
+ if (m_state != Playing || !m_client.elementIsHidden())
+ return;
+
+ PlatformMediaSessionManager::SessionRestrictions restrictions = PlatformMediaSessionManager::sharedManager().restrictions(mediaType());
+ if ((restrictions & PlatformMediaSessionManager::BackgroundTabPlaybackRestricted) == PlatformMediaSessionManager::BackgroundTabPlaybackRestricted)
+ pauseSession();
+}
+
+void PlatformMediaSession::updateClientDataBuffering()
+{
+ if (m_clientDataBufferingTimer.isActive())
+ m_clientDataBufferingTimer.stop();
+
+ m_client.setShouldBufferData(PlatformMediaSessionManager::sharedManager().sessionCanLoadMedia(*this));
+}
+
+String PlatformMediaSession::sourceApplicationIdentifier() const
+{
+ return m_client.sourceApplicationIdentifier();
+}
+
+bool PlatformMediaSession::isHidden() const
+{
+ return m_client.elementIsHidden();
+}
+
+bool PlatformMediaSession::shouldOverrideBackgroundLoadingRestriction() const
+{
+ return m_client.shouldOverrideBackgroundLoadingRestriction();
+}
+
+void PlatformMediaSession::isPlayingToWirelessPlaybackTargetChanged(bool isWireless)
+{
+ if (isWireless == m_isPlayingToWirelessPlaybackTarget)
+ return;
+
+ m_isPlayingToWirelessPlaybackTarget = isWireless;
+
+ // Save and restore the interruption count so it doesn't get out of sync if beginInterruption is called
+ // because we are in the background.
+ int interruptionCount = m_interruptionCount;
+ PlatformMediaSessionManager::sharedManager().sessionIsPlayingToWirelessPlaybackTargetChanged(*this);
+ m_interruptionCount = interruptionCount;
+}
+
+PlatformMediaSession::DisplayType PlatformMediaSession::displayType() const
+{
+ return m_client.displayType();
+}
+
+bool PlatformMediaSession::activeAudioSessionRequired()
+{
+ if (mediaType() == PlatformMediaSession::None)
+ return false;
+ if (state() != PlatformMediaSession::State::Playing)
+ return false;
+ return canProduceAudio();
+}
+
+bool PlatformMediaSession::canProduceAudio() const
+{
+ return m_client.canProduceAudio();
+}
+
+void PlatformMediaSession::canProduceAudioChanged()
+{
+ PlatformMediaSessionManager::sharedManager().sessionCanProduceAudioChanged(*this);
+}
+
+#if ENABLE(VIDEO)
+String PlatformMediaSessionClient::mediaSessionTitle() const
+{
+ return String();
+}
+
+double PlatformMediaSessionClient::mediaSessionDuration() const
+{
+ return MediaPlayer::invalidTime();
+}
+
+double PlatformMediaSessionClient::mediaSessionCurrentTime() const
+{
+ return MediaPlayer::invalidTime();
+}
+#endif
+
+void PlatformMediaSession::clientCharacteristicsChanged()
+{
+ PlatformMediaSessionManager::sharedManager().clientCharacteristicsChanged(*this);
+}
+
+}
+#endif
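
The interruption handling above is reference-counted: beginInterruption() records the state to restore only for the outermost interruption, nested calls just bump m_interruptionCount, and endInterruption() ignores spurious ends and restores the saved state only when the count drops back to zero. A minimal standalone sketch of that counting pattern follows; the names are hypothetical and it is not the WebCore class, just the bookkeeping.

    #include <cassert>

    class InterruptionCounter {
    public:
        enum State { Idle, Playing, Interrupted };

        void begin()
        {
            if (++m_count > 1)
                return; // Already interrupted; only track the nesting depth.
            m_stateToRestore = m_state;
            m_state = Interrupted;
        }

        void end()
        {
            if (!m_count)
                return; // Spurious end; ignore it.
            if (--m_count)
                return; // Still inside an outer interruption.
            m_state = m_stateToRestore;
            m_stateToRestore = Idle;
        }

        State state() const { return m_state; }

    private:
        State m_state { Playing };
        State m_stateToRestore { Idle };
        int m_count { 0 };
    };

    int main()
    {
        InterruptionCounter session;
        session.begin();
        session.begin();  // Nested interruption.
        session.end();    // Inner end: still interrupted.
        assert(session.state() == InterruptionCounter::Interrupted);
        session.end();    // Outer end: saved state restored.
        assert(session.state() == InterruptionCounter::Playing);
    }
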
diff --git a/Source/WebCore/platform/audio/PlatformMediaSession.h b/Source/WebCore/platform/audio/PlatformMediaSession.h
new file mode 100644
index 000000000..fd5294c36
--- /dev/null
+++ b/Source/WebCore/platform/audio/PlatformMediaSession.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2014-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PlatformMediaSession_h
+#define PlatformMediaSession_h
+
+#include "MediaProducer.h"
+#include "Timer.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/text/WTFString.h>
+
+#if ENABLE(WIRELESS_PLAYBACK_TARGET)
+#include "MediaPlaybackTargetClient.h"
+#endif
+
+namespace WebCore {
+
+class Document;
+class MediaPlaybackTarget;
+class PlatformMediaSessionClient;
+
+class PlatformMediaSession
+#if ENABLE(WIRELESS_PLAYBACK_TARGET)
+ : public MediaPlaybackTargetClient
+#endif
+{
+public:
+ static std::unique_ptr<PlatformMediaSession> create(PlatformMediaSessionClient&);
+
+ PlatformMediaSession(PlatformMediaSessionClient&);
+ virtual ~PlatformMediaSession();
+
+ enum MediaType {
+ None = 0,
+ Video,
+ VideoAudio,
+ Audio,
+ WebAudio,
+ };
+ MediaType mediaType() const;
+ MediaType presentationType() const;
+
+ enum State {
+ Idle,
+ Autoplaying,
+ Playing,
+ Paused,
+ Interrupted,
+ };
+ State state() const { return m_state; }
+ void setState(State);
+
+ enum InterruptionType {
+ NoInterruption,
+ SystemSleep,
+ EnteringBackground,
+ SystemInterruption,
+ SuspendedUnderLock,
+ InvisibleAutoplay,
+ };
+ InterruptionType interruptionType() const { return m_interruptionType; }
+
+ enum EndInterruptionFlags {
+ NoFlags = 0,
+ MayResumePlaying = 1 << 0,
+ };
+
+ enum Characteristics {
+ HasNothing = 0,
+ HasAudio = 1 << 0,
+ HasVideo = 1 << 1,
+ };
+ typedef unsigned CharacteristicsFlags;
+
+ CharacteristicsFlags characteristics() const;
+ void clientCharacteristicsChanged();
+
+ void beginInterruption(InterruptionType);
+ void endInterruption(EndInterruptionFlags);
+
+ void clientWillBeginAutoplaying();
+ bool clientWillBeginPlayback();
+ bool clientWillPausePlayback();
+
+ void pauseSession();
+ void stopSession();
+
+ void visibilityChanged();
+
+#if ENABLE(VIDEO)
+ String title() const;
+ double duration() const;
+ double currentTime() const;
+#endif
+
+ typedef union {
+ double asDouble;
+ } RemoteCommandArgument;
+
+ enum RemoteControlCommandType {
+ NoCommand,
+ PlayCommand,
+ PauseCommand,
+ StopCommand,
+ TogglePlayPauseCommand,
+ BeginSeekingBackwardCommand,
+ EndSeekingBackwardCommand,
+ BeginSeekingForwardCommand,
+ EndSeekingForwardCommand,
+ SeekToPlaybackPositionCommand,
+ };
+ bool canReceiveRemoteControlCommands() const;
+ void didReceiveRemoteControlCommand(RemoteControlCommandType, const RemoteCommandArgument* argument = nullptr);
+ bool supportsSeeking() const;
+
+ enum DisplayType {
+ Normal,
+ Fullscreen,
+ Optimized,
+ };
+ DisplayType displayType() const;
+
+ bool isHidden() const;
+
+ bool shouldOverrideBackgroundLoadingRestriction() const;
+
+ virtual bool canPlayToWirelessPlaybackTarget() const { return false; }
+ virtual bool isPlayingToWirelessPlaybackTarget() const { return m_isPlayingToWirelessPlaybackTarget; }
+ void isPlayingToWirelessPlaybackTargetChanged(bool);
+
+#if ENABLE(WIRELESS_PLAYBACK_TARGET)
+ // MediaPlaybackTargetClient
+ void setPlaybackTarget(Ref<MediaPlaybackTarget>&&) override { }
+ void externalOutputDeviceAvailableDidChange(bool) override { }
+ void setShouldPlayToPlaybackTarget(bool) override { }
+#endif
+
+#if PLATFORM(IOS)
+ virtual bool requiresPlaybackTargetRouteMonitoring() const { return false; }
+#endif
+
+ bool activeAudioSessionRequired();
+ bool canProduceAudio() const;
+ void canProduceAudioChanged();
+
+ void scheduleClientDataBufferingCheck();
+ virtual void resetPlaybackSessionState() { }
+ String sourceApplicationIdentifier() const;
+
+ virtual bool allowsNowPlayingControlsVisibility() const { return false; }
+
+protected:
+ PlatformMediaSessionClient& client() const { return m_client; }
+
+private:
+ void clientDataBufferingTimerFired();
+ void updateClientDataBuffering();
+
+ PlatformMediaSessionClient& m_client;
+ Timer m_clientDataBufferingTimer;
+ State m_state;
+ State m_stateToRestore;
+ InterruptionType m_interruptionType { NoInterruption };
+ int m_interruptionCount { 0 };
+ bool m_notifyingClient;
+ bool m_isPlayingToWirelessPlaybackTarget { false };
+
+ friend class PlatformMediaSessionManager;
+};
+
+class PlatformMediaSessionClient {
+ WTF_MAKE_NONCOPYABLE(PlatformMediaSessionClient);
+public:
+ PlatformMediaSessionClient() { }
+
+ virtual PlatformMediaSession::MediaType mediaType() const = 0;
+ virtual PlatformMediaSession::MediaType presentationType() const = 0;
+ virtual PlatformMediaSession::DisplayType displayType() const { return PlatformMediaSession::Normal; }
+ virtual PlatformMediaSession::CharacteristicsFlags characteristics() const = 0;
+
+ virtual void resumeAutoplaying() { }
+ virtual void mayResumePlayback(bool shouldResume) = 0;
+ virtual void suspendPlayback() = 0;
+
+#if ENABLE(VIDEO)
+ virtual String mediaSessionTitle() const;
+ virtual double mediaSessionDuration() const;
+ virtual double mediaSessionCurrentTime() const;
+#endif
+
+ virtual bool canReceiveRemoteControlCommands() const = 0;
+ virtual void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) = 0;
+ virtual bool supportsSeeking() const = 0;
+
+ virtual void setShouldBufferData(bool) { }
+ virtual bool elementIsHidden() const { return false; }
+ virtual bool canProduceAudio() const { return false; }
+
+ virtual bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const = 0;
+ virtual bool shouldOverrideBackgroundLoadingRestriction() const { return false; }
+
+ virtual void wirelessRoutesAvailableDidChange() { }
+ virtual void setWirelessPlaybackTarget(Ref<MediaPlaybackTarget>&&) { }
+ virtual bool canPlayToWirelessPlaybackTarget() const { return false; }
+ virtual bool isPlayingToWirelessPlaybackTarget() const { return false; }
+ virtual void setShouldPlayToPlaybackTarget(bool) { }
+
+ virtual const Document* hostingDocument() const = 0;
+ virtual String sourceApplicationIdentifier() const = 0;
+
+protected:
+ virtual ~PlatformMediaSessionClient() { }
+};
+
+}
+
+#endif // PlatformMediaSession_h
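
A client drives playback through this interface: it implements the pure virtuals, creates a session with PlatformMediaSession::create(*this), and asks the session for permission via clientWillBeginPlayback() before starting. The sketch below assumes the header above is available and is illustrative only; the Example* names and the resume/pause helpers are hypothetical.

    #include "PlatformMediaSession.h"
    #include <memory>

    namespace WebCore {

    class ExamplePlaybackClient final : public PlatformMediaSessionClient {
    public:
        ExamplePlaybackClient()
            : m_session(PlatformMediaSession::create(*this)) { }

        PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::Audio; }
        PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::Audio; }
        PlatformMediaSession::CharacteristicsFlags characteristics() const override { return PlatformMediaSession::HasAudio; }

        void mayResumePlayback(bool shouldResume) override { if (shouldResume) resumeInternal(); }
        void suspendPlayback() override { pauseInternal(); }

        bool canReceiveRemoteControlCommands() const override { return true; }
        void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType command, const PlatformMediaSession::RemoteCommandArgument*) override
        {
            if (command == PlatformMediaSession::PauseCommand)
                pauseInternal();
        }
        bool supportsSeeking() const override { return false; }

        bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }

        const Document* hostingDocument() const override { return nullptr; }
        String sourceApplicationIdentifier() const override { return String(); }

        void play()
        {
            // Ask the session (and its manager) for permission before starting.
            if (m_session->clientWillBeginPlayback())
                resumeInternal();
        }

    private:
        void resumeInternal() { /* start the underlying player (hypothetical) */ }
        void pauseInternal() { /* pause the underlying player (hypothetical) */ }

        std::unique_ptr<PlatformMediaSession> m_session;
    };

    } // namespace WebCore
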
diff --git a/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp b/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp
new file mode 100644
index 000000000..b58902c66
--- /dev/null
+++ b/Source/WebCore/platform/audio/PlatformMediaSessionManager.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PlatformMediaSessionManager.h"
+
+#include "AudioSession.h"
+#include "Document.h"
+#include "Logging.h"
+#include "PlatformMediaSession.h"
+
+namespace WebCore {
+
+#if !PLATFORM(MAC)
+void PlatformMediaSessionManager::updateNowPlayingInfoIfNecessary()
+{
+}
+#endif
+
+#if ENABLE(VIDEO) || ENABLE(WEB_AUDIO)
+
+#if !PLATFORM(COCOA)
+static PlatformMediaSessionManager* platformMediaSessionManager = nullptr;
+
+PlatformMediaSessionManager& PlatformMediaSessionManager::sharedManager()
+{
+ if (!platformMediaSessionManager)
+ platformMediaSessionManager = new PlatformMediaSessionManager;
+ return *platformMediaSessionManager;
+}
+
+PlatformMediaSessionManager* PlatformMediaSessionManager::sharedManagerIfExists()
+{
+ return platformMediaSessionManager;
+}
+#endif // !PLATFORM(COCOA)
+
+PlatformMediaSessionManager::PlatformMediaSessionManager()
+ : m_systemSleepListener(SystemSleepListener::create(*this))
+{
+ resetRestrictions();
+}
+
+void PlatformMediaSessionManager::resetRestrictions()
+{
+ m_restrictions[PlatformMediaSession::Video] = NoRestrictions;
+ m_restrictions[PlatformMediaSession::Audio] = NoRestrictions;
+ m_restrictions[PlatformMediaSession::VideoAudio] = NoRestrictions;
+ m_restrictions[PlatformMediaSession::WebAudio] = NoRestrictions;
+}
+
+bool PlatformMediaSessionManager::has(PlatformMediaSession::MediaType type) const
+{
+ ASSERT(type >= PlatformMediaSession::None && type <= PlatformMediaSession::WebAudio);
+
+ return anyOfSessions([type] (PlatformMediaSession& session, size_t) {
+ return session.mediaType() == type;
+ });
+}
+
+bool PlatformMediaSessionManager::activeAudioSessionRequired() const
+{
+ return anyOfSessions([] (PlatformMediaSession& session, size_t) {
+ return session.activeAudioSessionRequired();
+ });
+}
+
+bool PlatformMediaSessionManager::canProduceAudio() const
+{
+ return anyOfSessions([] (PlatformMediaSession& session, size_t) {
+ return session.canProduceAudio();
+ });
+}
+
+int PlatformMediaSessionManager::count(PlatformMediaSession::MediaType type) const
+{
+ ASSERT(type >= PlatformMediaSession::None && type <= PlatformMediaSession::WebAudio);
+
+ int count = 0;
+ for (auto* session : m_sessions) {
+ if (session->mediaType() == type)
+ ++count;
+ }
+
+ return count;
+}
+
+void PlatformMediaSessionManager::beginInterruption(PlatformMediaSession::InterruptionType type)
+{
+ LOG(Media, "PlatformMediaSessionManager::beginInterruption");
+
+ m_interrupted = true;
+ forEachSession([type] (PlatformMediaSession& session, size_t) {
+ session.beginInterruption(type);
+ });
+ updateSessionState();
+}
+
+void PlatformMediaSessionManager::endInterruption(PlatformMediaSession::EndInterruptionFlags flags)
+{
+ LOG(Media, "PlatformMediaSessionManager::endInterruption");
+
+ m_interrupted = false;
+ forEachSession([flags] (PlatformMediaSession& session, size_t) {
+ session.endInterruption(flags);
+ });
+}
+
+void PlatformMediaSessionManager::addSession(PlatformMediaSession& session)
+{
+ LOG(Media, "PlatformMediaSessionManager::addSession - %p", &session);
+
+ m_sessions.append(&session);
+ if (m_interrupted)
+ session.setState(PlatformMediaSession::Interrupted);
+
+ if (!m_remoteCommandListener)
+ m_remoteCommandListener = RemoteCommandListener::create(*this);
+
+ if (!m_audioHardwareListener)
+ m_audioHardwareListener = AudioHardwareListener::create(*this);
+
+ updateSessionState();
+}
+
+void PlatformMediaSessionManager::removeSession(PlatformMediaSession& session)
+{
+ LOG(Media, "PlatformMediaSessionManager::removeSession - %p", &session);
+
+ size_t index = m_sessions.find(&session);
+ if (index == notFound)
+ return;
+
+ if (m_iteratingOverSessions)
+ m_sessions.at(index) = nullptr;
+ else
+ m_sessions.remove(index);
+
+ if (m_sessions.isEmpty() || std::all_of(m_sessions.begin(), m_sessions.end(), std::logical_not<void>())) {
+ m_remoteCommandListener = nullptr;
+ m_audioHardwareListener = nullptr;
+ }
+
+ updateSessionState();
+}
+
+void PlatformMediaSessionManager::addRestriction(PlatformMediaSession::MediaType type, SessionRestrictions restriction)
+{
+ ASSERT(type > PlatformMediaSession::None && type <= PlatformMediaSession::WebAudio);
+ m_restrictions[type] |= restriction;
+}
+
+void PlatformMediaSessionManager::removeRestriction(PlatformMediaSession::MediaType type, SessionRestrictions restriction)
+{
+ ASSERT(type > PlatformMediaSession::None && type <= PlatformMediaSession::WebAudio);
+ m_restrictions[type] &= ~restriction;
+}
+
+PlatformMediaSessionManager::SessionRestrictions PlatformMediaSessionManager::restrictions(PlatformMediaSession::MediaType type)
+{
+ ASSERT(type > PlatformMediaSession::None && type <= PlatformMediaSession::WebAudio);
+ return m_restrictions[type];
+}
+
+bool PlatformMediaSessionManager::sessionWillBeginPlayback(PlatformMediaSession& session)
+{
+ LOG(Media, "PlatformMediaSessionManager::sessionWillBeginPlayback - %p", &session);
+
+ setCurrentSession(session);
+
+ PlatformMediaSession::MediaType sessionType = session.mediaType();
+ SessionRestrictions restrictions = m_restrictions[sessionType];
+ if (session.state() == PlatformMediaSession::Interrupted && restrictions & InterruptedPlaybackNotPermitted)
+ return false;
+
+#if USE(AUDIO_SESSION)
+ if (activeAudioSessionRequired() && !AudioSession::sharedSession().tryToSetActive(true))
+ return false;
+#endif
+
+ if (m_interrupted)
+ endInterruption(PlatformMediaSession::NoFlags);
+
+ forEachSession([&] (PlatformMediaSession& oneSession, size_t) {
+ if (&oneSession == &session)
+ return;
+ if (oneSession.mediaType() == sessionType
+ && restrictions & ConcurrentPlaybackNotPermitted
+ && oneSession.state() == PlatformMediaSession::Playing)
+ oneSession.pauseSession();
+ });
+
+ updateSessionState();
+ return true;
+}
+
+void PlatformMediaSessionManager::sessionWillEndPlayback(PlatformMediaSession& session)
+{
+ LOG(Media, "PlatformMediaSessionManager::sessionWillEndPlayback - %p", &session);
+
+ if (m_sessions.size() < 2)
+ return;
+
+ size_t pausingSessionIndex = notFound;
+ size_t lastPlayingSessionIndex = notFound;
+ anyOfSessions([&] (PlatformMediaSession& oneSession, size_t i) {
+ if (&oneSession == &session) {
+ pausingSessionIndex = i;
+ return false;
+ }
+ if (oneSession.state() == PlatformMediaSession::Playing) {
+ lastPlayingSessionIndex = i;
+ return false;
+ }
+ return oneSession.state() != PlatformMediaSession::Playing;
+ });
+ if (lastPlayingSessionIndex == notFound || pausingSessionIndex == notFound)
+ return;
+
+ if (pausingSessionIndex > lastPlayingSessionIndex)
+ return;
+
+ m_sessions.remove(pausingSessionIndex);
+ m_sessions.insert(lastPlayingSessionIndex, &session);
+
+ LOG(Media, "PlatformMediaSessionManager::sessionWillEndPlayback - session moved from index %zu to %zu", pausingSessionIndex, lastPlayingSessionIndex);
+}
+
+void PlatformMediaSessionManager::setCurrentSession(PlatformMediaSession& session)
+{
+ LOG(Media, "PlatformMediaSessionManager::setCurrentSession - %p", &session);
+
+ if (m_sessions.size() < 2)
+ return;
+
+ size_t index = m_sessions.find(&session);
+ ASSERT(index != notFound);
+ if (!index || index == notFound)
+ return;
+
+ m_sessions.remove(index);
+ m_sessions.insert(0, &session);
+ if (m_remoteCommandListener)
+ m_remoteCommandListener->updateSupportedCommands();
+
+ LOG(Media, "PlatformMediaSessionManager::setCurrentSession - session moved from index %zu to 0", index);
+}
+
+PlatformMediaSession* PlatformMediaSessionManager::currentSession() const
+{
+ if (!m_sessions.size())
+ return nullptr;
+
+ return m_sessions[0];
+}
+
+Vector<PlatformMediaSession*> PlatformMediaSessionManager::currentSessionsMatching(std::function<bool(const PlatformMediaSession &)> filter)
+{
+ Vector<PlatformMediaSession*> matchingSessions;
+ forEachSession([&] (PlatformMediaSession& session, size_t) {
+ if (filter(session))
+ matchingSessions.append(&session);
+ });
+ return matchingSessions;
+}
+
+bool PlatformMediaSessionManager::sessionCanLoadMedia(const PlatformMediaSession& session) const
+{
+ return session.state() == PlatformMediaSession::Playing || !session.isHidden() || session.shouldOverrideBackgroundLoadingRestriction();
+}
+
+void PlatformMediaSessionManager::applicationWillEnterBackground() const
+{
+ LOG(Media, "PlatformMediaSessionManager::applicationWillEnterBackground");
+
+ if (m_isApplicationInBackground)
+ return;
+
+ m_isApplicationInBackground = true;
+
+ Vector<PlatformMediaSession*> sessions = m_sessions;
+ forEachSession([&] (PlatformMediaSession& session, size_t) {
+ if (m_restrictions[session.mediaType()] & BackgroundProcessPlaybackRestricted)
+ session.beginInterruption(PlatformMediaSession::EnteringBackground);
+ });
+}
+
+void PlatformMediaSessionManager::applicationDidEnterForeground() const
+{
+ LOG(Media, "PlatformMediaSessionManager::applicationDidEnterForeground");
+
+ if (!m_isApplicationInBackground)
+ return;
+
+ m_isApplicationInBackground = false;
+
+ Vector<PlatformMediaSession*> sessions = m_sessions;
+ forEachSession([&] (PlatformMediaSession& session, size_t) {
+ if (m_restrictions[session.mediaType()] & BackgroundProcessPlaybackRestricted)
+ session.endInterruption(PlatformMediaSession::MayResumePlaying);
+ });
+}
+
+void PlatformMediaSessionManager::sessionIsPlayingToWirelessPlaybackTargetChanged(PlatformMediaSession& session)
+{
+ if (!m_isApplicationInBackground || !(m_restrictions[session.mediaType()] & BackgroundProcessPlaybackRestricted))
+ return;
+
+ if (session.state() != PlatformMediaSession::Interrupted)
+ session.beginInterruption(PlatformMediaSession::EnteringBackground);
+}
+
+void PlatformMediaSessionManager::sessionCanProduceAudioChanged(PlatformMediaSession&)
+{
+ updateSessionState();
+}
+
+#if !PLATFORM(COCOA)
+void PlatformMediaSessionManager::updateSessionState()
+{
+}
+#endif
+
+void PlatformMediaSessionManager::didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType command, const PlatformMediaSession::RemoteCommandArgument* argument)
+{
+ PlatformMediaSession* activeSession = currentSession();
+ if (!activeSession || !activeSession->canReceiveRemoteControlCommands())
+ return;
+ activeSession->didReceiveRemoteControlCommand(command, argument);
+}
+
+bool PlatformMediaSessionManager::supportsSeeking() const
+{
+ PlatformMediaSession* activeSession = currentSession();
+ if (!activeSession)
+ return false;
+ return activeSession->supportsSeeking();
+}
+
+void PlatformMediaSessionManager::systemWillSleep()
+{
+ if (m_interrupted)
+ return;
+
+ forEachSession([] (PlatformMediaSession& session, size_t) {
+ session.beginInterruption(PlatformMediaSession::SystemSleep);
+ });
+}
+
+void PlatformMediaSessionManager::systemDidWake()
+{
+ if (m_interrupted)
+ return;
+
+ forEachSession([] (PlatformMediaSession& session, size_t) {
+ session.endInterruption(PlatformMediaSession::MayResumePlaying);
+ });
+}
+
+void PlatformMediaSessionManager::audioOutputDeviceChanged()
+{
+ updateSessionState();
+}
+
+void PlatformMediaSessionManager::stopAllMediaPlaybackForDocument(const Document* document)
+{
+ forEachSession([document] (PlatformMediaSession& session, size_t) {
+ if (session.client().hostingDocument() == document)
+ session.pauseSession();
+ });
+}
+
+void PlatformMediaSessionManager::stopAllMediaPlaybackForProcess()
+{
+ forEachSession([] (PlatformMediaSession& session, size_t) {
+ session.stopSession();
+ });
+}
+
+void PlatformMediaSessionManager::forEachSession(const Function<void(PlatformMediaSession&, size_t)>& predicate) const
+{
+ ++m_iteratingOverSessions;
+
+ for (size_t i = 0, size = m_sessions.size(); i < size; ++i) {
+ auto session = m_sessions[i];
+ if (!session)
+ continue;
+ predicate(*session, i);
+ }
+
+ --m_iteratingOverSessions;
+ if (!m_iteratingOverSessions)
+ m_sessions.removeAll(nullptr);
+}
+
+PlatformMediaSession* PlatformMediaSessionManager::findSession(const Function<bool(PlatformMediaSession&, size_t)>& predicate) const
+{
+ ++m_iteratingOverSessions;
+
+ PlatformMediaSession* foundSession = nullptr;
+ for (size_t i = 0, size = m_sessions.size(); i < size; ++i) {
+ auto session = m_sessions[i];
+ if (!session)
+ continue;
+
+ if (!predicate(*session, i))
+ continue;
+
+ foundSession = session;
+ break;
+ }
+
+ --m_iteratingOverSessions;
+ if (!m_iteratingOverSessions)
+ m_sessions.removeAll(nullptr);
+
+ return foundSession;
+}
+
+#endif // ENABLE(VIDEO) || ENABLE(WEB_AUDIO)
+
+} // namespace WebCore
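
removeSession() and forEachSession() cooperate so a session may be removed from inside a callback: while m_iteratingOverSessions is non-zero a removal only nulls the vector slot, the loop skips null entries, and the nulls are purged once the outermost iteration returns. A self-contained sketch of that pattern, with hypothetical names and std::vector standing in for WTF::Vector:

    #include <algorithm>
    #include <cstddef>
    #include <functional>
    #include <vector>

    template<typename T>
    class ReentrantSafeList {
    public:
        void add(T* item) { m_items.push_back(item); }

        void remove(T* item)
        {
            auto it = std::find(m_items.begin(), m_items.end(), item);
            if (it == m_items.end())
                return;
            if (m_iterating)
                *it = nullptr;   // Defer the erase; keep indices stable for the loop.
            else
                m_items.erase(it);
        }

        void forEach(const std::function<void(T&)>& callback)
        {
            ++m_iterating;
            for (std::size_t i = 0, size = m_items.size(); i < size; ++i) {
                if (T* item = m_items[i])
                    callback(*item); // The callback may call remove() safely.
            }
            --m_iterating;
            if (!m_iterating)
                m_items.erase(std::remove(m_items.begin(), m_items.end(), nullptr), m_items.end());
        }

    private:
        std::vector<T*> m_items;
        int m_iterating { 0 };
    };

    struct Session { int id; };

    int main()
    {
        ReentrantSafeList<Session> sessions;
        Session a { 1 }, b { 2 };
        sessions.add(&a);
        sessions.add(&b);
        // Removing during iteration is safe: the slot is nulled, then purged afterwards.
        sessions.forEach([&](Session& s) {
            if (s.id == 1)
                sessions.remove(&a);
        });
    }
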
diff --git a/Source/WebCore/platform/audio/PlatformMediaSessionManager.h b/Source/WebCore/platform/audio/PlatformMediaSessionManager.h
new file mode 100644
index 000000000..55f7a5f61
--- /dev/null
+++ b/Source/WebCore/platform/audio/PlatformMediaSessionManager.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PlatformMediaSessionManager_h
+#define PlatformMediaSessionManager_h
+
+#include "AudioHardwareListener.h"
+#include "PlatformMediaSession.h"
+#include "RemoteCommandListener.h"
+#include "SystemSleepListener.h"
+#include <map>
+#include <wtf/Vector.h>
+
+namespace WebCore {
+
+class Document;
+class HTMLMediaElement;
+class PlatformMediaSession;
+class RemoteCommandListener;
+
+class PlatformMediaSessionManager : private RemoteCommandListenerClient, private SystemSleepListener::Client, private AudioHardwareListener::Client {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ WEBCORE_EXPORT static PlatformMediaSessionManager* sharedManagerIfExists();
+ WEBCORE_EXPORT static PlatformMediaSessionManager& sharedManager();
+
+ static void updateNowPlayingInfoIfNecessary();
+
+ virtual ~PlatformMediaSessionManager() { }
+
+ virtual void scheduleUpdateNowPlayingInfo() { }
+ bool has(PlatformMediaSession::MediaType) const;
+ int count(PlatformMediaSession::MediaType) const;
+ bool activeAudioSessionRequired() const;
+ bool canProduceAudio() const;
+
+ WEBCORE_EXPORT virtual bool hasActiveNowPlayingSession() const { return false; }
+ WEBCORE_EXPORT virtual String lastUpdatedNowPlayingTitle() const { return emptyString(); }
+ WEBCORE_EXPORT virtual double lastUpdatedNowPlayingDuration() const { return NAN; }
+ WEBCORE_EXPORT virtual double lastUpdatedNowPlayingElapsedTime() const { return NAN; }
+
+ bool willIgnoreSystemInterruptions() const { return m_willIgnoreSystemInterruptions; }
+ void setWillIgnoreSystemInterruptions(bool ignore) { m_willIgnoreSystemInterruptions = ignore; }
+
+ WEBCORE_EXPORT void beginInterruption(PlatformMediaSession::InterruptionType);
+ WEBCORE_EXPORT void endInterruption(PlatformMediaSession::EndInterruptionFlags);
+
+ WEBCORE_EXPORT void applicationDidEnterForeground() const;
+ WEBCORE_EXPORT void applicationWillEnterBackground() const;
+
+ void stopAllMediaPlaybackForDocument(const Document*);
+ WEBCORE_EXPORT void stopAllMediaPlaybackForProcess();
+
+ enum SessionRestrictionFlags {
+ NoRestrictions = 0,
+ ConcurrentPlaybackNotPermitted = 1 << 0,
+ BackgroundProcessPlaybackRestricted = 1 << 1,
+ BackgroundTabPlaybackRestricted = 1 << 2,
+ InterruptedPlaybackNotPermitted = 1 << 3,
+ };
+ typedef unsigned SessionRestrictions;
+
+ WEBCORE_EXPORT void addRestriction(PlatformMediaSession::MediaType, SessionRestrictions);
+ WEBCORE_EXPORT void removeRestriction(PlatformMediaSession::MediaType, SessionRestrictions);
+ WEBCORE_EXPORT SessionRestrictions restrictions(PlatformMediaSession::MediaType);
+ virtual void resetRestrictions();
+
+ virtual bool sessionWillBeginPlayback(PlatformMediaSession&);
+ virtual void sessionWillEndPlayback(PlatformMediaSession&);
+ virtual bool sessionCanLoadMedia(const PlatformMediaSession&) const;
+ virtual void sessionDidEndRemoteScrubbing(const PlatformMediaSession&) { };
+ virtual void clientCharacteristicsChanged(PlatformMediaSession&) { }
+
+#if PLATFORM(IOS)
+ virtual void configureWireLessTargetMonitoring() { }
+ virtual bool hasWirelessTargetsAvailable() { return false; }
+#endif
+
+ void setCurrentSession(PlatformMediaSession&);
+ PlatformMediaSession* currentSession() const;
+
+ Vector<PlatformMediaSession*> currentSessionsMatching(std::function<bool(const PlatformMediaSession&)>);
+
+ void sessionIsPlayingToWirelessPlaybackTargetChanged(PlatformMediaSession&);
+ void sessionCanProduceAudioChanged(PlatformMediaSession&);
+
+protected:
+ friend class PlatformMediaSession;
+ explicit PlatformMediaSessionManager();
+
+ void addSession(PlatformMediaSession&);
+ virtual void removeSession(PlatformMediaSession&);
+
+ void forEachSession(const Function<void(PlatformMediaSession&, size_t)>&) const;
+ PlatformMediaSession* findSession(const Function<bool(PlatformMediaSession&, size_t)>&) const;
+ bool anyOfSessions(const Function<bool(PlatformMediaSession&, size_t)>& predicate) const { return findSession(predicate); }
+
+private:
+ friend class Internals;
+
+ void updateSessionState();
+
+ // RemoteCommandListenerClient
+ WEBCORE_EXPORT void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override;
+ WEBCORE_EXPORT bool supportsSeeking() const override;
+
+ // AudioHardwareListenerClient
+ void audioHardwareDidBecomeActive() override { }
+ void audioHardwareDidBecomeInactive() override { }
+ void audioOutputDeviceChanged() override;
+
+ // SystemSleepListener
+ void systemWillSleep() override;
+ void systemDidWake() override;
+
+ SessionRestrictions m_restrictions[PlatformMediaSession::WebAudio + 1];
+ mutable Vector<PlatformMediaSession*> m_sessions;
+ std::unique_ptr<RemoteCommandListener> m_remoteCommandListener;
+ std::unique_ptr<SystemSleepListener> m_systemSleepListener;
+ RefPtr<AudioHardwareListener> m_audioHardwareListener;
+
+#if ENABLE(WIRELESS_PLAYBACK_TARGET) && !PLATFORM(IOS)
+ RefPtr<MediaPlaybackTarget> m_playbackTarget;
+ bool m_canPlayToTarget { false };
+#endif
+
+ bool m_interrupted { false };
+ mutable bool m_isApplicationInBackground { false };
+ bool m_willIgnoreSystemInterruptions { false };
+ mutable int m_iteratingOverSessions { 0 };
+};
+
+}
+
+#endif // PlatformMediaSessionManager_h
diff --git a/Source/WebCore/platform/audio/Reverb.cpp b/Source/WebCore/platform/audio/Reverb.cpp
index 87a7fbd29..3043fc8a0 100644
--- a/Source/WebCore/platform/audio/Reverb.cpp
+++ b/Source/WebCore/platform/audio/Reverb.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/Reverb.h b/Source/WebCore/platform/audio/Reverb.h
index 31e70f5ee..77c34fedb 100644
--- a/Source/WebCore/platform/audio/Reverb.h
+++ b/Source/WebCore/platform/audio/Reverb.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ReverbAccumulationBuffer.cpp b/Source/WebCore/platform/audio/ReverbAccumulationBuffer.cpp
index 3d694d161..12742285a 100644
--- a/Source/WebCore/platform/audio/ReverbAccumulationBuffer.cpp
+++ b/Source/WebCore/platform/audio/ReverbAccumulationBuffer.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ReverbAccumulationBuffer.h b/Source/WebCore/platform/audio/ReverbAccumulationBuffer.h
index f5ead2a66..d72756f49 100644
--- a/Source/WebCore/platform/audio/ReverbAccumulationBuffer.h
+++ b/Source/WebCore/platform/audio/ReverbAccumulationBuffer.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ReverbConvolver.cpp b/Source/WebCore/platform/audio/ReverbConvolver.cpp
index 7f7b119f0..96f330828 100644
--- a/Source/WebCore/platform/audio/ReverbConvolver.cpp
+++ b/Source/WebCore/platform/audio/ReverbConvolver.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -34,6 +34,7 @@
#include "VectorMath.h"
#include "AudioBus.h"
+#include <mutex>
namespace WebCore {
@@ -107,10 +108,10 @@ ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse, size_t renderSli
bool isBackgroundStage = false;
if (this->useBackgroundThreads() && stageOffset > RealtimeFrameLimit) {
- m_backgroundStages.append(std::move(stage));
+ m_backgroundStages.append(WTFMove(stage));
isBackgroundStage = true;
} else
- m_stages.append(std::move(stage));
+ m_stages.append(WTFMove(stage));
stageOffset += stageSize;
++i;
@@ -140,9 +141,9 @@ ReverbConvolver::~ReverbConvolver()
// Wake up thread so it can return
{
- std::lock_guard<std::mutex> lock(m_backgroundThreadMutex);
+ std::lock_guard<Lock> lock(m_backgroundThreadMutex);
m_moreInputBuffered = true;
- m_backgroundThreadConditionVariable.notify_one();
+ m_backgroundThreadConditionVariable.notifyOne();
}
waitForThreadCompletion(m_backgroundThread);
@@ -155,7 +156,7 @@ void ReverbConvolver::backgroundThreadEntry()
// Wait for realtime thread to give us more input
m_moreInputBuffered = false;
{
- std::unique_lock<std::mutex> lock(m_backgroundThreadMutex);
+ std::unique_lock<Lock> lock(m_backgroundThreadMutex);
m_backgroundThreadConditionVariable.wait(lock, [this] { return m_moreInputBuffered || m_wantsToExit; });
}
@@ -209,12 +210,12 @@ void ReverbConvolver::process(const AudioChannel* sourceChannel, AudioChannel* d
// signal from time to time, since we'll get to it the next time we're called. We're called repeatedly
// and frequently (around every 3ms). The background thread is processing well into the future and has a considerable amount of
// leeway here...
- std::unique_lock<std::mutex> lock(m_backgroundThreadMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_backgroundThreadMutex, std::try_to_lock);
if (!lock.owns_lock())
return;
m_moreInputBuffered = true;
- m_backgroundThreadConditionVariable.notify_one();
+ m_backgroundThreadConditionVariable.notifyOne();
}
void ReverbConvolver::reset()
diff --git a/Source/WebCore/platform/audio/ReverbConvolver.h b/Source/WebCore/platform/audio/ReverbConvolver.h
index 21897b8f6..6baeb7cce 100644
--- a/Source/WebCore/platform/audio/ReverbConvolver.h
+++ b/Source/WebCore/platform/audio/ReverbConvolver.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -10,7 +11,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -35,9 +36,9 @@
#include "ReverbAccumulationBuffer.h"
#include "ReverbConvolverStage.h"
#include "ReverbInputBuffer.h"
-#include <condition_variable>
#include <memory>
-#include <mutex>
+#include <wtf/Condition.h>
+#include <wtf/Lock.h>
#include <wtf/RefCounted.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
@@ -88,8 +89,8 @@ private:
ThreadIdentifier m_backgroundThread;
bool m_wantsToExit;
bool m_moreInputBuffered;
- mutable std::mutex m_backgroundThreadMutex;
- mutable std::condition_variable m_backgroundThreadConditionVariable;
+ mutable Lock m_backgroundThreadMutex;
+ mutable Condition m_backgroundThreadConditionVariable;
};
} // namespace WebCore
diff --git a/Source/WebCore/platform/audio/ReverbConvolverStage.cpp b/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
index 1832ea539..978044b3c 100644
--- a/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
+++ b/Source/WebCore/platform/audio/ReverbConvolverStage.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -57,8 +57,11 @@ ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t,
m_fftKernel->doPaddedFFT(impulseResponse + stageOffset, stageLength);
m_fftConvolver = std::make_unique<FFTConvolver>(fftSize);
} else {
+ ASSERT(!stageOffset);
+ ASSERT(stageLength <= fftSize / 2);
+
m_directKernel = std::make_unique<AudioFloatArray>(fftSize / 2);
- m_directKernel->copyToRange(impulseResponse + stageOffset, 0, fftSize / 2);
+ m_directKernel->copyToRange(impulseResponse, 0, stageLength);
m_directConvolver = std::make_unique<DirectConvolver>(renderSliceSize);
}
m_temporaryBuffer.allocate(renderSliceSize);
diff --git a/Source/WebCore/platform/audio/ReverbConvolverStage.h b/Source/WebCore/platform/audio/ReverbConvolverStage.h
index 331528ba7..fd9e6927f 100644
--- a/Source/WebCore/platform/audio/ReverbConvolverStage.h
+++ b/Source/WebCore/platform/audio/ReverbConvolverStage.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ReverbInputBuffer.cpp b/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
index 1be9af884..d4025375f 100644
--- a/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
+++ b/Source/WebCore/platform/audio/ReverbInputBuffer.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ReverbInputBuffer.h b/Source/WebCore/platform/audio/ReverbInputBuffer.h
index 5036575b9..4c5561c69 100644
--- a/Source/WebCore/platform/audio/ReverbInputBuffer.h
+++ b/Source/WebCore/platform/audio/ReverbInputBuffer.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/SincResampler.cpp b/Source/WebCore/platform/audio/SincResampler.cpp
index 06ef554cf..e79d6c18d 100644
--- a/Source/WebCore/platform/audio/SincResampler.cpp
+++ b/Source/WebCore/platform/audio/SincResampler.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -35,7 +35,7 @@
#include "AudioBus.h"
#include <wtf/MathExtras.h>
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
#include <emmintrin.h>
#endif
@@ -153,7 +153,7 @@ public:
}
// Consumes samples from the in-memory buffer.
- virtual void provideInput(AudioBus* bus, size_t framesToProcess)
+ void provideInput(AudioBus* bus, size_t framesToProcess) override
{
ASSERT(m_source && bus);
if (!m_source || !bus)
@@ -260,7 +260,7 @@ void SincResampler::process(AudioSourceProvider* sourceProvider, float* destinat
{
float input;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
// If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
while ((reinterpret_cast<uintptr_t>(inputP) & 0x0F) && n) {
CONVOLVE_ONE_SAMPLE
diff --git a/Source/WebCore/platform/audio/SincResampler.h b/Source/WebCore/platform/audio/SincResampler.h
index 04dbf3fa1..2fcd89f7e 100644
--- a/Source/WebCore/platform/audio/SincResampler.h
+++ b/Source/WebCore/platform/audio/SincResampler.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/UpSampler.cpp b/Source/WebCore/platform/audio/UpSampler.cpp
index 87defe3b5..0dc078569 100644
--- a/Source/WebCore/platform/audio/UpSampler.cpp
+++ b/Source/WebCore/platform/audio/UpSampler.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/UpSampler.h b/Source/WebCore/platform/audio/UpSampler.h
index 9a861c2f2..8b9c73d4d 100644
--- a/Source/WebCore/platform/audio/UpSampler.h
+++ b/Source/WebCore/platform/audio/UpSampler.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/VectorMath.cpp b/Source/WebCore/platform/audio/VectorMath.cpp
index 10887445e..7e8d44f3d 100644
--- a/Source/WebCore/platform/audio/VectorMath.cpp
+++ b/Source/WebCore/platform/audio/VectorMath.cpp
@@ -28,11 +28,11 @@
#include "VectorMath.h"
-#if OS(DARWIN)
+#if USE(ACCELERATE)
#include <Accelerate/Accelerate.h>
#endif
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
#include <emmintrin.h>
#endif
@@ -47,7 +47,7 @@ namespace WebCore {
namespace VectorMath {
-#if OS(DARWIN)
+#if USE(ACCELERATE)
// On the Mac we use the highly optimized versions in Accelerate.framework
// In 32-bit mode (__ppc__ or __i386__) <Accelerate/Accelerate.h> includes <vecLib/vDSP_translate.h> which defines macros of the same name as
// our namespaced function names, so we must handle this case differently. Other architectures (64bit, ARM, etc.) do not include this header file.
@@ -55,7 +55,10 @@ namespace VectorMath {
void vsmul(const float* sourceP, int sourceStride, const float* scale, float* destP, int destStride, size_t framesToProcess)
{
#if defined(__ppc__) || defined(__i386__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
::vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess);
+#pragma clang diagnostic pop
#else
vDSP_vsmul(sourceP, sourceStride, scale, destP, destStride, framesToProcess);
#endif
@@ -64,7 +67,10 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* de
void vadd(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
{
#if defined(__ppc__) || defined(__i386__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
::vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess);
+#pragma clang diagnostic pop
#else
vDSP_vadd(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess);
#endif
@@ -73,7 +79,10 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int s
void vmul(const float* source1P, int sourceStride1, const float* source2P, int sourceStride2, float* destP, int destStride, size_t framesToProcess)
{
#if defined(__ppc__) || defined(__i386__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
::vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess);
+#pragma clang diagnostic pop
#else
vDSP_vmul(source1P, sourceStride1, source2P, sourceStride2, destP, destStride, framesToProcess);
#endif
@@ -91,7 +100,10 @@ void zvmul(const float* real1P, const float* imag1P, const float* real2P, const
dest.realp = realDestP;
dest.imagp = imagDestP;
#if defined(__ppc__) || defined(__i386__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
::zvmul(&sc1, 1, &sc2, 1, &dest, 1, framesToProcess, 1);
+#pragma clang diagnostic pop
#else
vDSP_zvmul(&sc1, 1, &sc2, 1, &dest, 1, framesToProcess, 1);
#endif
@@ -122,7 +134,7 @@ void vsma(const float* sourceP, int sourceStride, const float* scale, float* des
{
int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if ((sourceStride == 1) && (destStride == 1)) {
float k = *scale;
@@ -195,7 +207,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* de
{
int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if ((sourceStride == 1) && (destStride == 1)) {
float k = *scale;
@@ -266,7 +278,7 @@ void vsmul(const float* sourceP, int sourceStride, const float* scale, float* de
sourceP += sourceStride;
destP += destStride;
}
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
}
#endif
}
@@ -275,7 +287,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int s
{
int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if ((sourceStride1 ==1) && (sourceStride2 == 1) && (destStride == 1)) {
// If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
while ((reinterpret_cast<size_t>(source1P) & 0x0F) && n) {
@@ -378,7 +390,7 @@ void vadd(const float* source1P, int sourceStride1, const float* source2P, int s
source2P += sourceStride2;
destP += destStride;
}
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
}
#endif
}
@@ -388,7 +400,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int s
int n = framesToProcess;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if ((sourceStride1 == 1) && (sourceStride2 == 1) && (destStride == 1)) {
// If the source1P address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
while ((reinterpret_cast<uintptr_t>(source1P) & 0x0F) && n) {
@@ -461,7 +473,7 @@ void vmul(const float* source1P, int sourceStride1, const float* source2P, int s
void zvmul(const float* real1P, const float* imag1P, const float* real2P, const float* imag2P, float* realDestP, float* imagDestP, size_t framesToProcess)
{
unsigned i = 0;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
// Only use the SSE optimization in the very common case that all addresses are 16-byte aligned.
// Otherwise, fall through to the scalar code below.
if (!(reinterpret_cast<uintptr_t>(real1P) & 0x0F)
@@ -519,7 +531,7 @@ void vsvesq(const float* sourceP, int sourceStride, float* sumP, size_t framesTo
int n = framesToProcess;
float sum = 0;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if (sourceStride == 1) {
// If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
@@ -584,7 +596,7 @@ void vmaxmgv(const float* sourceP, int sourceStride, float* maxP, size_t framesT
int n = framesToProcess;
float max = 0;
-#ifdef __SSE2__
+#if CPU(X86_SSE2)
if (sourceStride == 1) {
// If the sourceP address is not 16-byte aligned, the first several frames (at most three) should be processed separately.
while ((reinterpret_cast<uintptr_t>(sourceP) & 0x0F) && n) {
@@ -678,7 +690,7 @@ void vclip(const float* sourceP, int sourceStride, const float* lowThresholdP, c
}
}
-#endif // OS(DARWIN)
+#endif // USE(ACCELERATE)
} // namespace VectorMath
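
The SSE2 branches above all follow the same shape: process at most three leading frames with scalar code until the source pointer is 16-byte aligned, run the vectorized kernel over groups of four frames, then finish the remainder with scalar code. A portable sketch of that alignment-prologue pattern follows; the function is hypothetical and a plain loop stands in for the intrinsics.

    #include <cstddef>
    #include <cstdint>

    // Adds 'value' to every frame, mirroring the structure of the SSE2 paths above.
    void addConstant(float* p, std::size_t n, float value)
    {
        // Prologue: scalar-process until p is 16-byte aligned (at most three frames).
        while ((reinterpret_cast<uintptr_t>(p) & 0x0F) && n) {
            *p++ += value;
            --n;
        }

        // Main loop: aligned groups of four frames (one 128-bit vector's worth).
        std::size_t groups = n / 4;
        for (std::size_t i = 0; i < groups; ++i) {
            // A real implementation would use _mm_load_ps/_mm_add_ps/_mm_store_ps here.
            p[0] += value;
            p[1] += value;
            p[2] += value;
            p[3] += value;
            p += 4;
        }
        n -= groups * 4;

        // Epilogue: scalar tail.
        while (n--)
            *p++ += value;
    }

    int main()
    {
        float frames[10] = { };
        addConstant(frames, 10, 1.0f);
        return frames[9] == 1.0f ? 0 : 1;
    }
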
diff --git a/Source/WebCore/platform/audio/WebAudioBufferList.cpp b/Source/WebCore/platform/audio/WebAudioBufferList.cpp
new file mode 100644
index 000000000..37b5125f1
--- /dev/null
+++ b/Source/WebCore/platform/audio/WebAudioBufferList.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "WebAudioBufferList.h"
+
+#include "CAAudioStreamDescription.h"
+#include "CoreMediaSoftLink.h"
+
+namespace WebCore {
+
+WebAudioBufferList::WebAudioBufferList(const CAAudioStreamDescription& format)
+{
+ // AudioBufferList is a variable-length struct, so allocate it on the heap via the global operator new
+ // with a custom size, and initialize the struct manually.
+ uint32_t bufferCount = format.numberOfChannelStreams();
+ uint32_t channelCount = format.numberOfInterleavedChannels();
+
+ uint64_t bufferListSize = offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * std::max(1U, bufferCount));
+ ASSERT(bufferListSize <= SIZE_MAX);
+
+ m_listBufferSize = static_cast<size_t>(bufferListSize);
+ m_list = std::unique_ptr<AudioBufferList>(static_cast<AudioBufferList*>(::operator new (m_listBufferSize)));
+ memset(m_list.get(), 0, m_listBufferSize);
+ m_list->mNumberBuffers = bufferCount;
+ for (uint32_t buffer = 0; buffer < bufferCount; ++buffer)
+ m_list->mBuffers[buffer].mNumberChannels = channelCount;
+}
+
+WebAudioBufferList::WebAudioBufferList(const CAAudioStreamDescription& format, uint32_t sampleCount)
+ : WebAudioBufferList(format)
+{
+ if (!sampleCount)
+ return;
+
+ uint32_t bufferCount = format.numberOfChannelStreams();
+ uint32_t channelCount = format.numberOfInterleavedChannels();
+
+ size_t bytesPerBuffer = sampleCount * channelCount * format.bytesPerFrame();
+ m_flatBuffer.reserveInitialCapacity(bufferCount * bytesPerBuffer);
+ auto data = m_flatBuffer.data();
+
+ for (uint32_t buffer = 0; buffer < m_list->mNumberBuffers; ++buffer) {
+ m_list->mBuffers[buffer].mData = data;
+ m_list->mBuffers[buffer].mDataByteSize = bytesPerBuffer;
+ data += bytesPerBuffer;
+ }
+}
+
+WebAudioBufferList::WebAudioBufferList(const CAAudioStreamDescription& format, CMSampleBufferRef sampleBuffer)
+ : WebAudioBufferList(format)
+{
+ if (!sampleBuffer)
+ return;
+
+ CMBlockBufferRef buffer = nullptr;
+ if (noErr == CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, nullptr, m_list.get(), m_listBufferSize, kCFAllocatorSystemDefault, kCFAllocatorSystemDefault, kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, &buffer))
+ m_blockBuffer = adoptCF(buffer);
+}
+
+WTF::IteratorRange<AudioBuffer*> WebAudioBufferList::buffers() const
+{
+ return WTF::makeIteratorRange(&m_list->mBuffers[0], &m_list->mBuffers[m_list->mNumberBuffers]);
+}
+
+uint32_t WebAudioBufferList::bufferCount() const
+{
+ return m_list->mNumberBuffers;
+}
+
+AudioBuffer* WebAudioBufferList::buffer(uint32_t index) const
+{
+ ASSERT(index < m_list->mNumberBuffers);
+ if (index < m_list->mNumberBuffers)
+ return &m_list->mBuffers[index];
+ return nullptr;
+}
+
+}
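
The constructor above sizes its heap block as the offset of the trailing mBuffers array plus one AudioBuffer slot per channel stream, which is the usual way to allocate CoreAudio's variable-length AudioBufferList. A minimal standalone sketch of the same sizing arithmetic, using stand-in struct definitions instead of the CoreAudio headers (the helper name and layout here are illustrative, not from this patch):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>

// Stand-ins mirroring the CoreAudio layout relied on above.
struct AudioBuffer { uint32_t mNumberChannels; uint32_t mDataByteSize; void* mData; };
struct AudioBufferList { uint32_t mNumberBuffers; AudioBuffer mBuffers[1]; };

static AudioBufferList* allocateBufferList(uint32_t bufferCount)
{
    // Always reserve at least one AudioBuffer slot, as std::max(1U, bufferCount) does above.
    size_t size = offsetof(AudioBufferList, mBuffers) + sizeof(AudioBuffer) * std::max<uint32_t>(1, bufferCount);
    auto* list = static_cast<AudioBufferList*>(::operator new(size));
    std::memset(list, 0, size);
    list->mNumberBuffers = bufferCount;
    return list; // Release with ::operator delete(list).
}
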
diff --git a/Source/WebCore/platform/audio/WebAudioBufferList.h b/Source/WebCore/platform/audio/WebAudioBufferList.h
new file mode 100644
index 000000000..bbf3c139b
--- /dev/null
+++ b/Source/WebCore/platform/audio/WebAudioBufferList.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "PlatformAudioData.h"
+#include <wtf/IteratorRange.h>
+#include <wtf/RetainPtr.h>
+#include <wtf/Vector.h>
+
+struct AudioBuffer;
+struct AudioBufferList;
+typedef struct OpaqueCMBlockBuffer *CMBlockBufferRef;
+typedef struct opaqueCMSampleBuffer *CMSampleBufferRef;
+
+namespace WebCore {
+
+class CAAudioStreamDescription;
+
+class WebAudioBufferList : public PlatformAudioData {
+public:
+ WebAudioBufferList(const CAAudioStreamDescription&);
+ WebAudioBufferList(const CAAudioStreamDescription&, uint32_t sampleCount);
+ WebAudioBufferList(const CAAudioStreamDescription&, CMSampleBufferRef);
+
+ AudioBufferList* list() const { return m_list.get(); }
+ operator AudioBufferList&() const { return *m_list; }
+
+ uint32_t bufferCount() const;
+ AudioBuffer* buffer(uint32_t index) const;
+ WTF::IteratorRange<AudioBuffer*> buffers() const;
+
+private:
+ Kind kind() const { return Kind::WebAudioBufferList; }
+
+ size_t m_listBufferSize { 0 };
+ std::unique_ptr<AudioBufferList> m_list;
+ RetainPtr<CMBlockBufferRef> m_blockBuffer;
+ Vector<uint8_t> m_flatBuffer;
+};
+
+}
+
+SPECIALIZE_TYPE_TRAITS_BEGIN(WebCore::WebAudioBufferList)
+static bool isType(const WebCore::PlatformAudioData& data) { return data.kind() == WebCore::PlatformAudioData::Kind::WebAudioBufferList; }
+SPECIALIZE_TYPE_TRAITS_END()
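
The SPECIALIZE_TYPE_TRAITS_* block at the bottom is what lets WTF's is<>() and downcast<>() helpers recognize this subtype of PlatformAudioData. A hedged usage sketch (the zeroBuffers helper is hypothetical, not part of the patch):

#include <wtf/TypeCasts.h>
#include <cstring>

// Zero every CoreAudio buffer if the generic audio data is really a WebAudioBufferList.
static void zeroBuffers(WebCore::PlatformAudioData& data)
{
    if (!is<WebCore::WebAudioBufferList>(data))
        return;
    auto& bufferList = downcast<WebCore::WebAudioBufferList>(data);
    for (auto& buffer : bufferList.buffers())
        std::memset(buffer.mData, 0, buffer.mDataByteSize);
}
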
diff --git a/Source/WebCore/platform/audio/ZeroPole.cpp b/Source/WebCore/platform/audio/ZeroPole.cpp
index 9e6f1b635..39d196f2b 100644
--- a/Source/WebCore/platform/audio/ZeroPole.cpp
+++ b/Source/WebCore/platform/audio/ZeroPole.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/ZeroPole.h b/Source/WebCore/platform/audio/ZeroPole.h
index 4cb1d1745..831de9093 100644
--- a/Source/WebCore/platform/audio/ZeroPole.h
+++ b/Source/WebCore/platform/audio/ZeroPole.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/platform/audio/gtk/AudioBusGtk.cpp b/Source/WebCore/platform/audio/glib/AudioBusGLib.cpp
index 2adee9bac..791bfa396 100644
--- a/Source/WebCore/platform/audio/gtk/AudioBusGtk.cpp
+++ b/Source/WebCore/platform/audio/glib/AudioBusGLib.cpp
@@ -23,23 +23,18 @@
#include "AudioBus.h"
#include "AudioFileReader.h"
-#include "FileSystem.h"
-#include <glib.h>
-#include <wtf/gobject/GUniquePtr.h>
-#include <wtf/text/CString.h>
+#include <gio/gio.h>
+#include <wtf/glib/GRefPtr.h>
+#include <wtf/glib/GUniquePtr.h>
namespace WebCore {
PassRefPtr<AudioBus> AudioBus::loadPlatformResource(const char* name, float sampleRate)
{
- GUniquePtr<gchar> filename(g_strdup_printf("%s.wav", name));
- const char* environmentPath = getenv("AUDIO_RESOURCES_PATH");
- GUniquePtr<gchar> absoluteFilename;
- if (environmentPath)
- absoluteFilename.reset(g_build_filename(environmentPath, filename.get(), NULL));
- else
- absoluteFilename.reset(g_build_filename(sharedResourcesPath().data(), "resources", "audio", filename.get(), NULL));
- return createBusFromAudioFile(absoluteFilename.get(), false, sampleRate);
+ GUniquePtr<char> path(g_strdup_printf("/org/webkitgtk/resources/audio/%s", name));
+ GRefPtr<GBytes> data = adoptGRef(g_resources_lookup_data(path.get(), G_RESOURCE_LOOKUP_FLAGS_NONE, nullptr));
+ ASSERT(data);
+ return createBusFromInMemoryAudioFile(g_bytes_get_data(data.get(), nullptr), g_bytes_get_size(data.get()), false, sampleRate);
}
} // namespace WebCore
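
For context, the GResource lookup used above hands back a GBytes that stays valid while the reference is held; the general pattern in isolation looks like this (the resource path below is a made-up example, not the one WebKit registers):

#include <gio/gio.h>

// Returns the size of a compiled-in resource, or 0 if the path is unknown.
static gsize bundledResourceSize()
{
    GBytes* bytes = g_resources_lookup_data("/org/example/audio/click.wav",
        G_RESOURCE_LOOKUP_FLAGS_NONE, nullptr);
    if (!bytes)
        return 0;
    gsize size = g_bytes_get_size(bytes);
    g_bytes_unref(bytes);
    return size;
}
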
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
index 25ddcb9fa..758389ced 100644
--- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
+++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011, 2012 Igalia S.L
+ * Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -27,9 +28,9 @@
#include "GRefPtrGStreamer.h"
#include "Logging.h"
#include "WebKitWebAudioSourceGStreamer.h"
+#include <gst/audio/gstaudiobasesink.h>
#include <gst/gst.h>
-#include <gst/pbutils/pbutils.h>
-#include <wtf/gobject/GUniquePtr.h>
+#include <wtf/glib/GUniquePtr.h>
namespace WebCore {
@@ -42,6 +43,12 @@ gboolean messageCallback(GstBus*, GstMessage* message, AudioDestinationGStreamer
return destination->handleMessage(message);
}
+static void autoAudioSinkChildAddedCallback(GstChildProxy*, GObject* object, gchar*, gpointer)
+{
+ if (GST_IS_AUDIO_BASE_SINK(object))
+ g_object_set(GST_AUDIO_BASE_SINK(object), "buffer-time", static_cast<gint64>(100000), nullptr);
+}
+
std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
{
// FIXME: make use of inputDeviceId as appropriate.
@@ -85,45 +92,17 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback,
"rate", sampleRate,
"bus", m_renderBus.get(),
"provider", &m_callback,
- "frames", framesToPull, NULL));
-
- GstElement* wavParser = gst_element_factory_make("wavparse", 0);
-
- m_wavParserAvailable = wavParser;
- ASSERT_WITH_MESSAGE(m_wavParserAvailable, "Failed to create GStreamer wavparse element");
- if (!m_wavParserAvailable)
- return;
-
- gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, wavParser, NULL);
- gst_element_link_pads_full(webkitAudioSrc, "src", wavParser, "sink", GST_PAD_LINK_CHECK_NOTHING);
-
- GRefPtr<GstPad> srcPad = adoptGRef(gst_element_get_static_pad(wavParser, "src"));
- finishBuildingPipelineAfterWavParserPadReady(srcPad.get());
-}
-
-AudioDestinationGStreamer::~AudioDestinationGStreamer()
-{
- GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
- ASSERT(bus);
- g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
- gst_bus_remove_signal_watch(bus.get());
-
- gst_element_set_state(m_pipeline, GST_STATE_NULL);
- gst_object_unref(m_pipeline);
-}
-
-void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(GstPad* pad)
-{
- ASSERT(m_wavParserAvailable);
+ "frames", framesToPull, nullptr));
- GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", 0);
+ GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", nullptr);
m_audioSinkAvailable = audioSink;
-
if (!audioSink) {
LOG_ERROR("Failed to create GStreamer autoaudiosink element");
return;
}
+ g_signal_connect(audioSink.get(), "child-added", G_CALLBACK(autoAudioSinkChildAddedCallback), nullptr);
+
// Autoaudiosink does the real sink detection in the GST_STATE_NULL->READY transition
// so it's best to roll it to READY as soon as possible to ensure the underlying platform
// audiosink was loaded correctly.
@@ -135,17 +114,25 @@ void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(Gst
return;
}
- GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
- gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioSink.get(), NULL);
+ GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
+ GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
+ gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, audioConvert, audioResample, audioSink.get(), nullptr);
- // Link wavparse's src pad to audioconvert sink pad.
- GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
- gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
+ // Link src pads from webkitAudioSrc to audioConvert ! audioResample ! autoaudiosink.
+ gst_element_link_pads_full(webkitAudioSrc, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioResample, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
+}
- // Link audioconvert to audiosink and roll states.
- gst_element_link_pads_full(audioConvert, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_sync_state_with_parent(audioConvert);
- gst_element_sync_state_with_parent(audioSink.leakRef());
+AudioDestinationGStreamer::~AudioDestinationGStreamer()
+{
+ GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+ ASSERT(bus);
+ g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
+ gst_bus_remove_signal_watch(bus.get());
+
+ gst_element_set_state(m_pipeline, GST_STATE_NULL);
+ gst_object_unref(m_pipeline);
}
gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
@@ -172,18 +159,23 @@ gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message)
void AudioDestinationGStreamer::start()
{
- ASSERT(m_wavParserAvailable);
- if (!m_wavParserAvailable)
+ ASSERT(m_audioSinkAvailable);
+ if (!m_audioSinkAvailable)
return;
- gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
+ if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) {
+ g_warning("Error: Failed to set pipeline to playing");
+ m_isPlaying = false;
+ return;
+ }
+
m_isPlaying = true;
}
void AudioDestinationGStreamer::stop()
{
- ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
- if (!m_wavParserAvailable || !m_audioSinkAvailable)
+ ASSERT(m_audioSinkAvailable);
+ if (!m_audioSinkAvailable)
return;
gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
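
The hunks above lean on two small GStreamer idioms: tuning the concrete sink once autoaudiosink instantiates it (via the child-added GstChildProxy signal) and treating GST_STATE_CHANGE_FAILURE as a hard error. A condensed sketch of both, outside any WebKit class (the element choice and buffer-time value are illustrative):

#include <gst/audio/gstaudiobasesink.h>
#include <gst/gst.h>

static void childAddedCallback(GstChildProxy*, GObject* object, gchar*, gpointer)
{
    // Shrink the ring buffer of whatever platform sink autoaudiosink picked.
    if (GST_IS_AUDIO_BASE_SINK(object))
        g_object_set(GST_AUDIO_BASE_SINK(object), "buffer-time", static_cast<gint64>(100000), nullptr);
}

static bool addAndStartSink(GstElement* pipeline, GstElement* autoAudioSink)
{
    g_signal_connect(autoAudioSink, "child-added", G_CALLBACK(childAddedCallback), nullptr);
    gst_bin_add(GST_BIN(pipeline), autoAudioSink);
    return gst_element_set_state(pipeline, GST_STATE_PLAYING) != GST_STATE_CHANGE_FAILURE;
}
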
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
index 9dc9a9bea..3b89febc6 100644
--- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
+++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h
@@ -34,14 +34,13 @@ public:
AudioDestinationGStreamer(AudioIOCallback&, float sampleRate);
virtual ~AudioDestinationGStreamer();
- virtual void start();
- virtual void stop();
+ void start() override;
+ void stop() override;
- bool isPlaying() { return m_isPlaying; }
- float sampleRate() const { return m_sampleRate; }
+ bool isPlaying() override { return m_isPlaying; }
+ float sampleRate() const override { return m_sampleRate; }
AudioIOCallback& callback() const { return m_callback; }
- void finishBuildingPipelineAfterWavParserPadReady(GstPad*);
gboolean handleMessage(GstMessage*);
private:
@@ -50,7 +49,6 @@ private:
float m_sampleRate;
bool m_isPlaying;
- bool m_wavParserAvailable;
bool m_audioSinkAvailable;
GstElement* m_pipeline;
};
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp
index e687e572a..6cd8bd7f8 100644
--- a/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp
+++ b/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp
@@ -22,18 +22,19 @@
#if ENABLE(WEB_AUDIO)
#include "AudioFileReader.h"
-
#include "AudioBus.h"
-
+#include "GRefPtrGStreamer.h"
#include <gio/gio.h>
#include <gst/app/gstappsink.h>
+#include <gst/audio/audio-info.h>
#include <gst/gst.h>
-#include <gst/pbutils/pbutils.h>
+#include <wtf/MainThread.h>
#include <wtf/Noncopyable.h>
-#include <wtf/gobject/GRefPtr.h>
-#include <wtf/gobject/GUniquePtr.h>
-
-#include <gst/audio/audio.h>
+#include <wtf/RunLoop.h>
+#include <wtf/Threading.h>
+#include <wtf/WeakPtr.h>
+#include <wtf/glib/GRefPtr.h>
+#include <wtf/glib/GUniquePtr.h>
namespace WebCore {
@@ -46,28 +47,36 @@ public:
PassRefPtr<AudioBus> createBus(float sampleRate, bool mixToMono);
- GstFlowReturn handleSample(GstAppSink*);
- gboolean handleMessage(GstMessage*);
+private:
+ WeakPtr<AudioFileReader> createWeakPtr() { return m_weakPtrFactory.createWeakPtr(); }
+
+ static void deinterleavePadAddedCallback(AudioFileReader*, GstPad*);
+ static void deinterleaveReadyCallback(AudioFileReader*);
+ static void decodebinPadAddedCallback(AudioFileReader*, GstPad*);
+
+ void handleMessage(GstMessage*);
void handleNewDeinterleavePad(GstPad*);
void deinterleavePadsConfigured();
void plugDeinterleave(GstPad*);
void decodeAudioForBusCreation();
+ GstFlowReturn handleSample(GstAppSink*);
-private:
- const void* m_data;
- size_t m_dataSize;
- const char* m_filePath;
+ WeakPtrFactory<AudioFileReader> m_weakPtrFactory;
+ RunLoop& m_runLoop;
+ const void* m_data { nullptr };
+ size_t m_dataSize { 0 };
+ const char* m_filePath { nullptr };
- float m_sampleRate;
- GstBufferList* m_frontLeftBuffers;
- GstBufferList* m_frontRightBuffers;
+ float m_sampleRate { 0 };
+ int m_channels { 0 };
+ GRefPtr<GstBufferList> m_frontLeftBuffers;
+ GRefPtr<GstBufferList> m_frontRightBuffers;
- GstElement* m_pipeline;
- unsigned m_channelSize;
+ GRefPtr<GstElement> m_pipeline;
+ unsigned m_channelSize { 0 };
GRefPtr<GstElement> m_decodebin;
GRefPtr<GstElement> m_deInterleave;
- GRefPtr<GMainLoop> m_loop;
- bool m_errorOccurred;
+ bool m_errorOccurred { false };
};
static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChannel* audioChannel)
@@ -83,132 +92,104 @@ static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChan
}
}
-static GstFlowReturn onAppsinkPullRequiredCallback(GstAppSink* sink, gpointer userData)
-{
- return static_cast<AudioFileReader*>(userData)->handleSample(sink);
-}
-
-gboolean messageCallback(GstBus*, GstMessage* message, AudioFileReader* reader)
-{
- return reader->handleMessage(message);
-}
-
-static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader)
+void AudioFileReader::deinterleavePadAddedCallback(AudioFileReader* reader, GstPad* pad)
{
reader->handleNewDeinterleavePad(pad);
}
-static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioFileReader* reader)
+void AudioFileReader::deinterleaveReadyCallback(AudioFileReader* reader)
{
reader->deinterleavePadsConfigured();
}
-static void onGStreamerDecodebinPadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader)
+void AudioFileReader::decodebinPadAddedCallback(AudioFileReader* reader, GstPad* pad)
{
reader->plugDeinterleave(pad);
}
-gboolean enteredMainLoopCallback(gpointer userData)
-{
- AudioFileReader* reader = reinterpret_cast<AudioFileReader*>(userData);
- reader->decodeAudioForBusCreation();
- return FALSE;
-}
-
AudioFileReader::AudioFileReader(const char* filePath)
- : m_data(0)
- , m_dataSize(0)
+ : m_weakPtrFactory(this)
+ , m_runLoop(RunLoop::current())
, m_filePath(filePath)
- , m_channelSize(0)
- , m_errorOccurred(false)
{
}
AudioFileReader::AudioFileReader(const void* data, size_t dataSize)
- : m_data(data)
+ : m_weakPtrFactory(this)
+ , m_runLoop(RunLoop::current())
+ , m_data(data)
, m_dataSize(dataSize)
- , m_filePath(0)
- , m_channelSize(0)
- , m_errorOccurred(false)
{
}
AudioFileReader::~AudioFileReader()
{
if (m_pipeline) {
- GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+ GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
ASSERT(bus);
- g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this);
- gst_bus_remove_signal_watch(bus.get());
+ gst_bus_set_sync_handler(bus.get(), nullptr, nullptr, nullptr);
- gst_element_set_state(m_pipeline, GST_STATE_NULL);
- gst_object_unref(GST_OBJECT(m_pipeline));
+ gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
+ m_pipeline = nullptr;
}
if (m_decodebin) {
- g_signal_handlers_disconnect_by_func(m_decodebin.get(), reinterpret_cast<gpointer>(onGStreamerDecodebinPadAddedCallback), this);
- m_decodebin.clear();
+ g_signal_handlers_disconnect_matched(m_decodebin.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this);
+ m_decodebin = nullptr;
}
if (m_deInterleave) {
- g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleavePadAddedCallback), this);
- g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleaveReadyCallback), this);
- m_deInterleave.clear();
+ g_signal_handlers_disconnect_matched(m_deInterleave.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this);
+ m_deInterleave = nullptr;
}
-
- gst_buffer_list_unref(m_frontLeftBuffers);
- gst_buffer_list_unref(m_frontRightBuffers);
}
GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink)
{
- GstSample* sample = gst_app_sink_pull_sample(sink);
+ GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
if (!sample)
return GST_FLOW_ERROR;
- GstBuffer* buffer = gst_sample_get_buffer(sample);
- if (!buffer) {
- gst_sample_unref(sample);
+ GstBuffer* buffer = gst_sample_get_buffer(sample.get());
+ if (!buffer)
return GST_FLOW_ERROR;
- }
- GstCaps* caps = gst_sample_get_caps(sample);
- if (!caps) {
- gst_sample_unref(sample);
+ GstCaps* caps = gst_sample_get_caps(sample.get());
+ if (!caps)
return GST_FLOW_ERROR;
- }
GstAudioInfo info;
gst_audio_info_from_caps(&info, caps);
- int frames = GST_CLOCK_TIME_TO_FRAMES(GST_BUFFER_DURATION(buffer), GST_AUDIO_INFO_RATE(&info));
+ int frames = gst_buffer_get_size(buffer) / info.bpf;
// Check the first audio channel. The buffer is supposed to store
// data of a single channel anyway.
switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
- gst_buffer_list_add(m_frontLeftBuffers, gst_buffer_ref(buffer));
+ case GST_AUDIO_CHANNEL_POSITION_MONO:
+ gst_buffer_list_add(m_frontLeftBuffers.get(), gst_buffer_ref(buffer));
m_channelSize += frames;
break;
case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
- gst_buffer_list_add(m_frontRightBuffers, gst_buffer_ref(buffer));
+ gst_buffer_list_add(m_frontRightBuffers.get(), gst_buffer_ref(buffer));
break;
default:
break;
}
- gst_sample_unref(sample);
return GST_FLOW_OK;
-
}
-gboolean AudioFileReader::handleMessage(GstMessage* message)
+void AudioFileReader::handleMessage(GstMessage* message)
{
+ ASSERT(&m_runLoop == &RunLoop::current());
+
GUniqueOutPtr<GError> error;
GUniqueOutPtr<gchar> debug;
switch (GST_MESSAGE_TYPE(message)) {
case GST_MESSAGE_EOS:
- g_main_loop_quit(m_loop.get());
+ m_runLoop.stop();
break;
case GST_MESSAGE_WARNING:
gst_message_parse_warning(message, &error.outPtr(), &debug.outPtr());
@@ -218,12 +199,12 @@ gboolean AudioFileReader::handleMessage(GstMessage* message)
gst_message_parse_error(message, &error.outPtr(), &debug.outPtr());
g_warning("Error: %d, %s. Debug output: %s", error->code, error->message, debug.get());
m_errorOccurred = true;
- g_main_loop_quit(m_loop.get());
+ gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
+ m_runLoop.stop();
break;
default:
break;
}
- return TRUE;
}
void AudioFileReader::handleNewDeinterleavePad(GstPad* pad)
@@ -232,62 +213,69 @@ void AudioFileReader::handleNewDeinterleavePad(GstPad* pad)
// in an appsink so we can pull the data from each
// channel. Pipeline looks like:
// ... deinterleave ! queue ! appsink.
- GstElement* queue = gst_element_factory_make("queue", 0);
- GstElement* sink = gst_element_factory_make("appsink", 0);
+ GstElement* queue = gst_element_factory_make("queue", nullptr);
+ GstElement* sink = gst_element_factory_make("appsink", nullptr);
- GstAppSinkCallbacks callbacks;
- callbacks.eos = 0;
- callbacks.new_preroll = 0;
- callbacks.new_sample = onAppsinkPullRequiredCallback;
- gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0);
+ static GstAppSinkCallbacks callbacks = {
+ nullptr, // eos
+ nullptr, // new_preroll
+ // new_sample
+ [](GstAppSink* sink, gpointer userData) -> GstFlowReturn {
+ return static_cast<AudioFileReader*>(userData)->handleSample(sink);
+ },
+ { nullptr }
+ };
+ gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr);
- g_object_set(sink, "sync", FALSE, NULL);
+ g_object_set(sink, "sync", FALSE, nullptr);
- gst_bin_add_many(GST_BIN(m_pipeline), queue, sink, NULL);
+ gst_bin_add_many(GST_BIN(m_pipeline.get()), queue, sink, nullptr);
- GstPad* sinkPad = gst_element_get_static_pad(queue, "sink");
- gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
- gst_object_unref(GST_OBJECT(sinkPad));
+ GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
+ gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_set_state(queue, GST_STATE_READY);
- gst_element_set_state(sink, GST_STATE_READY);
+ gst_element_sync_state_with_parent(queue);
+ gst_element_sync_state_with_parent(sink);
}
void AudioFileReader::deinterleavePadsConfigured()
{
// All deinterleave src pads are now available, let's roll to
// PLAYING so data flows towards the sinks and it can be retrieved.
- gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
+ gst_element_set_state(m_pipeline.get(), GST_STATE_PLAYING);
}
void AudioFileReader::plugDeinterleave(GstPad* pad)
{
+ // Ignore any additional source pads just in case.
+ if (m_deInterleave)
+ return;
+
// A decodebin pad was added, plug in a deinterleave element to
// separate each planar channel. Sub pipeline looks like
// ... decodebin2 ! audioconvert ! audioresample ! capsfilter ! deinterleave.
- GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
- GstElement* audioResample = gst_element_factory_make("audioresample", 0);
- GstElement* capsFilter = gst_element_factory_make("capsfilter", 0);
+ GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
+ GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
+ GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr);
m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave");
- g_object_set(m_deInterleave.get(), "keep-positions", TRUE, NULL);
- g_signal_connect(m_deInterleave.get(), "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
- g_signal_connect(m_deInterleave.get(), "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);
+ g_object_set(m_deInterleave.get(), "keep-positions", TRUE, nullptr);
+ g_signal_connect_swapped(m_deInterleave.get(), "pad-added", G_CALLBACK(deinterleavePadAddedCallback), this);
+ g_signal_connect_swapped(m_deInterleave.get(), "no-more-pads", G_CALLBACK(deinterleaveReadyCallback), this);
- GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(m_sampleRate),
- "channels", G_TYPE_INT, 2,
- "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
- "layout", G_TYPE_STRING, "interleaved", nullptr);
- g_object_set(capsFilter, "caps", caps, NULL);
- gst_caps_unref(caps);
+ GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw",
+ "rate", G_TYPE_INT, static_cast<int>(m_sampleRate),
+ "channels", G_TYPE_INT, m_channels,
+ "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
+ "layout", G_TYPE_STRING, "interleaved", nullptr));
+ g_object_set(capsFilter, "caps", caps.get(), nullptr);
- gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioResample, capsFilter, m_deInterleave.get(), NULL);
+ gst_bin_add_many(GST_BIN(m_pipeline.get()), audioConvert, audioResample, capsFilter, m_deInterleave.get(), nullptr);
- GstPad* sinkPad = gst_element_get_static_pad(audioConvert, "sink");
- gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING);
- gst_object_unref(GST_OBJECT(sinkPad));
+ GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
+ gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
@@ -301,75 +289,102 @@ void AudioFileReader::plugDeinterleave(GstPad* pad)
void AudioFileReader::decodeAudioForBusCreation()
{
+ ASSERT(&m_runLoop == &RunLoop::current());
+
// Build the pipeline (giostreamsrc | filesrc) ! decodebin2
// A deinterleave element is added once a src pad becomes available in decodebin.
- m_pipeline = gst_pipeline_new(0);
+ m_pipeline = gst_pipeline_new(nullptr);
- GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline)));
+ GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get())));
ASSERT(bus);
- gst_bus_add_signal_watch(bus.get());
- g_signal_connect(bus.get(), "message", G_CALLBACK(messageCallback), this);
+ gst_bus_set_sync_handler(bus.get(), [](GstBus*, GstMessage* message, gpointer userData) {
+ auto& reader = *static_cast<AudioFileReader*>(userData);
+ if (&reader.m_runLoop == &RunLoop::current())
+ reader.handleMessage(message);
+ else {
+ GRefPtr<GstMessage> protectMessage(message);
+ auto weakThis = reader.createWeakPtr();
+ reader.m_runLoop.dispatch([weakThis, protectMessage] {
+ if (weakThis)
+ weakThis->handleMessage(protectMessage.get());
+ });
+ }
+ gst_message_unref(message);
+ return GST_BUS_DROP;
+ }, this, nullptr);
GstElement* source;
if (m_data) {
ASSERT(m_dataSize);
- source = gst_element_factory_make("giostreamsrc", 0);
- GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, 0));
- g_object_set(source, "stream", memoryStream.get(), NULL);
+ source = gst_element_factory_make("giostreamsrc", nullptr);
+ GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, nullptr));
+ g_object_set(source, "stream", memoryStream.get(), nullptr);
} else {
- source = gst_element_factory_make("filesrc", 0);
- g_object_set(source, "location", m_filePath, NULL);
+ source = gst_element_factory_make("filesrc", nullptr);
+ g_object_set(source, "location", m_filePath, nullptr);
}
m_decodebin = gst_element_factory_make("decodebin", "decodebin");
- g_signal_connect(m_decodebin.get(), "pad-added", G_CALLBACK(onGStreamerDecodebinPadAddedCallback), this);
+ g_signal_connect_swapped(m_decodebin.get(), "pad-added", G_CALLBACK(decodebinPadAddedCallback), this);
- gst_bin_add_many(GST_BIN(m_pipeline), source, m_decodebin.get(), NULL);
+ gst_bin_add_many(GST_BIN(m_pipeline.get()), source, m_decodebin.get(), nullptr);
gst_element_link_pads_full(source, "src", m_decodebin.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
+
+ // Catch errors here immediately; there might not be an error message if we're unlucky.
+ if (gst_element_set_state(m_pipeline.get(), GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
+ g_warning("Error: Failed to set pipeline to PAUSED");
+ m_errorOccurred = true;
+ m_runLoop.stop();
+ }
}
PassRefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono)
{
m_sampleRate = sampleRate;
+ m_channels = mixToMono ? 1 : 2;
- m_frontLeftBuffers = gst_buffer_list_new();
- m_frontRightBuffers = gst_buffer_list_new();
-
- GRefPtr<GMainContext> context = adoptGRef(g_main_context_new());
- g_main_context_push_thread_default(context.get());
- m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE));
+ m_frontLeftBuffers = adoptGRef(gst_buffer_list_new());
+ m_frontRightBuffers = adoptGRef(gst_buffer_list_new());
// Start the pipeline processing just after the loop is started.
- GRefPtr<GSource> timeoutSource = adoptGRef(g_timeout_source_new(0));
- g_source_attach(timeoutSource.get(), context.get());
- g_source_set_callback(timeoutSource.get(), reinterpret_cast<GSourceFunc>(enteredMainLoopCallback), this, 0);
+ m_runLoop.dispatch([this] { decodeAudioForBusCreation(); });
+ m_runLoop.run();
- g_main_loop_run(m_loop.get());
- g_main_context_pop_thread_default(context.get());
+ // Set the pipeline to GST_STATE_NULL as soon as possible here to
+ // release any resources that might still be in use.
+ gst_element_set_state(m_pipeline.get(), GST_STATE_NULL);
if (m_errorOccurred)
- return 0;
+ return nullptr;
- unsigned channels = mixToMono ? 1 : 2;
- RefPtr<AudioBus> audioBus = AudioBus::create(channels, m_channelSize, true);
+ RefPtr<AudioBus> audioBus = AudioBus::create(m_channels, m_channelSize, true);
audioBus->setSampleRate(m_sampleRate);
- copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0));
+ copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers.get(), audioBus->channel(0));
if (!mixToMono)
- copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1));
+ copyGstreamerBuffersToAudioChannel(m_frontRightBuffers.get(), audioBus->channel(1));
return audioBus;
}
PassRefPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate)
{
- return AudioFileReader(filePath).createBus(sampleRate, mixToMono);
+ RefPtr<AudioBus> returnValue;
+ auto threadID = createThread("AudioFileReader", [&returnValue, filePath, mixToMono, sampleRate] {
+ returnValue = AudioFileReader(filePath).createBus(sampleRate, mixToMono);
+ });
+ waitForThreadCompletion(threadID);
+ return returnValue;
}
PassRefPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
- return AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono);
+ RefPtr<AudioBus> returnValue;
+ auto threadID = createThread("AudioFileReader", [&returnValue, data, dataSize, mixToMono, sampleRate] {
+ returnValue = AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono);
+ });
+ waitForThreadCompletion(threadID);
+ return returnValue;
}
} // WebCore
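
createBus() blocks its calling thread on a RunLoop, so the two factory functions above push the work onto a short-lived named thread and join it. The same offload pattern in isolation, assuming the WTF helpers already used in this file (createThread / waitForThreadCompletion); the job itself is a placeholder:

#include <wtf/Threading.h>

static int runBlockingJob()
{
    int result = 0;
    auto threadID = createThread("ExampleWorker", [&result] {
        result = 42; // stand-in for the blocking decode work
    });
    waitForThreadCompletion(threadID);
    return result;
}
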
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp
new file mode 100644
index 000000000..4d7f4154d
--- /dev/null
+++ b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2014 Igalia S.L
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "AudioSourceProviderGStreamer.h"
+
+#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)
+
+#include "AudioBus.h"
+#include "AudioSourceProviderClient.h"
+#include <gst/app/gstappsink.h>
+#include <gst/audio/audio-info.h>
+#include <gst/base/gstadapter.h>
+#include <wtf/glib/GMutexLocker.h>
+
+
+namespace WebCore {
+
+// For now the provider supports only stereo files at a fixed sample
+// bitrate.
+static const int gNumberOfChannels = 2;
+static const float gSampleBitRate = 44100;
+
+static GstFlowReturn onAppsinkNewBufferCallback(GstAppSink* sink, gpointer userData)
+{
+ return static_cast<AudioSourceProviderGStreamer*>(userData)->handleAudioBuffer(sink);
+}
+
+static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider)
+{
+ provider->handleNewDeinterleavePad(pad);
+}
+
+static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioSourceProviderGStreamer* provider)
+{
+ provider->deinterleavePadsConfigured();
+}
+
+static void onGStreamerDeinterleavePadRemovedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider)
+{
+ provider->handleRemovedDeinterleavePad(pad);
+}
+
+static GstPadProbeReturn onAppsinkFlushCallback(GstPad*, GstPadProbeInfo* info, gpointer userData)
+{
+ if (GST_PAD_PROBE_INFO_TYPE(info) & (GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH)) {
+ GstEvent* event = GST_PAD_PROBE_INFO_EVENT(info);
+ if (GST_EVENT_TYPE(event) == GST_EVENT_FLUSH_STOP) {
+ AudioSourceProviderGStreamer* provider = reinterpret_cast<AudioSourceProviderGStreamer*>(userData);
+ provider->clearAdapters();
+ }
+ }
+ return GST_PAD_PROBE_OK;
+}
+
+static void copyGStreamerBuffersToAudioChannel(GstAdapter* adapter, AudioBus* bus, int channelNumber, size_t framesToProcess)
+{
+ if (!gst_adapter_available(adapter)) {
+ bus->zero();
+ return;
+ }
+
+ size_t bytes = framesToProcess * sizeof(float);
+ if (gst_adapter_available(adapter) >= bytes) {
+ gst_adapter_copy(adapter, bus->channel(channelNumber)->mutableData(), 0, bytes);
+ gst_adapter_flush(adapter, bytes);
+ }
+}
+
+AudioSourceProviderGStreamer::AudioSourceProviderGStreamer()
+ : m_client(nullptr)
+ , m_deinterleaveSourcePads(0)
+ , m_deinterleavePadAddedHandlerId(0)
+ , m_deinterleaveNoMorePadsHandlerId(0)
+ , m_deinterleavePadRemovedHandlerId(0)
+{
+ g_mutex_init(&m_adapterMutex);
+ m_frontLeftAdapter = gst_adapter_new();
+ m_frontRightAdapter = gst_adapter_new();
+}
+
+AudioSourceProviderGStreamer::~AudioSourceProviderGStreamer()
+{
+ GRefPtr<GstElement> deinterleave = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "deinterleave"));
+ if (deinterleave) {
+ g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadAddedHandlerId);
+ g_signal_handler_disconnect(deinterleave.get(), m_deinterleaveNoMorePadsHandlerId);
+ g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadRemovedHandlerId);
+ }
+
+ g_object_unref(m_frontLeftAdapter);
+ g_object_unref(m_frontRightAdapter);
+ g_mutex_clear(&m_adapterMutex);
+}
+
+void AudioSourceProviderGStreamer::configureAudioBin(GstElement* audioBin, GstElement* teePredecessor)
+{
+ m_audioSinkBin = audioBin;
+
+ GstElement* audioTee = gst_element_factory_make("tee", "audioTee");
+ GstElement* audioQueue = gst_element_factory_make("queue", nullptr);
+ GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
+ GstElement* audioConvert2 = gst_element_factory_make("audioconvert", nullptr);
+ GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
+ GstElement* audioResample2 = gst_element_factory_make("audioresample", nullptr);
+ GstElement* volumeElement = gst_element_factory_make("volume", "volume");
+ GstElement* audioSink = gst_element_factory_make("autoaudiosink", nullptr);
+
+ gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioTee, audioQueue, audioConvert, audioResample, volumeElement, audioConvert2, audioResample2, audioSink, nullptr);
+
+ // In cases where the audio-sink needs elements before tee (such
+ // as scaletempo), they need to be linked to tee, which in this
+ // case doesn't need a ghost pad. It is assumed that the teePredecessor
+ // chain already configured a ghost pad.
+ if (teePredecessor)
+ gst_element_link_pads_full(teePredecessor, "src", audioTee, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ else {
+ // Add a ghostpad to the bin so it can proxy to tee.
+ GRefPtr<GstPad> audioTeeSinkPad = adoptGRef(gst_element_get_static_pad(audioTee, "sink"));
+ gst_element_add_pad(m_audioSinkBin.get(), gst_ghost_pad_new("sink", audioTeeSinkPad.get()));
+ }
+
+ // Link a new src pad from tee to queue ! audioconvert !
+ // audioresample ! volume ! audioconvert ! audioresample !
+ // autoaudiosink. The audioresample and audioconvert are needed to
+ // ensure the audio sink receives buffers in the correct format.
+ gst_element_link_pads_full(audioTee, "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioResample, "src", volumeElement, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(volumeElement, "src", audioConvert2, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioConvert2, "src", audioResample2, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioResample2, "src", audioSink, "sink", GST_PAD_LINK_CHECK_NOTHING);
+}
+
+void AudioSourceProviderGStreamer::provideInput(AudioBus* bus, size_t framesToProcess)
+{
+ WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
+ copyGStreamerBuffersToAudioChannel(m_frontLeftAdapter, bus, 0, framesToProcess);
+ copyGStreamerBuffersToAudioChannel(m_frontRightAdapter, bus, 1, framesToProcess);
+}
+
+GstFlowReturn AudioSourceProviderGStreamer::handleAudioBuffer(GstAppSink* sink)
+{
+ if (!m_client)
+ return GST_FLOW_OK;
+
+ // Pull a buffer from appsink and store it in the appropriate buffer
+ // list for the audio channel it represents.
+ GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
+ if (!sample)
+ return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;
+
+ GstBuffer* buffer = gst_sample_get_buffer(sample.get());
+ if (!buffer)
+ return GST_FLOW_ERROR;
+
+ GstCaps* caps = gst_sample_get_caps(sample.get());
+ if (!caps)
+ return GST_FLOW_ERROR;
+
+ GstAudioInfo info;
+ gst_audio_info_from_caps(&info, caps);
+
+ WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
+
+ // Check the first audio channel. The buffer is supposed to store
+ // data of a single channel anyway.
+ switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
+ case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
+ case GST_AUDIO_CHANNEL_POSITION_MONO:
+ gst_adapter_push(m_frontLeftAdapter, gst_buffer_ref(buffer));
+ break;
+ case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
+ gst_adapter_push(m_frontRightAdapter, gst_buffer_ref(buffer));
+ break;
+ default:
+ break;
+ }
+
+ return GST_FLOW_OK;
+}
+
+void AudioSourceProviderGStreamer::setClient(AudioSourceProviderClient* client)
+{
+ ASSERT(client);
+ m_client = client;
+
+ // The volume element is used to mute audio playback towards the
+ // autoaudiosink. This is needed to avoid double playback of audio
+ // from our audio sink and from the WebAudio AudioDestination node
+ // presumably already configured by the application side.
+ GRefPtr<GstElement> volumeElement = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "volume"));
+ g_object_set(volumeElement.get(), "mute", TRUE, nullptr);
+
+ // The audioconvert and audioresample elements are needed to
+ // ensure deinterleave and the sinks downstream receive buffers in
+ // the format specified by the capsfilter.
+ GstElement* audioQueue = gst_element_factory_make("queue", nullptr);
+ GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr);
+ GstElement* audioResample = gst_element_factory_make("audioresample", nullptr);
+ GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr);
+ GstElement* deInterleave = gst_element_factory_make("deinterleave", "deinterleave");
+
+ g_object_set(deInterleave, "keep-positions", TRUE, nullptr);
+ m_deinterleavePadAddedHandlerId = g_signal_connect(deInterleave, "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
+ m_deinterleaveNoMorePadsHandlerId = g_signal_connect(deInterleave, "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);
+ m_deinterleavePadRemovedHandlerId = g_signal_connect(deInterleave, "pad-removed", G_CALLBACK(onGStreamerDeinterleavePadRemovedCallback), this);
+
+ GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
+ "channels", G_TYPE_INT, gNumberOfChannels,
+ "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
+ "layout", G_TYPE_STRING, "interleaved", nullptr);
+
+ g_object_set(capsFilter, "caps", caps, nullptr);
+ gst_caps_unref(caps);
+
+ gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioQueue, audioConvert, audioResample, capsFilter, deInterleave, nullptr);
+
+ GRefPtr<GstElement> audioTee = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "audioTee"));
+
+ // Link a new src pad from tee to queue ! audioconvert !
+ // audioresample ! capsfilter ! deinterleave. Later
+ // on each deinterleaved planar audio channel will be routed to an
+ // appsink for data extraction and processing.
+ gst_element_link_pads_full(audioTee.get(), "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_link_pads_full(capsFilter, "src", deInterleave, "sink", GST_PAD_LINK_CHECK_NOTHING);
+
+ gst_element_sync_state_with_parent(audioQueue);
+ gst_element_sync_state_with_parent(audioConvert);
+ gst_element_sync_state_with_parent(audioResample);
+ gst_element_sync_state_with_parent(capsFilter);
+ gst_element_sync_state_with_parent(deInterleave);
+}
+
+void AudioSourceProviderGStreamer::handleNewDeinterleavePad(GstPad* pad)
+{
+ m_deinterleaveSourcePads++;
+
+ if (m_deinterleaveSourcePads > 2) {
+ g_warning("The AudioSourceProvider supports only mono and stereo audio. Silencing out this new channel.");
+ GstElement* queue = gst_element_factory_make("queue", nullptr);
+ GstElement* sink = gst_element_factory_make("fakesink", nullptr);
+ g_object_set(sink, "async", FALSE, nullptr);
+ gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);
+
+ GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
+ gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
+
+ GQuark quark = g_quark_from_static_string("peer");
+ g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
+ gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_element_sync_state_with_parent(queue);
+ gst_element_sync_state_with_parent(sink);
+ return;
+ }
+
+ // A new pad for a planar channel was added in deinterleave. Plug
+ // in an appsink so we can pull the data from each
+ // channel. Pipeline looks like:
+ // ... deinterleave ! queue ! appsink.
+ GstElement* queue = gst_element_factory_make("queue", nullptr);
+ GstElement* sink = gst_element_factory_make("appsink", nullptr);
+
+ GstAppSinkCallbacks callbacks;
+ callbacks.eos = nullptr;
+ callbacks.new_preroll = nullptr;
+ callbacks.new_sample = onAppsinkNewBufferCallback;
+ gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr);
+
+ g_object_set(sink, "async", FALSE, nullptr);
+
+ GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
+ "channels", G_TYPE_INT, 1,
+ "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
+ "layout", G_TYPE_STRING, "interleaved", nullptr));
+
+ gst_app_sink_set_caps(GST_APP_SINK(sink), caps.get());
+
+ gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);
+
+ GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
+ gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);
+
+ GQuark quark = g_quark_from_static_string("peer");
+ g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
+
+ gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
+
+ sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink"));
+ gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, onAppsinkFlushCallback, this, nullptr);
+
+ gst_element_sync_state_with_parent(queue);
+ gst_element_sync_state_with_parent(sink);
+}
+
+void AudioSourceProviderGStreamer::handleRemovedDeinterleavePad(GstPad* pad)
+{
+ m_deinterleaveSourcePads--;
+
+ // Remove the queue ! appsink chain downstream of deinterleave.
+ GQuark quark = g_quark_from_static_string("peer");
+ GstPad* sinkPad = reinterpret_cast<GstPad*>(g_object_get_qdata(G_OBJECT(pad), quark));
+ GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(sinkPad));
+ GRefPtr<GstPad> queueSrcPad = adoptGRef(gst_element_get_static_pad(queue.get(), "src"));
+ GRefPtr<GstPad> appsinkSinkPad = adoptGRef(gst_pad_get_peer(queueSrcPad.get()));
+ GRefPtr<GstElement> sink = adoptGRef(gst_pad_get_parent_element(appsinkSinkPad.get()));
+ gst_element_set_state(sink.get(), GST_STATE_NULL);
+ gst_element_set_state(queue.get(), GST_STATE_NULL);
+ gst_element_unlink(queue.get(), sink.get());
+ gst_bin_remove_many(GST_BIN(m_audioSinkBin.get()), queue.get(), sink.get(), nullptr);
+}
+
+void AudioSourceProviderGStreamer::deinterleavePadsConfigured()
+{
+ ASSERT(m_client);
+ ASSERT(m_deinterleaveSourcePads == gNumberOfChannels);
+
+ m_client->setFormat(m_deinterleaveSourcePads, gSampleBitRate);
+}
+
+void AudioSourceProviderGStreamer::clearAdapters()
+{
+ WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
+ gst_adapter_clear(m_frontLeftAdapter);
+ gst_adapter_clear(m_frontRightAdapter);
+}
+
+} // WebCore
+
+#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)
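
The provider above accumulates each planar channel in a GstAdapter on the streaming thread and drains it from provideInput() on the rendering thread. On its own, the adapter consume idiom is roughly (function name made up for illustration):

#include <gst/base/gstadapter.h>

// Copy exactly framesToProcess floats out of the adapter if enough audio is queued,
// then drop the consumed bytes; otherwise leave the adapter untouched.
static bool pullFrames(GstAdapter* adapter, float* destination, size_t framesToProcess)
{
    gsize bytes = framesToProcess * sizeof(float);
    if (gst_adapter_available(adapter) < bytes)
        return false; // the caller can output silence instead
    gst_adapter_copy(adapter, destination, 0, bytes);
    gst_adapter_flush(adapter, bytes);
    return true;
}
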
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h
new file mode 100644
index 000000000..5b6480f3a
--- /dev/null
+++ b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 Igalia S.L
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AudioSourceProviderGStreamer_h
+#define AudioSourceProviderGStreamer_h
+
+#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)
+
+#include "AudioSourceProvider.h"
+#include "GRefPtrGStreamer.h"
+#include <gst/gst.h>
+#include <wtf/Forward.h>
+#include <wtf/Noncopyable.h>
+
+typedef struct _GstAdapter GstAdapter;
+typedef struct _GstAppSink GstAppSink;
+
+namespace WebCore {
+
+class AudioSourceProviderGStreamer : public AudioSourceProvider {
+ WTF_MAKE_NONCOPYABLE(AudioSourceProviderGStreamer);
+public:
+ AudioSourceProviderGStreamer();
+ ~AudioSourceProviderGStreamer();
+
+ void configureAudioBin(GstElement* audioBin, GstElement* teePredecessor);
+
+ void provideInput(AudioBus*, size_t framesToProcess) override;
+ void setClient(AudioSourceProviderClient*) override;
+ const AudioSourceProviderClient* client() const { return m_client; }
+
+ void handleNewDeinterleavePad(GstPad*);
+ void deinterleavePadsConfigured();
+ void handleRemovedDeinterleavePad(GstPad*);
+
+ GstFlowReturn handleAudioBuffer(GstAppSink*);
+ GstElement* getAudioBin() const { return m_audioSinkBin.get(); }
+ void clearAdapters();
+
+private:
+ GRefPtr<GstElement> m_audioSinkBin;
+ AudioSourceProviderClient* m_client;
+ int m_deinterleaveSourcePads;
+ GstAdapter* m_frontLeftAdapter;
+ GstAdapter* m_frontRightAdapter;
+ unsigned long m_deinterleavePadAddedHandlerId;
+ unsigned long m_deinterleaveNoMorePadsHandlerId;
+ unsigned long m_deinterleavePadRemovedHandlerId;
+ GMutex m_adapterMutex;
+};
+
+}
+#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)
+
+#endif // AudioSourceProviderGStreamer_h
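
configureAudioBin() exposes the bin's input by ghost-padding the internal tee's sink pad. Stripped of the WebKit specifics, that ghost-pad idiom is (bin and element names below are arbitrary):

#include <gst/gst.h>

// Build a bin whose external "sink" pad forwards straight into an internal tee.
static GstElement* makeTeeBin()
{
    GstElement* bin = gst_bin_new("audio-bin");
    GstElement* tee = gst_element_factory_make("tee", "audioTee");
    gst_bin_add(GST_BIN(bin), tee);

    GstPad* teeSinkPad = gst_element_get_static_pad(tee, "sink");
    gst_element_add_pad(bin, gst_ghost_pad_new("sink", teeSinkPad));
    gst_object_unref(teeSinkPad);
    return bin;
}
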
diff --git a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
index ff672f371..445c9793c 100644
--- a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
+++ b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011, 2012 Igalia S.L
+ * Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -26,9 +27,10 @@
#include "AudioIOCallback.h"
#include "GRefPtrGStreamer.h"
#include "GStreamerUtilities.h"
-#include <gst/audio/audio.h>
-#include <gst/pbutils/pbutils.h>
-#include <wtf/gobject/GUniquePtr.h>
+#include <gst/app/gstappsrc.h>
+#include <gst/audio/audio-info.h>
+#include <gst/pbutils/missing-plugins.h>
+#include <wtf/glib/GUniquePtr.h>
using namespace WebCore;
@@ -51,18 +53,22 @@ struct _WebKitWebAudioSourcePrivate {
AudioBus* bus;
AudioIOCallback* provider;
guint framesToPull;
+ guint bufferSize;
GRefPtr<GstElement> interleave;
- GRefPtr<GstElement> wavEncoder;
GRefPtr<GstTask> task;
GRecMutex mutex;
- GSList* pads; // List of queue sink pads. One queue for each planar audio channel.
- GstPad* sourcePad; // src pad of the element, interleaved wav data is pushed to it.
+ // List of appsrc. One appsrc for each planar audio channel.
+ Vector<GRefPtr<GstElement>> sources;
- bool newStreamEventPending;
- GstSegment segment;
+ // src pad of the element, interleaved wav data is pushed to it.
+ GstPad* sourcePad;
+
+ guint64 numberOfSamples;
+
+ GRefPtr<GstBufferPool> pool;
};
enum {
@@ -73,9 +79,9 @@ enum {
};
static GstStaticPadTemplate srcTemplate = GST_STATIC_PAD_TEMPLATE("src",
- GST_PAD_SRC,
- GST_PAD_ALWAYS,
- GST_STATIC_CAPS("audio/x-wav"));
+ GST_PAD_SRC,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS(GST_AUDIO_CAPS_MAKE(GST_AUDIO_NE(F32))));
GST_DEBUG_CATEGORY_STATIC(webkit_web_audio_src_debug);
#define GST_CAT_DEFAULT webkit_web_audio_src_debug
@@ -91,8 +97,8 @@ static GstCaps* getGStreamerMonoAudioCaps(float sampleRate)
{
return gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(sampleRate),
"channels", G_TYPE_INT, 1,
- "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32),
- "layout", G_TYPE_STRING, "non-interleaved", NULL);
+ "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
+ "layout", G_TYPE_STRING, "interleaved", nullptr);
}
static GstAudioChannelPosition webKitWebAudioGStreamerChannelPosition(int channelIndex)
@@ -178,17 +184,14 @@ static void webkit_web_audio_src_init(WebKitWebAudioSrc* src)
src->priv = priv;
new (priv) WebKitWebAudioSourcePrivate();
- priv->sourcePad = webkitGstGhostPadFromStaticTemplate(&srcTemplate, "src", 0);
+ priv->sourcePad = webkitGstGhostPadFromStaticTemplate(&srcTemplate, "src", nullptr);
gst_element_add_pad(GST_ELEMENT(src), priv->sourcePad);
- priv->provider = 0;
- priv->bus = 0;
-
- priv->newStreamEventPending = true;
- gst_segment_init(&priv->segment, GST_FORMAT_TIME);
+ priv->provider = nullptr;
+ priv->bus = nullptr;
g_rec_mutex_init(&priv->mutex);
- priv->task = gst_task_new(reinterpret_cast<GstTaskFunction>(webKitWebAudioSrcLoop), src, 0);
+ priv->task = adoptGRef(gst_task_new(reinterpret_cast<GstTaskFunction>(webKitWebAudioSrcLoop), src, nullptr));
gst_task_set_lock(priv->task.get(), &priv->mutex);
}
@@ -202,54 +205,40 @@ static void webKitWebAudioSrcConstructed(GObject* object)
ASSERT(priv->provider);
ASSERT(priv->sampleRate);
- priv->interleave = gst_element_factory_make("interleave", 0);
- priv->wavEncoder = gst_element_factory_make("wavenc", 0);
+ priv->interleave = gst_element_factory_make("interleave", nullptr);
if (!priv->interleave) {
GST_ERROR_OBJECT(src, "Failed to create interleave");
return;
}
- if (!priv->wavEncoder) {
- GST_ERROR_OBJECT(src, "Failed to create wavenc");
- return;
- }
-
- gst_bin_add_many(GST_BIN(src), priv->interleave.get(), priv->wavEncoder.get(), NULL);
- gst_element_link_pads_full(priv->interleave.get(), "src", priv->wavEncoder.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
+ gst_bin_add(GST_BIN(src), priv->interleave.get());
// For each channel of the bus create a new upstream branch for interleave, like:
- // queue ! capsfilter ! audioconvert. which is plugged to a new interleave request sinkpad.
+ // appsrc ! . which is plugged to a new interleave request sinkpad.
for (unsigned channelIndex = 0; channelIndex < priv->bus->numberOfChannels(); channelIndex++) {
- GUniquePtr<gchar> queueName(g_strdup_printf("webaudioQueue%u", channelIndex));
- GstElement* queue = gst_element_factory_make("queue", queueName.get());
- GstElement* capsfilter = gst_element_factory_make("capsfilter", 0);
- GstElement* audioconvert = gst_element_factory_make("audioconvert", 0);
-
+ GUniquePtr<gchar> appsrcName(g_strdup_printf("webaudioSrc%u", channelIndex));
+ GRefPtr<GstElement> appsrc = gst_element_factory_make("appsrc", appsrcName.get());
GRefPtr<GstCaps> monoCaps = adoptGRef(getGStreamerMonoAudioCaps(priv->sampleRate));
GstAudioInfo info;
gst_audio_info_from_caps(&info, monoCaps.get());
GST_AUDIO_INFO_POSITION(&info, 0) = webKitWebAudioGStreamerChannelPosition(channelIndex);
GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(&info));
- g_object_set(capsfilter, "caps", caps.get(), NULL);
-
- // Configure the queue for minimal latency.
- g_object_set(queue, "max-size-buffers", static_cast<guint>(1), NULL);
- GstPad* pad = gst_element_get_static_pad(queue, "sink");
- priv->pads = g_slist_prepend(priv->pads, pad);
+ // Configure the appsrc for minimal latency.
+ g_object_set(appsrc.get(), "max-bytes", static_cast<guint64>(2 * priv->bufferSize), "block", TRUE,
+ "blocksize", priv->bufferSize,
+ "format", GST_FORMAT_TIME, "caps", caps.get(), nullptr);
- gst_bin_add_many(GST_BIN(src), queue, capsfilter, audioconvert, NULL);
- gst_element_link_pads_full(queue, "src", capsfilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_link_pads_full(capsfilter, "src", audioconvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
- gst_element_link_pads_full(audioconvert, "src", priv->interleave.get(), 0, GST_PAD_LINK_CHECK_NOTHING);
+ priv->sources.append(appsrc);
+ gst_bin_add(GST_BIN(src), appsrc.get());
+ gst_element_link_pads_full(appsrc.get(), "src", priv->interleave.get(), "sink_%u", GST_PAD_LINK_CHECK_NOTHING);
}
- priv->pads = g_slist_reverse(priv->pads);
- // wavenc's src pad is the only visible pad of our element.
- GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->wavEncoder.get(), "src"));
+ // interleave's src pad is the only visible pad of our element.
+ GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->interleave.get(), "src"));
gst_ghost_pad_set_target(GST_GHOST_PAD(priv->sourcePad), targetPad.get());
}
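
The topology that results from this hunk, sketched for a stereo bus (illustrative; the "sink_%u" names come from interleave's request-pad template, the appsrc names from the g_strdup_printf() above):

//  appsrc "webaudioSrc0" --src--> sink_0 -+
//                                         interleave --src--> (ghost pad "src" of the element)
//  appsrc "webaudioSrc1" --src--> sink_1 -+
//
// Each appsrc advertises mono float caps whose GstAudioInfo position is
// stamped with that channel's position, so the per-channel identity is
// carried in the caps rather than by element ordering.
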
@@ -260,8 +249,6 @@ static void webKitWebAudioSrcFinalize(GObject* object)
g_rec_mutex_clear(&priv->mutex);
- g_slist_free_full(priv->pads, reinterpret_cast<GDestroyNotify>(gst_object_unref));
-
priv->~WebKitWebAudioSourcePrivate();
GST_CALL_PARENT(G_OBJECT_CLASS, finalize, ((GObject* )(src)));
}
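
The explicit destructor call kept above pairs with the placement new in _init: the private struct lives in storage owned by the GObject type system, so its C++ members are constructed in place and destroyed by hand, never deleted. A generic sketch of that pattern (illustrative; the types are placeholders, not the WebKit ones):

// Placement-new / explicit-destructor pattern for C++ members inside a
// GObject-allocated private struct (illustrative sketch).
#include <new>
#include <vector>

struct Private {
    std::vector<float> scratch; // stands in for the GRefPtr/Vector members
};

static void instanceInit(void* storage)
{
    new (storage) Private(); // construct in place, storage is not ours to allocate
}

static void instanceFinalize(void* storage)
{
    static_cast<Private*>(storage)->~Private(); // destroy, but do not free
}
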
@@ -283,6 +270,7 @@ static void webKitWebAudioSrcSetProperty(GObject* object, guint propertyId, cons
break;
case PROP_FRAMES:
priv->framesToPull = g_value_get_uint(value);
+ priv->bufferSize = sizeof(float) * priv->framesToPull;
break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID(object, propertyId, pspec);
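
The bufferSize cached above is the per-channel byte count for one render quantum, which the appsrc configuration earlier uses for its "blocksize" and "max-bytes" limits. Illustrative arithmetic below; the 128-frame quantum is an assumption for the example, the actual value comes from the "frames" property:

#include <glib.h>

const unsigned framesToPull = 128;                          // "frames" property (assumed value)
const unsigned bufferSize   = sizeof(float) * framesToPull; // 4 * 128 = 512 bytes per channel
const guint64  maxBytes     = 2 * bufferSize;               // appsrc "max-bytes" cap = 1024 bytes
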
@@ -320,68 +308,65 @@ static void webKitWebAudioSrcLoop(WebKitWebAudioSrc* src)
ASSERT(priv->bus);
ASSERT(priv->provider);
- if (!priv->provider || !priv->bus)
+ if (!priv->provider || !priv->bus) {
+ GST_ELEMENT_ERROR(src, CORE, FAILED, ("Internal WebAudioSrc error"), ("Can't start without provider or bus"));
+ gst_task_stop(src->priv->task.get());
return;
-
- GSList* channelBufferList = 0;
- register int i;
- unsigned bufferSize = priv->framesToPull * sizeof(float);
- for (i = g_slist_length(priv->pads) - 1; i >= 0; i--) {
- GstBuffer* channelBuffer = gst_buffer_new_and_alloc(bufferSize);
- ASSERT(channelBuffer);
- channelBufferList = g_slist_prepend(channelBufferList, channelBuffer);
- GstMapInfo info;
- gst_buffer_map(channelBuffer, &info, GST_MAP_READ);
- priv->bus->setChannelMemory(i, reinterpret_cast<float*>(info.data), priv->framesToPull);
- gst_buffer_unmap(channelBuffer, &info);
}
- // FIXME: Add support for local/live audio input.
- priv->provider->render(0, priv->bus, priv->framesToPull);
-
- GSList* padsIt = priv->pads;
- GSList* buffersIt = channelBufferList;
-
-#if GST_CHECK_VERSION(1, 2, 0)
- guint groupId = 0;
- if (priv->newStreamEventPending)
- groupId = gst_util_group_id_next();
-#endif
-
- for (i = 0; padsIt && buffersIt; padsIt = g_slist_next(padsIt), buffersIt = g_slist_next(buffersIt), ++i) {
- GstPad* pad = static_cast<GstPad*>(padsIt->data);
- GstBuffer* channelBuffer = static_cast<GstBuffer*>(buffersIt->data);
-
- // Send stream-start, segment and caps events downstream, along with the first buffer.
- if (priv->newStreamEventPending) {
- GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(pad));
- GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue.get(), "sink"));
- GUniquePtr<gchar> queueName(gst_element_get_name(queue.get()));
- GUniquePtr<gchar> streamId(g_strdup_printf("webaudio/%s", queueName.get()));
- GstEvent* streamStartEvent = gst_event_new_stream_start(streamId.get());
-#if GST_CHECK_VERSION(1, 2, 0)
- gst_event_set_group_id(streamStartEvent, groupId);
-#endif
- gst_pad_send_event(sinkPad.get(), streamStartEvent);
-
- GRefPtr<GstCaps> monoCaps = adoptGRef(getGStreamerMonoAudioCaps(priv->sampleRate));
- GstAudioInfo info;
- gst_audio_info_from_caps(&info, monoCaps.get());
- GST_AUDIO_INFO_POSITION(&info, 0) = webKitWebAudioGStreamerChannelPosition(i);
- GRefPtr<GstCaps> capsWithChannelPosition = adoptGRef(gst_audio_info_to_caps(&info));
- gst_pad_send_event(sinkPad.get(), gst_event_new_caps(capsWithChannelPosition.get()));
-
- gst_pad_send_event(sinkPad.get(), gst_event_new_segment(&priv->segment));
+ ASSERT(priv->pool);
+ GstClockTime timestamp = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate);
+ priv->numberOfSamples += priv->framesToPull;
+ GstClockTime duration = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate) - timestamp;
+
+ Vector<GRefPtr<GstBuffer>> channelBufferList;
+ channelBufferList.reserveInitialCapacity(priv->sources.size());
+ for (unsigned i = 0; i < priv->sources.size(); ++i) {
+ GRefPtr<GstBuffer> buffer;
+ GstFlowReturn ret = gst_buffer_pool_acquire_buffer(priv->pool.get(), &buffer.outPtr(), nullptr);
+ if (ret != GST_FLOW_OK) {
+ for (auto& buffer : channelBufferList)
+ unmapGstBuffer(buffer.get());
+
+ // FLUSHING and EOS are not errors.
+ if (ret < GST_FLOW_EOS || ret == GST_FLOW_NOT_LINKED)
+ GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to allocate buffer for flow: %s", gst_flow_get_name(ret)));
+ gst_task_stop(src->priv->task.get());
+ return;
}
- GstFlowReturn ret = gst_pad_chain(pad, channelBuffer);
- if (ret != GST_FLOW_OK)
- GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to push buffer on %s:%s flow: %s", GST_DEBUG_PAD_NAME(pad), gst_flow_get_name(ret)));
+ ASSERT(buffer);
+ GST_BUFFER_TIMESTAMP(buffer.get()) = timestamp;
+ GST_BUFFER_DURATION(buffer.get()) = duration;
+ mapGstBuffer(buffer.get(), GST_MAP_READWRITE);
+ priv->bus->setChannelMemory(i, reinterpret_cast<float*>(getGstBufferDataPointer(buffer.get())), priv->framesToPull);
+ channelBufferList.uncheckedAppend(WTFMove(buffer));
}
- priv->newStreamEventPending = false;
-
- g_slist_free(channelBufferList);
+ // FIXME: Add support for local/live audio input.
+ priv->provider->render(nullptr, priv->bus, priv->framesToPull);
+
+ ASSERT(channelBufferList.size() == priv->sources.size());
+ bool failed = false;
+ for (unsigned i = 0; i < priv->sources.size(); ++i) {
+ // Unmap before passing on the buffer.
+ auto& buffer = channelBufferList[i];
+ unmapGstBuffer(buffer.get());
+
+ if (failed)
+ continue;
+
+ auto& appsrc = priv->sources[i];
+ // Leak the buffer ref, because gst_app_src_push_buffer steals it.
+ GstFlowReturn ret = gst_app_src_push_buffer(GST_APP_SRC(appsrc.get()), buffer.leakRef());
+ if (ret != GST_FLOW_OK) {
+ // FLUSHING and EOS are not errors.
+ if (ret < GST_FLOW_EOS || ret == GST_FLOW_NOT_LINKED)
+ GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to push buffer on %s flow: %s", GST_OBJECT_NAME(appsrc.get()), gst_flow_get_name(ret)));
+ gst_task_stop(src->priv->task.get());
+ failed = true;
+ }
+ }
}
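
The timestamps produced in the loop above come from a running sample counter scaled to clock time, with the duration taken as the delta between consecutive scalings so rounding error never accumulates. A standalone sketch of that arithmetic (illustrative; stampBuffer() is a made-up helper):

// Illustrative timestamp/duration arithmetic for one render quantum.
#include <gst/gst.h>

static void stampBuffer(GstBuffer* buffer, guint64& numberOfSamples, guint64 sampleRate, guint64 framesToPull)
{
    GstClockTime timestamp = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate);
    numberOfSamples += framesToPull;
    GstClockTime duration = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate) - timestamp;

    GST_BUFFER_TIMESTAMP(buffer) = timestamp;
    GST_BUFFER_DURATION(buffer) = duration;
    // At 44100 Hz with 128 frames: first timestamp = 0, duration is roughly 2.9 ms.
}

The leakRef() in the push loop matches gst_app_src_push_buffer() taking ownership of the buffer (transfer full), so no extra unref is needed on success.
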
static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, GstStateChange transition)
@@ -393,14 +378,10 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
case GST_STATE_CHANGE_NULL_TO_READY:
if (!src->priv->interleave) {
gst_element_post_message(element, gst_missing_element_message_new(element, "interleave"));
- GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no interleave"));
- return GST_STATE_CHANGE_FAILURE;
- }
- if (!src->priv->wavEncoder) {
- gst_element_post_message(element, gst_missing_element_message_new(element, "wavenc"));
- GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no wavenc"));
+ GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (nullptr), ("no interleave"));
return GST_STATE_CHANGE_FAILURE;
}
+ src->priv->numberOfSamples = 0;
break;
default:
break;
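
The hunk that follows replaces the old per-loop gst_buffer_new_and_alloc() calls with a GstBufferPool tied to the element's state changes. Its lifecycle, condensed into a sketch (illustrative, not the patch code; startPool()/stopPool() are made-up helpers):

// GstBufferPool lifecycle sketch matching the state-change handling below.
#include <gst/gst.h>

static GstBufferPool* startPool(guint bufferSize)
{
    GstBufferPool* pool = gst_buffer_pool_new();
    GstStructure* config = gst_buffer_pool_get_config(pool);
    // Any caps, fixed buffer size, no min/max buffer count limits.
    gst_buffer_pool_config_set_params(config, nullptr, bufferSize, 0, 0);
    gst_buffer_pool_set_config(pool, config);
    if (!gst_buffer_pool_set_active(pool, TRUE)) {
        gst_object_unref(pool);
        return nullptr;
    }
    return pool;
}

static void stopPool(GstBufferPool* pool)
{
#if GST_CHECK_VERSION(1, 4, 0)
    gst_buffer_pool_set_flushing(pool, TRUE); // unblocks a pending acquire (1.4+)
#endif
    gst_buffer_pool_set_active(pool, FALSE);
    gst_object_unref(pool);
}

Unreffing a buffer obtained with gst_buffer_pool_acquire_buffer() returns it to the pool rather than freeing it, which is what keeps the streaming loop allocation-free.
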
@@ -413,16 +394,29 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs
}
switch (transition) {
- case GST_STATE_CHANGE_READY_TO_PAUSED:
+ case GST_STATE_CHANGE_READY_TO_PAUSED: {
GST_DEBUG_OBJECT(src, "READY->PAUSED");
- if (!gst_task_start(src->priv->task.get()))
+
+ src->priv->pool = gst_buffer_pool_new();
+ GstStructure* config = gst_buffer_pool_get_config(src->priv->pool.get());
+ gst_buffer_pool_config_set_params(config, nullptr, src->priv->bufferSize, 0, 0);
+ gst_buffer_pool_set_config(src->priv->pool.get(), config);
+ if (!gst_buffer_pool_set_active(src->priv->pool.get(), TRUE))
+ returnValue = GST_STATE_CHANGE_FAILURE;
+ else if (!gst_task_start(src->priv->task.get()))
returnValue = GST_STATE_CHANGE_FAILURE;
break;
+ }
case GST_STATE_CHANGE_PAUSED_TO_READY:
- src->priv->newStreamEventPending = true;
GST_DEBUG_OBJECT(src, "PAUSED->READY");
+
+#if GST_CHECK_VERSION(1, 4, 0)
+ gst_buffer_pool_set_flushing(src->priv->pool.get(), TRUE);
+#endif
if (!gst_task_join(src->priv->task.get()))
returnValue = GST_STATE_CHANGE_FAILURE;
+ gst_buffer_pool_set_active(src->priv->pool.get(), FALSE);
+ src->priv->pool = nullptr;
break;
default:
break;