Diffstat (limited to 'Source/WebCore/Modules/webaudio')
-rw-r--r-- Source/WebCore/Modules/webaudio/AnalyserNode.cpp | 35
-rw-r--r-- Source/WebCore/Modules/webaudio/AnalyserNode.h | 40
-rw-r--r-- Source/WebCore/Modules/webaudio/AnalyserNode.idl | 14
-rw-r--r-- Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp | 44
-rw-r--r-- Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h | 20
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp | 28
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h | 21
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp | 4
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h | 24
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBuffer.cpp | 93
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBuffer.h | 34
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBuffer.idl | 11
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBufferCallback.h | 7
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBufferCallback.idl | 4
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp | 277
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h | 54
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioBufferSourceNode.idl | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioContext.cpp | 712
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioContext.h | 251
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioContext.idl | 66
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp | 62
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioDestinationNode.h | 66
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioListener.cpp | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioListener.h | 15
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioListener.idl | 12
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNode.cpp | 205
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNode.h | 41
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNode.idl | 25
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNodeInput.cpp | 24
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNodeInput.h | 10
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp | 45
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioNodeOutput.h | 8
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioParam.cpp | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioParam.h | 20
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioParam.idl | 25
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp | 28
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioParamTimeline.h | 18
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioProcessingEvent.cpp | 17
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioProcessingEvent.h | 23
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioProcessingEvent.idl | 1
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp | 113
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h | 51
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp | 19
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioSummingJunction.h | 11
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadDSPKernel.cpp | 16
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadDSPKernel.h | 13
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp | 73
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadFilterNode.h | 34
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadFilterNode.idl | 32
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadProcessor.cpp | 11
-rw-r--r-- Source/WebCore/Modules/webaudio/BiquadProcessor.h | 39
-rw-r--r-- Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp | 10
-rw-r--r-- Source/WebCore/Modules/webaudio/ChannelMergerNode.h | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/ChannelMergerNode.idl | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp | 6
-rw-r--r-- Source/WebCore/Modules/webaudio/ChannelSplitterNode.h | 18
-rw-r--r-- Source/WebCore/Modules/webaudio/ConvolverNode.cpp | 43
-rw-r--r-- Source/WebCore/Modules/webaudio/ConvolverNode.h | 41
-rw-r--r-- Source/WebCore/Modules/webaudio/ConvolverNode.idl | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp | 58
-rw-r--r-- Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h | 41
-rw-r--r-- Source/WebCore/Modules/webaudio/DelayDSPKernel.h | 13
-rw-r--r-- Source/WebCore/Modules/webaudio/DelayNode.cpp | 17
-rw-r--r-- Source/WebCore/Modules/webaudio/DelayNode.h | 19
-rw-r--r-- Source/WebCore/Modules/webaudio/DelayProcessor.cpp | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/DelayProcessor.h | 9
-rw-r--r-- Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h | 23
-rw-r--r-- Source/WebCore/Modules/webaudio/GainNode.cpp | 4
-rw-r--r-- Source/WebCore/Modules/webaudio/GainNode.h | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp | 18
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h | 30
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h | 22
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp | 46
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioSource.h | 32
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp | 95
-rw-r--r-- Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h | 42
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.cpp | 18
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.h | 16
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp | 34
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioContext.h | 11
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioContext.idl | 6
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp | 38
-rw-r--r-- Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h | 23
-rw-r--r-- Source/WebCore/Modules/webaudio/OscillatorNode.cpp | 114
-rw-r--r-- Source/WebCore/Modules/webaudio/OscillatorNode.h | 54
-rw-r--r-- Source/WebCore/Modules/webaudio/OscillatorNode.idl | 31
-rw-r--r-- Source/WebCore/Modules/webaudio/PannerNode.cpp | 135
-rw-r--r-- Source/WebCore/Modules/webaudio/PannerNode.h | 65
-rw-r--r-- Source/WebCore/Modules/webaudio/PannerNode.idl | 49
-rw-r--r-- Source/WebCore/Modules/webaudio/PeriodicWave.cpp | 58
-rw-r--r-- Source/WebCore/Modules/webaudio/PeriodicWave.h | 27
-rw-r--r-- Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp | 3
-rw-r--r-- Source/WebCore/Modules/webaudio/RealtimeAnalyser.h | 7
-rw-r--r-- Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp | 75
-rw-r--r-- Source/WebCore/Modules/webaudio/ScriptProcessorNode.h | 30
-rw-r--r-- Source/WebCore/Modules/webaudio/ScriptProcessorNode.idl | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h | 15
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperNode.cpp | 56
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperNode.h | 23
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperNode.idl | 2
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp | 12
-rw-r--r-- Source/WebCore/Modules/webaudio/WaveShaperProcessor.h | 13
104 files changed, 2131 insertions(+), 2289 deletions(-)
diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.cpp b/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
index b54d3ae28..6aff9a63d 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
@@ -30,11 +30,10 @@
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
-#include "ExceptionCode.h"
namespace WebCore {
-AnalyserNode::AnalyserNode(AudioContext* context, float sampleRate)
+AnalyserNode::AnalyserNode(AudioContext& context, float sampleRate)
: AudioBasicInspectorNode(context, sampleRate, 2)
{
setNodeType(NodeTypeAnalyser);
@@ -72,40 +71,38 @@ void AnalyserNode::reset()
m_analyser.reset();
}
-void AnalyserNode::setFftSize(unsigned size, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setFftSize(unsigned size)
{
if (!m_analyser.setFftSize(size))
- ec = INDEX_SIZE_ERR;
+ return Exception { INDEX_SIZE_ERR };
+ return { };
}
-void AnalyserNode::setMinDecibels(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setMinDecibels(double k)
{
- if (k > maxDecibels()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (k > maxDecibels())
+ return Exception { INDEX_SIZE_ERR };
m_analyser.setMinDecibels(k);
+ return { };
}
-void AnalyserNode::setMaxDecibels(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setMaxDecibels(double k)
{
- if (k < minDecibels()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (k < minDecibels())
+ return Exception { INDEX_SIZE_ERR };
m_analyser.setMaxDecibels(k);
+ return { };
}
-void AnalyserNode::setSmoothingTimeConstant(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setSmoothingTimeConstant(double k)
{
- if (k < 0 || k > 1) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (k < 0 || k > 1)
+ return Exception { INDEX_SIZE_ERR };
m_analyser.setSmoothingTimeConstant(k);
+ return { };
}
} // namespace WebCore
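
The setter pattern above, returning ExceptionOr<void> instead of writing to an ExceptionCode& out-parameter, recurs throughout this patch. A rough, self-contained sketch of the idiom (the Exception and ExceptionOr types below are simplified stand-ins for illustration, not WTF's real implementations):

#include <cstdio>
#include <optional>

// Simplified stand-ins for WebCore's Exception/ExceptionOr, for illustration only.
struct Exception { int code; };

template<typename T> class ExceptionOr;

template<> class ExceptionOr<void> {
public:
    ExceptionOr() = default;                                      // success path: "return { };"
    ExceptionOr(Exception exception) : m_exception(exception) { } // failure path: "return Exception { ... };"
    bool hasException() const { return m_exception.has_value(); }
    const Exception& exception() const { return *m_exception; }
private:
    std::optional<Exception> m_exception;
};

constexpr int INDEX_SIZE_ERR = 1;

class Analyser {
public:
    ExceptionOr<void> setSmoothingTimeConstant(double k)
    {
        if (k < 0 || k > 1)
            return Exception { INDEX_SIZE_ERR }; // the caller decides what to do with it
        m_smoothingTimeConstant = k;
        return { };
    }
private:
    double m_smoothingTimeConstant { 0.8 };
};

int main()
{
    Analyser analyser;
    auto result = analyser.setSmoothingTimeConstant(2.5);
    if (result.hasException())
        std::printf("setter failed with code %d\n", result.exception().code); // bindings would raise a JS exception here
}

The payoff is that errors become part of the return type, so a setter can no longer silently succeed while also reporting an error, and callers cannot forget to check the out-parameter.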
diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.h b/Source/WebCore/Modules/webaudio/AnalyserNode.h
index 290c67aa9..8bc63b442 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.h
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.h
@@ -22,56 +22,50 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AnalyserNode_h
-#define AnalyserNode_h
+#pragma once
#include "AudioBasicInspectorNode.h"
#include "RealtimeAnalyser.h"
-#include <wtf/Forward.h>
namespace WebCore {
-class AnalyserNode : public AudioBasicInspectorNode {
+class AnalyserNode final : public AudioBasicInspectorNode {
public:
- static PassRefPtr<AnalyserNode> create(AudioContext* context, float sampleRate)
+ static Ref<AnalyserNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new AnalyserNode(context, sampleRate));
+ return adoptRef(*new AnalyserNode(context, sampleRate));
}
virtual ~AnalyserNode();
-
- // AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
- // Javascript bindings
unsigned fftSize() const { return m_analyser.fftSize(); }
- void setFftSize(unsigned size, ExceptionCode&);
+ ExceptionOr<void> setFftSize(unsigned);
unsigned frequencyBinCount() const { return m_analyser.frequencyBinCount(); }
- void setMinDecibels(double k, ExceptionCode&);
+ ExceptionOr<void> setMinDecibels(double);
double minDecibels() const { return m_analyser.minDecibels(); }
- void setMaxDecibels(double k, ExceptionCode&);
+ ExceptionOr<void> setMaxDecibels(double);
double maxDecibels() const { return m_analyser.maxDecibels(); }
- void setSmoothingTimeConstant(double k, ExceptionCode&);
+ ExceptionOr<void> setSmoothingTimeConstant(double);
double smoothingTimeConstant() const { return m_analyser.smoothingTimeConstant(); }
- void getFloatFrequencyData(JSC::Float32Array* array) { m_analyser.getFloatFrequencyData(array); }
- void getByteFrequencyData(JSC::Uint8Array* array) { m_analyser.getByteFrequencyData(array); }
- void getByteTimeDomainData(JSC::Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
+ void getFloatFrequencyData(const RefPtr<JSC::Float32Array>& array) { m_analyser.getFloatFrequencyData(array.get()); }
+ void getByteFrequencyData(const RefPtr<JSC::Uint8Array>& array) { m_analyser.getByteFrequencyData(array.get()); }
+ void getByteTimeDomainData(const RefPtr<JSC::Uint8Array>& array) { m_analyser.getByteTimeDomainData(array.get()); }
private:
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ AnalyserNode(AudioContext&, float sampleRate);
- AnalyserNode(AudioContext*, float sampleRate);
+ void process(size_t framesToProcess) final;
+ void reset() final;
+
+ double tailTime() const final { return 0; }
+ double latencyTime() const final { return 0; }
RealtimeAnalyser m_analyser;
};
} // namespace WebCore
-
-#endif // AnalyserNode_h
diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.idl b/Source/WebCore/Modules/webaudio/AnalyserNode.idl
index 458df84a6..d43f95910 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.idl
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.idl
@@ -26,21 +26,21 @@
Conditional=WEB_AUDIO,
JSGenerateToJSObject,
] interface AnalyserNode : AudioNode {
- [SetterRaisesException] attribute unsigned long fftSize;
+ [SetterMayThrowException] attribute unsigned long fftSize;
readonly attribute unsigned long frequencyBinCount;
// minDecibels / maxDecibels represent the range to scale the FFT analysis data for conversion to unsigned byte values.
- [SetterRaisesException] attribute double minDecibels;
- [SetterRaisesException] attribute double maxDecibels;
+ [SetterMayThrowException] attribute unrestricted double minDecibels;
+ [SetterMayThrowException] attribute unrestricted double maxDecibels;
// A value from 0.0 -> 1.0 where 0.0 represents no time averaging with the last analysis frame.
- [SetterRaisesException] attribute double smoothingTimeConstant;
+ [SetterMayThrowException] attribute unrestricted double smoothingTimeConstant;
// Copies the current frequency data into the passed array.
// If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped.
- void getFloatFrequencyData(Float32Array array);
- void getByteFrequencyData(Uint8Array array);
+ void getFloatFrequencyData(Float32Array? array); // FIXME: The parameter should not be nullable.
+ void getByteFrequencyData(Uint8Array? array); // FIXME: The parameter should not be nullable.
// Real-time waveform data
- void getByteTimeDomainData(Uint8Array array);
+ void getByteTimeDomainData(Uint8Array? array); // FIXME: The parameter should not be nullable.
};
diff --git a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
index 3698bf5ef..292f05b8c 100644
--- a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
+++ b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
@@ -38,7 +38,7 @@ namespace WebCore {
AsyncAudioDecoder::AsyncAudioDecoder()
{
// Start worker thread.
- MutexLocker lock(m_threadCreationMutex);
+ LockHolder lock(m_threadCreationMutex);
m_threadID = createThread(AsyncAudioDecoder::threadEntry, this, "Audio Decoder");
}
@@ -51,15 +51,12 @@ AsyncAudioDecoder::~AsyncAudioDecoder()
m_threadID = 0;
}
-void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+void AsyncAudioDecoder::decodeAsync(Ref<ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
{
ASSERT(isMainThread());
- ASSERT(audioData);
- if (!audioData)
- return;
- auto decodingTask = DecodingTask::create(audioData, sampleRate, successCallback, errorCallback);
- m_queue.append(std::move(decodingTask)); // note that ownership of the task is effectively taken by the queue.
+ auto decodingTask = std::make_unique<DecodingTask>(WTFMove(audioData), sampleRate, WTFMove(successCallback), WTFMove(errorCallback));
+ m_queue.append(WTFMove(decodingTask)); // note that ownership of the task is effectively taken by the queue.
}
// Asynchronously decode in this thread.
@@ -76,7 +73,7 @@ void AsyncAudioDecoder::runLoop()
{
// Wait until we have m_threadID established before starting the run loop.
- MutexLocker lock(m_threadCreationMutex);
+ LockHolder lock(m_threadCreationMutex);
}
// Keep running decoding tasks until we're killed.
@@ -87,40 +84,23 @@ void AsyncAudioDecoder::runLoop()
}
}
-std::unique_ptr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
-{
- return std::unique_ptr<DecodingTask>(new DecodingTask(audioData, sampleRate, successCallback, errorCallback));
-}
-
-AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
- : m_audioData(audioData)
+AsyncAudioDecoder::DecodingTask::DecodingTask(Ref<ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
+ : m_audioData(WTFMove(audioData))
, m_sampleRate(sampleRate)
- , m_successCallback(successCallback)
- , m_errorCallback(errorCallback)
+ , m_successCallback(WTFMove(successCallback))
+ , m_errorCallback(WTFMove(errorCallback))
{
}
void AsyncAudioDecoder::DecodingTask::decode()
{
- ASSERT(m_audioData.get());
- if (!m_audioData.get())
- return;
-
// Do the actual decoding and invoke the callback.
m_audioBuffer = AudioBuffer::createFromAudioFileData(m_audioData->data(), m_audioData->byteLength(), false, sampleRate());
// Decoding is finished, but we need to do the callbacks on the main thread.
- callOnMainThread(notifyCompleteDispatch, this);
-}
-
-void AsyncAudioDecoder::DecodingTask::notifyCompleteDispatch(void* userData)
-{
- AsyncAudioDecoder::DecodingTask* task = reinterpret_cast<AsyncAudioDecoder::DecodingTask*>(userData);
- ASSERT(task);
- if (!task)
- return;
-
- task->notifyComplete();
+ callOnMainThread([this] {
+ notifyComplete();
+ });
}
void AsyncAudioDecoder::DecodingTask::notifyComplete()
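
The hunk above is a common modernization: the static notifyCompleteDispatch(void*) trampoline becomes a lambda handed to callOnMainThread(). A minimal sketch of the two shapes, with callOnMainThread() stubbed as a plain queue (in WebKit it posts the function to the main thread's run loop, and the decoder manages the task's lifetime separately):

#include <cstdio>
#include <functional>
#include <vector>

// Stand-in for WTF's callOnMainThread(): here it just queues the task.
static std::vector<std::function<void()>> mainThreadQueue;
static void callOnMainThread(std::function<void()>&& task) { mainThreadQueue.push_back(std::move(task)); }

struct DecodingTask {
    // Old shape: a static trampoline recovering the object from a void* userData.
    static void notifyCompleteDispatch(void* userData) { static_cast<DecodingTask*>(userData)->notifyComplete(); }

    // New shape: capture `this` in a lambda; no cast and no null check needed.
    void decode() { callOnMainThread([this] { notifyComplete(); }); }

    void notifyComplete() { std::printf("decoding finished\n"); }
};

int main()
{
    DecodingTask task; // in this sketch the caller keeps `task` alive until the queued work runs
    task.decode();
    for (auto& work : mainThreadQueue)
        work();        // drain, as the main run loop eventually would
}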
diff --git a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
index ff096ecaf..bb5c5e337 100644
--- a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
+++ b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
@@ -22,13 +22,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AsyncAudioDecoder_h
-#define AsyncAudioDecoder_h
+#pragma once
#include <memory>
#include <wtf/Forward.h>
#include <wtf/MessageQueue.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/Threading.h>
@@ -51,29 +49,25 @@ public:
~AsyncAudioDecoder();
// Must be called on the main thread.
- void decodeAsync(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+ void decodeAsync(Ref<JSC::ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback);
private:
class DecodingTask {
WTF_MAKE_NONCOPYABLE(DecodingTask);
public:
- static std::unique_ptr<DecodingTask> create(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
-
+ DecodingTask(Ref<JSC::ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback);
void decode();
private:
- DecodingTask(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
-
- JSC::ArrayBuffer* audioData() { return m_audioData.get(); }
+ JSC::ArrayBuffer& audioData() { return m_audioData; }
float sampleRate() const { return m_sampleRate; }
AudioBufferCallback* successCallback() { return m_successCallback.get(); }
AudioBufferCallback* errorCallback() { return m_errorCallback.get(); }
AudioBuffer* audioBuffer() { return m_audioBuffer.get(); }
- static void notifyCompleteDispatch(void* userData);
void notifyComplete();
- RefPtr<JSC::ArrayBuffer> m_audioData;
+ Ref<JSC::ArrayBuffer> m_audioData;
float m_sampleRate;
RefPtr<AudioBufferCallback> m_successCallback;
RefPtr<AudioBufferCallback> m_errorCallback;
@@ -84,10 +78,8 @@ private:
void runLoop();
WTF::ThreadIdentifier m_threadID;
- Mutex m_threadCreationMutex;
+ Lock m_threadCreationMutex;
MessageQueue<DecodingTask> m_queue;
};
} // namespace WebCore
-
-#endif // AsyncAudioDecoder_h
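
Besides swapping Mutex/MutexLocker for WTF's Lock/LockHolder, the header keeps the lock whose only job is the startup handshake visible in the .cpp diff: the constructor holds m_threadCreationMutex while spawning the worker, and the worker's first act is to acquire and immediately release it, so m_threadID is fully established before the run loop proceeds. A sketch of that handshake with std::mutex and std::thread standing in for the WTF types (Worker is an illustrative name):

#include <cstdio>
#include <mutex>
#include <thread>

class Worker {
public:
    Worker()
    {
        std::lock_guard<std::mutex> lock(m_creationMutex); // held until m_thread is assigned
        m_thread = std::thread([this] { runLoop(); });
    }
    ~Worker() { m_thread.join(); }

private:
    void runLoop()
    {
        {
            // Blocks until the constructor finishes, so m_thread is valid below.
            std::lock_guard<std::mutex> lock(m_creationMutex);
        }
        std::printf("worker sees a fully constructed thread handle\n");
    }

    std::mutex m_creationMutex;
    std::thread m_thread;
};

int main() { Worker worker; }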
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
index 58fd9b2ab..27e56c4c6 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
@@ -28,15 +28,13 @@
#include "AudioBasicInspectorNode.h"
-#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
namespace WebCore {
-AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext* context, float sampleRate, unsigned outputChannelCount)
+AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext& context, float sampleRate, unsigned outputChannelCount)
: AudioNode(context, sampleRate)
- , m_needAutomaticPull(false)
{
addInput(std::make_unique<AudioNodeInput>(this));
addOutput(std::make_unique<AudioNodeOutput>(this, outputChannelCount));
@@ -51,29 +49,31 @@ void AudioBasicInspectorNode::pullInputs(size_t framesToProcess)
input(0)->pull(output(0)->bus(), framesToProcess);
}
-void AudioBasicInspectorNode::connect(AudioNode* destination, unsigned outputIndex, unsigned inputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioBasicInspectorNode::connect(AudioNode& destination, unsigned outputIndex, unsigned inputIndex)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
- AudioNode::connect(destination, outputIndex, inputIndex, ec);
+ auto result = AudioNode::connect(destination, outputIndex, inputIndex);
updatePullStatus();
+ return result;
}
-void AudioBasicInspectorNode::disconnect(unsigned outputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioBasicInspectorNode::disconnect(unsigned outputIndex)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
- AudioNode::disconnect(outputIndex, ec);
+ auto result = AudioNode::disconnect(outputIndex);
updatePullStatus();
+ return result;
}
void AudioBasicInspectorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
ASSERT(input == this->input(0));
if (input != this->input(0))
@@ -93,13 +93,13 @@ void AudioBasicInspectorNode::checkNumberOfChannelsForInput(AudioNodeInput* inpu
void AudioBasicInspectorNode::updatePullStatus()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
if (output(0)->isConnected()) {
// When an AudioBasicInspectorNode is connected to a downstream node, it will get pulled by the
// downstream node, thus remove it from the context's automatic pull list.
if (m_needAutomaticPull) {
- context()->removeAutomaticPullNode(this);
+ context().removeAutomaticPullNode(this);
m_needAutomaticPull = false;
}
} else {
@@ -107,11 +107,11 @@ void AudioBasicInspectorNode::updatePullStatus()
if (numberOfInputConnections && !m_needAutomaticPull) {
// When an AudioBasicInspectorNode is not connected to any downstream node while still connected from
// upstream node(s), add it to the context's automatic pull list.
- context()->addAutomaticPullNode(this);
+ context().addAutomaticPullNode(this);
m_needAutomaticPull = true;
} else if (!numberOfInputConnections && m_needAutomaticPull) {
// The AudioBasicInspectorNode is connected to nothing, remove it from the context's automatic pull list.
- context()->removeAutomaticPullNode(this);
+ context().removeAutomaticPullNode(this);
m_needAutomaticPull = false;
}
}
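
updatePullStatus() above maintains the invariant that an inspector node sits on the context's automatic-pull list exactly when it has upstream connections but no downstream consumer to pull it. A standalone model of those add/remove decisions (PullListModel and its names are illustrative, not WebKit API):

#include <cstdio>
#include <set>

// Models the three branches of updatePullStatus().
struct PullListModel {
    std::set<const void*> automaticPullNodes;

    void update(const void* node, bool outputConnected, unsigned inputConnections, bool& needAutomaticPull)
    {
        if (outputConnected) {
            // A downstream node will pull us; the context need not.
            if (needAutomaticPull) {
                automaticPullNodes.erase(node);
                needAutomaticPull = false;
            }
        } else if (inputConnections && !needAutomaticPull) {
            // Connected from upstream only: the context must pull us each render quantum.
            automaticPullNodes.insert(node);
            needAutomaticPull = true;
        } else if (!inputConnections && needAutomaticPull) {
            // Fully disconnected: stop pulling.
            automaticPullNodes.erase(node);
            needAutomaticPull = false;
        }
    }
};

int main()
{
    PullListModel context;
    int node = 0;
    bool needPull = false;
    context.update(&node, /* outputConnected */ false, /* inputConnections */ 1, needPull);
    std::printf("on pull list: %zu\n", context.automaticPullNodes.size()); // prints 1
}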
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
index 2f4258ea3..b86f1a387 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioBasicInspectorNode_h
-#define AudioBasicInspectorNode_h
+#pragma once
#include "AudioNode.h"
@@ -34,19 +33,17 @@ namespace WebCore {
// AudioContext before the end of each render quantum so that it can inspect the audio stream.
class AudioBasicInspectorNode : public AudioNode {
public:
- AudioBasicInspectorNode(AudioContext*, float sampleRate, unsigned outputChannelCount);
-
- // AudioNode
- virtual void pullInputs(size_t framesToProcess) override;
- virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionCode&) override;
- virtual void disconnect(unsigned outputIndex, ExceptionCode&) override;
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+ AudioBasicInspectorNode(AudioContext&, float sampleRate, unsigned outputChannelCount);
private:
+ void pullInputs(size_t framesToProcess) override;
+ ExceptionOr<void> connect(AudioNode&, unsigned outputIndex, unsigned inputIndex) override;
+ ExceptionOr<void> disconnect(unsigned outputIndex) override;
+ void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+
void updatePullStatus();
- bool m_needAutomaticPull; // When setting to true, AudioBasicInspectorNode will be pulled automaticlly by AudioContext before the end of each render quantum.
+
+ bool m_needAutomaticPull { false }; // When setting to true, AudioBasicInspectorNode will be pulled automatically by AudioContext before the end of each render quantum.
};
} // namespace WebCore
-
-#endif // AudioBasicInspectorNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
index afa67a9ad..4279a0916 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
@@ -36,7 +36,7 @@
namespace WebCore {
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(std::make_unique<AudioNodeInput>(this));
@@ -102,7 +102,7 @@ void AudioBasicProcessorNode::reset()
// uninitialize and then re-initialize with the new channel count.
void AudioBasicProcessorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
ASSERT(input == this->input(0));
if (input != this->input(0))
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
index d8946e1c9..e7e116b75 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
@@ -22,12 +22,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioBasicProcessorNode_h
-#define AudioBasicProcessorNode_h
+#pragma once
#include "AudioNode.h"
#include <memory>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/Threading.h>
@@ -40,29 +38,27 @@ class AudioProcessor;
// AudioBasicProcessorNode is an AudioNode with one input and one output where the input and output have the same number of channels.
class AudioBasicProcessorNode : public AudioNode {
public:
- AudioBasicProcessorNode(AudioContext*, float sampleRate);
+ AudioBasicProcessorNode(AudioContext&, float sampleRate);
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void pullInputs(size_t framesToProcess) override;
- virtual void reset() override;
- virtual void initialize() override;
- virtual void uninitialize() override;
+ void process(size_t framesToProcess) override;
+ void pullInputs(size_t framesToProcess) override;
+ void reset() override;
+ void initialize() override;
+ void uninitialize() override;
// Called in the main thread when the number of channels for the input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+ void checkNumberOfChannelsForInput(AudioNodeInput*) override;
// Returns the number of channels for both the input and the output.
unsigned numberOfChannels();
protected:
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
AudioProcessor* processor() { return m_processor.get(); }
std::unique_ptr<AudioProcessor> m_processor;
};
} // namespace WebCore
-
-#endif // AudioBasicProcessorNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBuffer.cpp b/Source/WebCore/Modules/webaudio/AudioBuffer.cpp
index 609ab567e..d14e6694c 100644
--- a/Source/WebCore/Modules/webaudio/AudioBuffer.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBuffer.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,101 +32,110 @@
#include "AudioBuffer.h"
-#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioFileReader.h"
-#include "ExceptionCode.h"
-#include "ExceptionCodePlaceholder.h"
-
-#include <runtime/Operations.h>
+#include <runtime/JSCInlines.h>
#include <runtime/TypedArrayInlines.h>
namespace WebCore {
-PassRefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+RefPtr<AudioBuffer> AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
{
if (sampleRate < 22050 || sampleRate > 96000 || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfFrames)
return nullptr;
-
- return adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
+
+ auto buffer = adoptRef(*new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate));
+ if (!buffer->m_length)
+ return nullptr;
+
+ return WTFMove(buffer);
}
-PassRefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
+RefPtr<AudioBuffer> AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate)
{
RefPtr<AudioBus> bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate);
- if (bus.get())
- return adoptRef(new AudioBuffer(bus.get()));
-
- return nullptr;
+ if (!bus)
+ return nullptr;
+ return adoptRef(*new AudioBuffer(*bus));
}
AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
- : m_gain(1.0)
- , m_sampleRate(sampleRate)
+ : m_sampleRate(sampleRate)
, m_length(numberOfFrames)
{
m_channels.reserveCapacity(numberOfChannels);
for (unsigned i = 0; i < numberOfChannels; ++i) {
- RefPtr<Float32Array> channelDataArray = Float32Array::create(m_length);
+ auto channelDataArray = Float32Array::create(m_length);
+ if (!channelDataArray) {
+ invalidate();
+ break;
+ }
+
channelDataArray->setNeuterable(false);
- m_channels.append(channelDataArray);
+ m_channels.append(WTFMove(channelDataArray));
}
}
-AudioBuffer::AudioBuffer(AudioBus* bus)
- : m_gain(1.0)
- , m_sampleRate(bus->sampleRate())
- , m_length(bus->length())
+AudioBuffer::AudioBuffer(AudioBus& bus)
+ : m_sampleRate(bus.sampleRate())
+ , m_length(bus.length())
{
// Copy audio data from the bus to the Float32Arrays we manage.
- unsigned numberOfChannels = bus->numberOfChannels();
+ unsigned numberOfChannels = bus.numberOfChannels();
m_channels.reserveCapacity(numberOfChannels);
for (unsigned i = 0; i < numberOfChannels; ++i) {
- RefPtr<Float32Array> channelDataArray = Float32Array::create(m_length);
+ auto channelDataArray = Float32Array::create(m_length);
+ if (!channelDataArray) {
+ invalidate();
+ break;
+ }
+
channelDataArray->setNeuterable(false);
- channelDataArray->setRange(bus->channel(i)->data(), m_length, 0);
- m_channels.append(channelDataArray);
+ channelDataArray->setRange(bus.channel(i)->data(), m_length, 0);
+ m_channels.append(WTFMove(channelDataArray));
}
}
+void AudioBuffer::invalidate()
+{
+ releaseMemory();
+ m_length = 0;
+}
+
void AudioBuffer::releaseMemory()
{
m_channels.clear();
}
-PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionCode& ec)
+ExceptionOr<Ref<Float32Array>> AudioBuffer::getChannelData(unsigned channelIndex)
{
- if (channelIndex >= m_channels.size()) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
-
- Float32Array* channelData = m_channels[channelIndex].get();
- return Float32Array::create(channelData->buffer(), channelData->byteOffset(), channelData->length());
+ if (channelIndex >= m_channels.size())
+ return Exception { SYNTAX_ERR };
+ auto& channelData = *m_channels[channelIndex];
+ auto array = Float32Array::create(channelData.unsharedBuffer(), channelData.byteOffset(), channelData.length());
+ RELEASE_ASSERT(array);
+ return array.releaseNonNull();
}
-Float32Array* AudioBuffer::getChannelData(unsigned channelIndex)
+Float32Array* AudioBuffer::channelData(unsigned channelIndex)
{
if (channelIndex >= m_channels.size())
return nullptr;
-
return m_channels[channelIndex].get();
}
void AudioBuffer::zero()
{
- for (unsigned i = 0; i < m_channels.size(); ++i) {
- if (getChannelData(i))
- getChannelData(i)->zeroRange(0, length());
- }
+ for (auto& channel : m_channels)
+ channel->zeroRange(0, length());
}
size_t AudioBuffer::memoryCost() const
{
size_t cost = 0;
- for (unsigned i = 0; i < m_channels.size() ; ++i)
- cost += m_channels[i]->byteLength();
+ for (auto& channel : m_channels)
+ cost += channel->byteLength();
return cost;
}
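
Both constructors above now treat Float32Array::create() as fallible: the first failed channel allocation triggers invalidate(), which frees any channels already allocated and zeroes m_length, and create() then reports the whole buffer as a failure by returning nullptr. A compact sketch of that all-or-nothing allocation pattern, with std::vector standing in for the typed arrays and an artificial failure threshold:

#include <cstdio>
#include <memory>
#include <vector>

// Stand-in for a fallible Float32Array::create(); pretend huge allocations fail.
static std::unique_ptr<std::vector<float>> tryCreateChannel(size_t frames)
{
    if (frames > (1u << 28)) // illustrative failure threshold
        return nullptr;
    return std::make_unique<std::vector<float>>(frames, 0.0f);
}

class Buffer {
public:
    static std::unique_ptr<Buffer> create(unsigned channels, size_t frames)
    {
        auto buffer = std::unique_ptr<Buffer>(new Buffer(channels, frames));
        if (!buffer->m_length) // invalidate() ran: partial allocation means total failure
            return nullptr;
        return buffer;
    }

private:
    Buffer(unsigned channels, size_t frames) : m_length(frames)
    {
        m_channels.reserve(channels);
        for (unsigned i = 0; i < channels; ++i) {
            auto channel = tryCreateChannel(m_length);
            if (!channel) {
                invalidate();
                break;
            }
            m_channels.push_back(std::move(channel));
        }
    }
    void invalidate() { m_channels.clear(); m_length = 0; }

    size_t m_length;
    std::vector<std::unique_ptr<std::vector<float>>> m_channels;
};

int main()
{
    auto ok = Buffer::create(2, 44100);
    auto bad = Buffer::create(2, size_t(1) << 29);
    std::printf("ok=%d bad=%d\n", ok != nullptr, bad != nullptr); // ok=1 bad=0
}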
diff --git a/Source/WebCore/Modules/webaudio/AudioBuffer.h b/Source/WebCore/Modules/webaudio/AudioBuffer.h
index d52c02805..9c9dd1dac 100644
--- a/Source/WebCore/Modules/webaudio/AudioBuffer.h
+++ b/Source/WebCore/Modules/webaudio/AudioBuffer.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,27 +26,22 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioBuffer_h
-#define AudioBuffer_h
+#pragma once
+#include "ExceptionOr.h"
#include <runtime/Float32Array.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/RefPtr.h>
#include <wtf/Vector.h>
namespace WebCore {
class AudioBus;
-typedef int ExceptionCode;
-
class AudioBuffer : public RefCounted<AudioBuffer> {
public:
- static PassRefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+ static RefPtr<AudioBuffer> create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
- // Returns 0 if data is not a valid audio file.
- static PassRefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
+ // Returns nullptr if data is not a valid audio file.
+ static RefPtr<AudioBuffer> createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate);
// Format
size_t length() const { return m_length; }
@@ -55,32 +50,31 @@ public:
// Channel data access
unsigned numberOfChannels() const { return m_channels.size(); }
- PassRefPtr<Float32Array> getChannelData(unsigned channelIndex, ExceptionCode&);
- Float32Array* getChannelData(unsigned channelIndex);
+ ExceptionOr<Ref<Float32Array>> getChannelData(unsigned channelIndex);
+ Float32Array* channelData(unsigned channelIndex);
void zero();
// Scalar gain
double gain() const { return m_gain; }
void setGain(double gain) { m_gain = gain; }
- // Because an AudioBuffer has a JavaScript wrapper, which will be garbage collected, it may take awhile for this object to be deleted.
+ // Because an AudioBuffer has a JavaScript wrapper, which will be garbage collected, it may take a while for this object to be deleted.
// releaseMemory() can be called when the AudioContext goes away, so we can release the memory earlier than when the garbage collection happens.
// Careful! Only call this when the page unloads, after the AudioContext is no longer processing.
void releaseMemory();
size_t memoryCost() const;
-protected:
+private:
AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
- explicit AudioBuffer(AudioBus*);
+ explicit AudioBuffer(AudioBus&);
+
+ void invalidate();
- double m_gain; // scalar gain
+ double m_gain { 1.0 }; // scalar gain
float m_sampleRate;
size_t m_length;
-
Vector<RefPtr<Float32Array>> m_channels;
};
} // namespace WebCore
-
-#endif // AudioBuffer_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBuffer.idl b/Source/WebCore/Modules/webaudio/AudioBuffer.idl
index 37a570040..a1a902e06 100644
--- a/Source/WebCore/Modules/webaudio/AudioBuffer.idl
+++ b/Source/WebCore/Modules/webaudio/AudioBuffer.idl
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,14 +29,15 @@
[
Conditional=WEB_AUDIO,
ImplementationLacksVTable,
+ ReportExtraMemoryCost,
] interface AudioBuffer {
readonly attribute long length; // in sample-frames
- readonly attribute float duration; // in seconds
- readonly attribute float sampleRate; // in sample-frames per second
+ readonly attribute unrestricted float duration; // in seconds
+ readonly attribute unrestricted float sampleRate; // in sample-frames per second
- attribute float gain; // linear gain (default 1.0)
+ attribute unrestricted float gain; // linear gain (default 1.0)
// Channel access
readonly attribute unsigned long numberOfChannels;
- [RaisesException] Float32Array getChannelData(unsigned long channelIndex);
+ [MayThrowException] Float32Array getChannelData(unsigned long channelIndex);
};
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferCallback.h b/Source/WebCore/Modules/webaudio/AudioBufferCallback.h
index feebc469b..27cc6ac79 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferCallback.h
+++ b/Source/WebCore/Modules/webaudio/AudioBufferCallback.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioBufferCallback_h
-#define AudioBufferCallback_h
+#pragma once
#if ENABLE(WEB_AUDIO)
@@ -39,8 +38,6 @@ public:
virtual bool handleEvent(AudioBuffer*) = 0;
};
-} // namespace
+} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
-
-#endif // AudioBufferCallback_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl b/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl
index 913f57779..74577fd0e 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl
+++ b/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl
@@ -25,6 +25,4 @@
[
Conditional=WEB_AUDIO,
JSGenerateToJSObject,
-] callback interface AudioBufferCallback {
- boolean handleEvent(AudioBuffer audioBuffer);
-};
+] callback AudioBufferCallback = void (AudioBuffer audioBuffer);
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
index 8ee6d61b9..11319a85c 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
@@ -28,17 +28,14 @@
#include "AudioBufferSourceNode.h"
+#include "AudioBuffer.h"
#include "AudioContext.h"
#include "AudioNodeOutput.h"
+#include "AudioParam.h"
#include "AudioUtilities.h"
#include "FloatConversion.h"
-#include "ScriptCallStack.h"
-#include "ScriptController.h"
+#include "PannerNode.h"
#include "ScriptExecutionContext.h"
-#include <algorithm>
-#include <wtf/MainThread.h>
-#include <wtf/MathExtras.h>
-#include <wtf/StdLibExtras.h>
namespace WebCore {
@@ -49,14 +46,14 @@ const double DefaultGrainDuration = 0.020; // 20ms
// to minimize linear interpolation aliasing.
const double MaxRate = 1024;
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
+Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext& context, float sampleRate)
{
- return adoptRef(new AudioBufferSourceNode(context, sampleRate));
+ return adoptRef(*new AudioBufferSourceNode(context, sampleRate));
}
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext& context, float sampleRate)
: AudioScheduledSourceNode(context, sampleRate)
- , m_buffer(0)
+ , m_buffer(nullptr)
, m_isLooping(false)
, m_loopStart(0)
, m_loopEnd(0)
@@ -65,12 +62,12 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sample
, m_grainOffset(0.0)
, m_grainDuration(DefaultGrainDuration)
, m_lastGain(1.0)
- , m_pannerNode(0)
+ , m_pannerNode(nullptr)
{
setNodeType(NodeTypeAudioBufferSource);
m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
- m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, 0.0, MaxRate);
+ m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, -MaxRate, MaxRate);
// Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
addOutput(std::make_unique<AudioNodeOutput>(this, 1));
@@ -86,23 +83,23 @@ AudioBufferSourceNode::~AudioBufferSourceNode()
void AudioBufferSourceNode::process(size_t framesToProcess)
{
- AudioBus* outputBus = output(0)->bus();
+ auto& outputBus = *output(0)->bus();
if (!isInitialized()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
// The audio thread can't block on this lock, so we use std::try_to_lock instead.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
- outputBus->zero();
+ outputBus.zero();
return;
}
if (!buffer()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
@@ -110,33 +107,32 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
// before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
// In this case, if the buffer has just been changed and we're not quite ready yet, then just output silence.
if (numberOfChannels() != buffer()->numberOfChannels()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
size_t quantumFrameOffset;
size_t bufferFramesToProcess;
-
updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);
if (!bufferFramesToProcess) {
- outputBus->zero();
+ outputBus.zero();
return;
}
- for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
- m_destinationChannels[i] = outputBus->channel(i)->mutableData();
+ for (unsigned i = 0; i < outputBus.numberOfChannels(); ++i)
+ m_destinationChannels[i] = outputBus.channel(i)->mutableData();
// Render by reading directly from the buffer.
- if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) {
- outputBus->zero();
+ if (!renderFromBuffer(&outputBus, quantumFrameOffset, bufferFramesToProcess)) {
+ outputBus.zero();
return;
}
// Apply the gain (in-place) to the output bus.
float totalGain = gain()->value() * m_buffer->gain();
- outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
- outputBus->clearSilentFlag();
+ outputBus.copyWithGainFrom(outputBus, &m_lastGain, totalGain);
+ outputBus.clearSilentFlag();
}
// Returns true if we're finished.
@@ -160,7 +156,7 @@ bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsign
bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// Basic sanity checking
ASSERT(bus);
@@ -200,47 +196,54 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
size_t bufferLength = buffer()->length();
double bufferSampleRate = buffer()->sampleRate();
+ double pitchRate = totalPitchRate();
+ bool reverse = pitchRate < 0;
// Avoid converting from time to sample-frames twice by computing
// the grain end time first before computing the sample frame.
- unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength;
-
- // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
- // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
- // https://bugs.webkit.org/show_bug.cgi?id=77224
+ unsigned maxFrame;
if (m_isGrain)
- endFrame += 512;
+ maxFrame = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate);
+ else
+ maxFrame = bufferLength;
// Do some sanity checking.
- if (endFrame > bufferLength)
- endFrame = bufferLength;
- if (m_virtualReadIndex >= endFrame)
+ if (maxFrame > bufferLength)
+ maxFrame = bufferLength;
+ if (reverse && m_virtualReadIndex <= 0)
+ m_virtualReadIndex = maxFrame - 1;
+ else if (!reverse && m_virtualReadIndex >= maxFrame)
m_virtualReadIndex = 0; // reset to start
// If the .loop attribute is true, then values of m_loopStart == 0 && m_loopEnd == 0 implies
// that we should use the entire buffer as the loop, otherwise use the loop values in m_loopStart and m_loopEnd.
- double virtualEndFrame = endFrame;
- double virtualDeltaFrames = endFrame;
+ double virtualMaxFrame = maxFrame;
+ double virtualMinFrame = 0;
+ double virtualDeltaFrames = maxFrame;
if (loop() && (m_loopStart || m_loopEnd) && m_loopStart >= 0 && m_loopEnd > 0 && m_loopStart < m_loopEnd) {
// Convert from seconds to sample-frames.
- double loopStartFrame = m_loopStart * buffer()->sampleRate();
- double loopEndFrame = m_loopEnd * buffer()->sampleRate();
+ double loopMinFrame = m_loopStart * buffer()->sampleRate();
+ double loopMaxFrame = m_loopEnd * buffer()->sampleRate();
- virtualEndFrame = std::min(loopEndFrame, virtualEndFrame);
- virtualDeltaFrames = virtualEndFrame - loopStartFrame;
+ virtualMaxFrame = std::min(loopMaxFrame, virtualMaxFrame);
+ virtualMinFrame = std::max(loopMinFrame, virtualMinFrame);
+ virtualDeltaFrames = virtualMaxFrame - virtualMinFrame;
}
- double pitchRate = totalPitchRate();
-
// Sanity check that our playback rate isn't larger than the loop size.
- if (pitchRate >= virtualDeltaFrames)
+ if (fabs(pitchRate) >= virtualDeltaFrames)
return false;
// Get local copy.
double virtualReadIndex = m_virtualReadIndex;
+ bool needsInterpolation = virtualReadIndex != floor(virtualReadIndex)
+ || virtualDeltaFrames != floor(virtualDeltaFrames)
+ || virtualMaxFrame != floor(virtualMaxFrame)
+ || virtualMinFrame != floor(virtualMinFrame);
+
// Render loop - reading from the source buffer to the destination using linear interpolation.
int framesToProcess = numberOfFrames;
@@ -249,14 +252,12 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
// Optimize for the very common case of playing back with pitchRate == 1.
// We can avoid the linear interpolation.
- if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)
- && virtualDeltaFrames == floor(virtualDeltaFrames)
- && virtualEndFrame == floor(virtualEndFrame)) {
+ if (pitchRate == 1 && !needsInterpolation) {
unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames);
- endFrame = static_cast<unsigned>(virtualEndFrame);
+ maxFrame = static_cast<unsigned>(virtualMaxFrame);
while (framesToProcess > 0) {
- int framesToEnd = endFrame - readIndex;
+ int framesToEnd = maxFrame - readIndex;
int framesThisTime = std::min(framesToProcess, framesToEnd);
framesThisTime = std::max(0, framesThisTime);
@@ -268,13 +269,83 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
framesToProcess -= framesThisTime;
// Wrap-around.
- if (readIndex >= endFrame) {
+ if (readIndex >= maxFrame) {
readIndex -= deltaFrames;
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
}
}
virtualReadIndex = readIndex;
+ } else if (pitchRate == -1 && !needsInterpolation) {
+ int readIndex = static_cast<int>(virtualReadIndex);
+ int deltaFrames = static_cast<int>(virtualDeltaFrames);
+ int minFrame = static_cast<int>(virtualMinFrame) - 1;
+ while (framesToProcess > 0) {
+ int framesToEnd = readIndex - minFrame;
+ int framesThisTime = std::min<int>(framesToProcess, framesToEnd);
+ framesThisTime = std::max<int>(0, framesThisTime);
+
+ while (framesThisTime--) {
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* destination = destinationChannels[i];
+ const float* source = sourceChannels[i];
+
+ destination[writeIndex] = source[readIndex];
+ }
+
+ ++writeIndex;
+ --readIndex;
+ --framesToProcess;
+ }
+
+ // Wrap-around.
+ if (readIndex <= minFrame) {
+ readIndex += deltaFrames;
+ if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+ break;
+ }
+ }
+ virtualReadIndex = readIndex;
+ } else if (!pitchRate) {
+ unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
+
+ for (unsigned i = 0; i < numberOfChannels; ++i)
+ std::fill_n(destinationChannels[i], framesToProcess, sourceChannels[i][readIndex]);
+ } else if (reverse) {
+ unsigned maxFrame = static_cast<unsigned>(virtualMaxFrame);
+ unsigned minFrame = static_cast<unsigned>(floorf(virtualMinFrame));
+
+ while (framesToProcess--) {
+ unsigned readIndex = static_cast<unsigned>(floorf(virtualReadIndex));
+ double interpolationFactor = virtualReadIndex - readIndex;
+
+ unsigned readIndex2 = readIndex + 1;
+ if (readIndex2 >= maxFrame)
+ readIndex2 = loop() ? minFrame : maxFrame - 1;
+
+ // Linear interpolation.
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* destination = destinationChannels[i];
+ const float* source = sourceChannels[i];
+
+ double sample1 = source[readIndex];
+ double sample2 = source[readIndex2];
+ double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;
+
+ destination[writeIndex] = narrowPrecisionToFloat(sample);
+ }
+
+ writeIndex++;
+
+ virtualReadIndex += pitchRate;
+
+ // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
+ if (virtualReadIndex < virtualMinFrame) {
+ virtualReadIndex += virtualDeltaFrames;
+ if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+ break;
+ }
+ }
} else {
while (framesToProcess--) {
unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
@@ -311,7 +382,7 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
virtualReadIndex += pitchRate;
// Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
- if (virtualReadIndex >= virtualEndFrame) {
+ if (virtualReadIndex >= virtualMaxFrame) {
virtualReadIndex -= virtualDeltaFrames;
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
@@ -333,22 +404,20 @@ void AudioBufferSourceNode::reset()
m_lastGain = gain()->value();
}
-bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
+void AudioBufferSourceNode::setBuffer(RefPtr<AudioBuffer>&& buffer)
{
ASSERT(isMainThread());
// The context must be locked since changing the buffer can re-configure the number of channels that are output.
- AudioContext::AutoLocker contextLocker(*context());
+ AudioContext::AutoLocker contextLocker(context());
// This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
if (buffer) {
// Do any necessary re-configuration to the buffer's number of channels.
unsigned numberOfChannels = buffer->numberOfChannels();
-
- if (numberOfChannels > AudioContext::maxNumberOfChannels())
- return false;
+ ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
output(0)->setNumberOfChannels(numberOfChannels);
@@ -356,13 +425,11 @@ bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
m_destinationChannels = std::make_unique<float*[]>(numberOfChannels);
for (unsigned i = 0; i < numberOfChannels; ++i)
- m_sourceChannels[i] = buffer->getChannelData(i)->data();
+ m_sourceChannels[i] = buffer->channelData(i)->data();
}
m_virtualReadIndex = 0;
- m_buffer = buffer;
-
- return true;
+ m_buffer = WTFMove(buffer);
}
unsigned AudioBufferSourceNode::numberOfChannels()
@@ -370,61 +437,67 @@ unsigned AudioBufferSourceNode::numberOfChannels()
return output(0)->numberOfChannels();
}
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::start(double when, double grainOffset, std::optional<double> optionalGrainDuration)
{
- // Duration of 0 has special value, meaning calculate based on the entire buffer's duration.
- startGrain(when, grainOffset, 0, ec);
+ double grainDuration = 0;
+ if (optionalGrainDuration)
+ grainDuration = optionalGrainDuration.value();
+ else if (buffer())
+ grainDuration = buffer()->duration() - grainOffset;
+
+ return startPlaying(Partial, when, grainOffset, grainDuration);
}
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::startPlaying(BufferPlaybackMode playbackMode, double when, double grainOffset, double grainDuration)
{
ASSERT(isMainThread());
- if (ScriptController::processingUserGesture())
- context()->removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+ context().nodeWillBeginPlayback();
- if (m_playbackState != UNSCHEDULED_STATE) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (m_playbackState != UNSCHEDULED_STATE)
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(when) || (when < 0))
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(grainOffset) || (grainOffset < 0))
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(grainDuration) || (grainDuration < 0))
+ return Exception { INVALID_STATE_ERR };
if (!buffer())
- return;
-
- // Do sanity checking of grain parameters versus buffer size.
- double bufferDuration = buffer()->duration();
+ return { };
- grainOffset = std::max(0.0, grainOffset);
- grainOffset = std::min(bufferDuration, grainOffset);
- m_grainOffset = grainOffset;
+ m_isGrain = playbackMode == Partial;
+ if (m_isGrain) {
+ // Do sanity checking of grain parameters versus buffer size.
+ double bufferDuration = buffer()->duration();
- // Handle default/unspecified duration.
- double maxDuration = bufferDuration - grainOffset;
- if (!grainDuration)
- grainDuration = maxDuration;
+ m_grainOffset = std::min(bufferDuration, grainOffset);
- grainDuration = std::max(0.0, grainDuration);
- grainDuration = std::min(maxDuration, grainDuration);
- m_grainDuration = grainDuration;
+ double maxDuration = bufferDuration - m_grainOffset;
+ m_grainDuration = std::min(maxDuration, grainDuration);
+ } else {
+ m_grainOffset = 0.0;
+ m_grainDuration = buffer()->duration();
+ }
- m_isGrain = true;
m_startTime = when;
// We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
// at a sub-sample position since it will degrade the quality.
// When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer.
// Since playbackRate == 1 is very common, it's worth considering quality.
- m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
+ if (totalPitchRate() < 0)
+ m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, buffer()->sampleRate()) - 1;
+ else
+ m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
m_playbackState = SCHEDULED_STATE;
-}
-#if ENABLE(LEGACY_WEB_AUDIO)
-void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
-{
- startGrain(when, grainOffset, grainDuration, ec);
+ return { };
}
-#endif
double AudioBufferSourceNode::totalPitchRate()
{
@@ -442,11 +515,7 @@ double AudioBufferSourceNode::totalPitchRate()
double totalRate = dopplerRate * sampleRateFactor * basePitchRate;
- // Sanity check the total rate. It's very important that the resampler not get any bad rate values.
- totalRate = std::max(0.0, totalRate);
- if (!totalRate)
- totalRate = 1; // zero rate is considered illegal
- totalRate = std::min(MaxRate, totalRate);
+ totalRate = std::max(-MaxRate, std::min(MaxRate, totalRate));
bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate);
ASSERT(isTotalRateValid);
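This replaces the old clamp, which forced the rate into [0, MaxRate] and treated zero as illegal, with a symmetric clamp so that negative rates survive and drive reverse playback (the read-index setup in startPlaying() starts such playback at the end of the grain). As a minimal sketch, with maxRate passed in rather than the file's MaxRate constant:

    #include <algorithm>

    // Symmetric clamp: the sign of the rate is preserved so the buffer
    // can be read backwards; only the magnitude is bounded.
    double clampRate(double totalRate, double maxRate)
    {
        return std::max(-maxRate, std::min(maxRate, totalRate));
    }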
@@ -459,8 +528,8 @@ double AudioBufferSourceNode::totalPitchRate()
bool AudioBufferSourceNode::looping()
{
static bool firstTime = true;
- if (firstTime && context() && context()->scriptExecutionContext()) {
- context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+ if (firstTime && context().scriptExecutionContext()) {
+ context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
firstTime = false;
}
@@ -470,8 +539,8 @@ bool AudioBufferSourceNode::looping()
void AudioBufferSourceNode::setLooping(bool looping)
{
static bool firstTime = true;
- if (firstTime && context() && context()->scriptExecutionContext()) {
- context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+ if (firstTime && context().scriptExecutionContext()) {
+ context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
firstTime = false;
}
@@ -499,7 +568,7 @@ void AudioBufferSourceNode::clearPannerNode()
{
if (m_pannerNode) {
m_pannerNode->deref(AudioNode::RefTypeConnection);
- m_pannerNode = 0;
+ m_pannerNode = nullptr;
}
}
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
index 15fc56a0b..f4e63a859 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
@@ -22,40 +22,32 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioBufferSourceNode_h
-#define AudioBufferSourceNode_h
+#pragma once
-#include "AudioBuffer.h"
-#include "AudioBus.h"
-#include "AudioParam.h"
#include "AudioScheduledSourceNode.h"
-#include "ExceptionCode.h"
-#include "PannerNode.h"
-#include <memory>
-#include <mutex>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefPtr.h>
+#include <wtf/Lock.h>
namespace WebCore {
-class AudioContext;
+class AudioBuffer;
+class PannerNode;
// AudioBufferSourceNode is an AudioNode representing an audio source backed by an in-memory AudioBuffer.
// It is generally used for short sounds that require a high degree of scheduling flexibility (they can play back in rhythmically perfect ways).
-class AudioBufferSourceNode : public AudioScheduledSourceNode {
+class AudioBufferSourceNode final : public AudioScheduledSourceNode {
public:
- static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
+ static Ref<AudioBufferSourceNode> create(AudioContext&, float sampleRate);
virtual ~AudioBufferSourceNode();
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
+ void process(size_t framesToProcess) final;
+ void reset() final;
// setBuffer() is called on the main thread. This is the buffer we use for playback.
- bool setBuffer(AudioBuffer*);
+ void setBuffer(RefPtr<AudioBuffer>&&);
AudioBuffer* buffer() { return m_buffer.get(); }
// numberOfChannels() returns the number of output channels. This value equals the number of channels from the buffer.
@@ -63,12 +55,7 @@ public:
unsigned numberOfChannels();
// Play-state
- void startGrain(double when, double grainOffset, ExceptionCode&);
- void startGrain(double when, double grainOffset, double grainDuration, ExceptionCode&);
-
-#if ENABLE(LEGACY_WEB_AUDIO)
- void noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionCode&);
-#endif
+ ExceptionOr<void> start(double when, double grainOffset, std::optional<double> grainDuration);
// Note: the attribute was originally exposed as .looping, but to be more consistent in naming with <audio>
// and with how it's described in the specification, the proper attribute name is .loop
@@ -94,16 +81,23 @@ public:
void clearPannerNode();
// If we are no longer playing, propagate silence ahead to downstream nodes.
- virtual bool propagatesSilence() const override;
+ bool propagatesSilence() const final;
// AudioScheduledSourceNode
- virtual void finish() override;
+ void finish() final;
private:
- AudioBufferSourceNode(AudioContext*, float sampleRate);
+ AudioBufferSourceNode(AudioContext&, float sampleRate);
+
+ double tailTime() const final { return 0; }
+ double latencyTime() const final { return 0; }
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ enum BufferPlaybackMode {
+ Entire,
+ Partial
+ };
+
+ ExceptionOr<void> startPlaying(BufferPlaybackMode, double when, double grainOffset, double grainDuration);
// Returns true on success.
bool renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);
@@ -150,9 +144,7 @@ private:
PannerNode* m_pannerNode;
// This synchronizes process() with setBuffer() which can cause dynamic channel count changes.
- mutable std::mutex m_processMutex;
+ mutable Lock m_processMutex;
};
} // namespace WebCore
-
-#endif // AudioBufferSourceNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.idl b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.idl
index d792846ee..f42bfe959 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.idl
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.idl
@@ -27,7 +27,7 @@
Conditional=WEB_AUDIO,
JSGenerateToJSObject,
] interface AudioBufferSourceNode : AudioNode {
- [CustomSetter, SetterRaisesException] attribute AudioBuffer buffer;
+ attribute AudioBuffer? buffer;
const unsigned short UNSCHEDULED_STATE = 0;
const unsigned short SCHEDULED_STATE = 1;
@@ -39,21 +39,13 @@
readonly attribute AudioParam gain;
readonly attribute AudioParam playbackRate;
- attribute boolean loop; // This is the proper attribute name from the specification.
+ attribute boolean loop;
- attribute double loopStart;
- attribute double loopEnd;
+ attribute unrestricted double loopStart;
+ attribute unrestricted double loopEnd;
- [RaisesException] void start(double when);
- [ImplementedAs=startGrain, RaisesException] void start(double when, double grainOffset);
- [ImplementedAs=startGrain, RaisesException] void start(double when, double grainOffset, double grainDuration);
- [RaisesException] void stop(double when);
+ [MayThrowException] void start(optional unrestricted double when = 0, optional unrestricted double grainOffset = 0, optional unrestricted double grainDuration);
+ [MayThrowException] void stop(optional unrestricted double when = 0);
- [Conditional=LEGACY_WEB_AUDIO] attribute boolean looping; // This is an alias for the .loop attribute for backwards compatibility.
-
- [Conditional=LEGACY_WEB_AUDIO, RaisesException] void noteOn(double when);
- [Conditional=LEGACY_WEB_AUDIO, RaisesException] void noteGrainOn(double when, double grainOffset, double grainDuration);
- [Conditional=LEGACY_WEB_AUDIO, RaisesException] void noteOff(double when);
-
- attribute EventListener onended;
+ attribute EventHandler onended;
};
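The three start() overloads collapse into one signature with optional arguments; on the C++ side the binding hands the implementation a std::optional<double> for grainDuration (see the header above), and an unspecified duration falls back to the rest of the buffer. A simplified sketch of that fallback, with illustrative names rather than the actual WebKit signatures:

    #include <optional>

    // Resolve a possibly-missing grainDuration before delegating to the
    // internal start routine; the default plays to the end of the buffer.
    double resolveGrainDuration(std::optional<double> grainDuration, double bufferDuration, double grainOffset)
    {
        return grainDuration.value_or(bufferDuration - grainOffset);
    }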
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.cpp b/Source/WebCore/Modules/webaudio/AudioContext.cpp
index 4854ff03b..0e1c78f1c 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioContext.cpp
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -44,21 +45,26 @@
#include "DelayNode.h"
#include "Document.h"
#include "DynamicsCompressorNode.h"
+#include "EventNames.h"
#include "ExceptionCode.h"
#include "FFTFrame.h"
+#include "Frame.h"
#include "GainNode.h"
+#include "GenericEventQueue.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
+#include "JSDOMPromise.h"
+#include "NetworkingContext.h"
#include "OfflineAudioCompletionEvent.h"
#include "OfflineAudioDestinationNode.h"
#include "OscillatorNode.h"
#include "Page.h"
#include "PannerNode.h"
#include "PeriodicWave.h"
-#include "ScriptCallStack.h"
#include "ScriptController.h"
#include "ScriptProcessorNode.h"
#include "WaveShaperNode.h"
+#include <inspector/ScriptCallStack.h>
#if ENABLE(MEDIA_STREAM)
#include "MediaStream.h"
@@ -110,69 +116,45 @@ bool AudioContext::isSampleRateRangeGood(float sampleRate)
const unsigned MaxHardwareContexts = 4;
unsigned AudioContext::s_hardwareContextCount = 0;
-PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
+RefPtr<AudioContext> AudioContext::create(Document& document)
{
- UNUSED_PARAM(ec);
-
ASSERT(isMainThread());
if (s_hardwareContextCount >= MaxHardwareContexts)
return nullptr;
RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
audioContext->suspendIfNeeded();
- return audioContext.release();
+ return audioContext;
}
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
: ActiveDOMObject(&document)
- , m_isStopScheduled(false)
- , m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
- , m_isDeletionScheduled(false)
- , m_automaticPullNodesNeedUpdating(false)
- , m_connectionCount(0)
- , m_audioThread(0)
+ , m_mediaSession(PlatformMediaSession::create(*this))
+ , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
, m_graphOwnerThread(UndefinedThreadIdentifier)
- , m_isOfflineContext(false)
- , m_activeSourceCount(0)
- , m_restrictions(NoRestrictions)
{
constructCommon();
- m_destinationNode = DefaultAudioDestinationNode::create(this);
+ m_destinationNode = DefaultAudioDestinationNode::create(*this);
- // This sets in motion an asynchronous loading mechanism on another thread.
- // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
- // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
- // when this has finished (see AudioDestinationNode).
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
+ // Initialize the destination node's muted state to match the page's current muted state.
+ pageMutedStateDidChange();
}
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: ActiveDOMObject(&document)
- , m_isStopScheduled(false)
- , m_isInitialized(false)
- , m_isAudioThreadFinished(false)
- , m_destinationNode(0)
- , m_automaticPullNodesNeedUpdating(false)
- , m_connectionCount(0)
- , m_audioThread(0)
- , m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
- , m_activeSourceCount(0)
- , m_restrictions(NoRestrictions)
+ , m_mediaSession(PlatformMediaSession::create(*this))
+ , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
+ , m_graphOwnerThread(UndefinedThreadIdentifier)
{
constructCommon();
- // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
- m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
-
// Create a new destination for offline rendering.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
+ m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
}
void AudioContext::constructCommon()
@@ -190,13 +172,13 @@ void AudioContext::constructCommon()
m_listener = AudioListener::create();
#if PLATFORM(IOS)
- if (!document()->settings() || document()->settings()->mediaPlaybackRequiresUserGesture())
+ if (document()->settings().audioPlaybackRequiresUserGesture())
addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
else
m_restrictions = NoRestrictions;
#endif
-#if PLATFORM(MAC)
+#if PLATFORM(COCOA)
addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
@@ -206,47 +188,50 @@ AudioContext::~AudioContext()
#if DEBUG_AUDIONODE_REFERENCES
fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
- // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
ASSERT(!m_isInitialized);
ASSERT(m_isStopScheduled);
- ASSERT(!m_nodesToDelete.size());
- ASSERT(!m_referencedNodes.size());
- ASSERT(!m_finishedNodes.size());
- ASSERT(!m_automaticPullNodes.size());
+ ASSERT(m_nodesToDelete.isEmpty());
+ ASSERT(m_referencedNodes.isEmpty());
+ ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion sometimes fails in tests.
+ ASSERT(m_automaticPullNodes.isEmpty());
if (m_automaticPullNodesNeedUpdating)
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
- ASSERT(!m_renderingAutomaticPullNodes.size());
+ ASSERT(m_renderingAutomaticPullNodes.isEmpty());
+ // FIXME: Can we assert that m_deferredFinishDerefList is empty?
}
void AudioContext::lazyInitialize()
{
- if (!m_isInitialized) {
- // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
- ASSERT(!m_isAudioThreadFinished);
- if (!m_isAudioThreadFinished) {
- if (m_destinationNode.get()) {
- m_destinationNode->initialize();
-
- if (!isOfflineContext()) {
- // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
- // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
- // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
- // We may want to consider requiring it for symmetry with OfflineAudioContext.
- startRendering();
- ++s_hardwareContextCount;
- }
-
- }
- m_isInitialized = true;
+ if (m_isInitialized)
+ return;
+
+ // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+ ASSERT(!m_isAudioThreadFinished);
+ if (m_isAudioThreadFinished)
+ return;
+
+ if (m_destinationNode) {
+ m_destinationNode->initialize();
+
+ if (!isOfflineContext()) {
+ document()->addAudioProducer(this);
+
+ // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+ // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+ // NOTE: for now, the default AudioContext does not need an explicit startRendering() call from JavaScript.
+ // We may want to consider requiring it for symmetry with OfflineAudioContext.
+ startRendering();
+ ++s_hardwareContextCount;
}
}
+ m_isInitialized = true;
}
void AudioContext::clear()
{
// We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
if (m_destinationNode)
- m_destinationNode.clear();
+ m_destinationNode = nullptr;
// Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
do {
@@ -273,8 +258,13 @@ void AudioContext::uninitialize()
m_isAudioThreadFinished = true;
if (!isOfflineContext()) {
+ document()->removeAudioProducer(this);
+
ASSERT(s_hardwareContextCount);
--s_hardwareContextCount;
+
+ // Offline contexts move to 'Closed' state when dispatching the completion event.
+ setState(State::Closed);
}
// Get rid of the sources which may still be playing.
@@ -288,329 +278,293 @@ bool AudioContext::isInitialized() const
return m_isInitialized;
}
-bool AudioContext::isRunnable() const
+void AudioContext::addReaction(State state, DOMPromise<void>&& promise)
{
- if (!isInitialized())
- return false;
-
- // Check with the HRTF spatialization system to see if it's finished loading.
- return m_hrtfDatabaseLoader->isLoaded();
+ size_t stateIndex = static_cast<size_t>(state);
+ if (stateIndex >= m_stateReactions.size())
+ m_stateReactions.resize(stateIndex + 1);
+
+ m_stateReactions[stateIndex].append(WTFMove(promise));
}
-void AudioContext::stopDispatch(void* userData)
+void AudioContext::setState(State state)
{
- AudioContext* context = reinterpret_cast<AudioContext*>(userData);
- ASSERT(context);
- if (!context)
+ if (m_state == state)
+ return;
+
+ m_state = state;
+ m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));
+
+ size_t stateIndex = static_cast<size_t>(state);
+ if (stateIndex >= m_stateReactions.size())
return;
- context->uninitialize();
- context->clear();
+ Vector<DOMPromise<void>> reactions;
+ m_stateReactions[stateIndex].swap(reactions);
+
+ for (auto& promise : reactions)
+ promise.resolve();
}
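setState() drains the per-state promise queue that addReaction() builds, so a promise queued for a state resolves exactly once, when that state is entered. A stripped-down standalone analogue of the pattern, with std::function callbacks standing in for DOMPromise<void> (StateMachine and its members are hypothetical names):

    #include <cstddef>
    #include <functional>
    #include <vector>

    enum class State { Suspended, Running, Interrupted, Closed };

    class StateMachine {
    public:
        // Queue a reaction to run the next time the machine enters `state`.
        void addReaction(State state, std::function<void()>&& reaction)
        {
            size_t index = static_cast<size_t>(state);
            if (index >= m_reactions.size())
                m_reactions.resize(index + 1);
            m_reactions[index].push_back(std::move(reaction));
        }

        void setState(State state)
        {
            if (m_state == state)
                return;
            m_state = state;
            size_t index = static_cast<size_t>(state);
            if (index >= m_reactions.size())
                return;
            // Swap the queue out first so a reaction that re-queues
            // does not invalidate the list we are iterating.
            std::vector<std::function<void()>> reactions;
            reactions.swap(m_reactions[index]);
            for (auto& reaction : reactions)
                reaction();
        }

    private:
        State m_state { State::Suspended };
        std::vector<std::vector<std::function<void()>>> m_reactions;
    };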
void AudioContext::stop()
{
+ ASSERT(isMainThread());
+
// Usually ScriptExecutionContext calls stop twice.
if (m_isStopScheduled)
return;
m_isStopScheduled = true;
+ document()->updateIsPlayingMedia();
+
+ m_eventQueue->close();
+
// Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
// of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
// ActiveDOMObjects so let's schedule uninitialize() to be called later.
// FIXME: see if there's a more direct way to handle this issue.
- callOnMainThread(stopDispatch, this);
+ // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
+ // schedule some observable work for later, the work likely happens at an inappropriate time.
+ callOnMainThread([this] {
+ uninitialize();
+ clear();
+ });
}
-Document* AudioContext::document() const
+bool AudioContext::canSuspendForDocumentSuspension() const
{
- ASSERT(m_scriptExecutionContext && m_scriptExecutionContext->isDocument());
- return static_cast<Document*>(m_scriptExecutionContext);
+ // FIXME: We should be able to suspend while rendering as well with some more code.
+ return m_state == State::Suspended || m_state == State::Closed;
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
+const char* AudioContext::activeDOMObjectName() const
{
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
- if (!audioBuffer.get()) {
- ec = NOT_SUPPORTED_ERR;
- return nullptr;
- }
+ return "AudioContext";
+}
- return audioBuffer;
+Document* AudioContext::document() const
+{
+ ASSERT(m_scriptExecutionContext);
+ return downcast<Document>(m_scriptExecutionContext);
}
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
+const Document* AudioContext::hostingDocument() const
{
- ASSERT(arrayBuffer);
- if (!arrayBuffer) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
+ return downcast<Document>(m_scriptExecutionContext);
+}
- RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
- if (!audioBuffer.get()) {
- ec = SYNTAX_ERR;
- return nullptr;
+String AudioContext::sourceApplicationIdentifier() const
+{
+ Document* document = this->document();
+ if (Frame* frame = document ? document->frame() : nullptr) {
+ if (NetworkingContext* networkingContext = frame->loader().networkingContext())
+ return networkingContext->sourceApplicationIdentifier();
}
+ return emptyString();
+}
- return audioBuffer;
+ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+{
+ auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
+ if (!audioBuffer)
+ return Exception { NOT_SUPPORTED_ERR };
+ return audioBuffer.releaseNonNull();
}
-void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
+ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
{
- if (!audioData) {
- ec = SYNTAX_ERR;
- return;
- }
- m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
+ auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
+ if (!audioBuffer)
+ return Exception { SYNTAX_ERR };
+ return audioBuffer.releaseNonNull();
+}
+
+void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
+{
+ m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
}
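The factories now return ExceptionOr<Ref<T>> instead of filling an ExceptionCode& out-parameter, so failure and success travel through a single return value. A rough analogue with standard-library types only (Exception, ExceptionOrSketch, AudioBufferStub, and the error-code value are placeholders):

    #include <cstddef>
    #include <memory>
    #include <variant>

    struct Exception { int code; };

    template<typename T>
    using ExceptionOrSketch = std::variant<Exception, T>;

    struct AudioBufferStub { unsigned channels; size_t frames; float sampleRate; };

    // Either a non-null buffer or an error; the caller can no longer
    // forget to check an out-parameter.
    ExceptionOrSketch<std::shared_ptr<AudioBufferStub>> createBufferSketch(unsigned channels, size_t frames, float sampleRate)
    {
        if (!channels || !frames || sampleRate <= 0)
            return Exception { 9 }; // placeholder for NOT_SUPPORTED_ERR
        return std::make_shared<AudioBufferStub>(AudioBufferStub { channels, frames, sampleRate });
    }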
-PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+Ref<AudioBufferSourceNode> AudioContext::createBufferSource()
{
ASSERT(isMainThread());
lazyInitialize();
- RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+ Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
- refNode(node.get());
+ refNode(node);
return node;
}
#if ENABLE(VIDEO)
-PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
+
+ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
{
- ASSERT(mediaElement);
- if (!mediaElement) {
- ec = INVALID_STATE_ERR;
- return nullptr;
- }
-
ASSERT(isMainThread());
lazyInitialize();
- // First check if this media element already has a source node.
- if (mediaElement->audioSourceNode()) {
- ec = INVALID_STATE_ERR;
- return nullptr;
- }
-
- RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+ if (mediaElement.audioSourceNode())
+ return Exception { INVALID_STATE_ERR };
+
+ auto node = MediaElementAudioSourceNode::create(*this, mediaElement);
- mediaElement->setAudioSourceNode(node.get());
+ mediaElement.setAudioSourceNode(node.ptr());
refNode(node.get()); // context keeps reference until node is disconnected
- return node;
+ return WTFMove(node);
}
+
#endif
#if ENABLE(MEDIA_STREAM)
-PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
-{
- ASSERT(mediaStream);
- if (!mediaStream) {
- ec = INVALID_STATE_ERR;
- return nullptr;
- }
+ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
+{
ASSERT(isMainThread());
- lazyInitialize();
-
- AudioSourceProvider* provider = 0;
- Vector<RefPtr<MediaStreamTrack>> audioTracks = mediaStream->getAudioTracks();
- RefPtr<MediaStreamTrack> audioTrack;
+ auto audioTracks = mediaStream.getAudioTracks();
+ if (audioTracks.isEmpty())
+ return Exception { INVALID_STATE_ERR };
- // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
- for (size_t i = 0; i < audioTracks.size(); ++i) {
- audioTrack = audioTracks[i];
- if (audioTrack->source()->isAudioStreamSource()) {
- auto source = static_cast<MediaStreamAudioSource*>(audioTrack->source());
- ASSERT(!source->deviceId().isEmpty());
- destination()->enableInput(source->deviceId());
- provider = destination()->localAudioInputProvider();
+ MediaStreamTrack* providerTrack = nullptr;
+ for (auto& track : audioTracks) {
+ if (track->audioSourceProvider()) {
+ providerTrack = track.get();
break;
}
}
+ if (!providerTrack)
+ return Exception { INVALID_STATE_ERR };
- RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);
+ lazyInitialize();
- // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
+ auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
node->setFormat(2, sampleRate());
- refNode(node.get()); // context keeps reference until node is disconnected
- return node;
+ refNode(node); // context keeps reference until node is disconnected
+ return WTFMove(node);
}
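Rather than special-casing local audio-stream sources, the new code scans the stream's audio tracks for the first one that can actually provide samples and rejects the call if none can. A sketch of that selection loop, with TrackStub as a hypothetical stand-in for MediaStreamTrack:

    #include <vector>

    struct TrackStub {
        bool hasProvider;
        bool audioSourceProvider() const { return hasProvider; }
    };

    // Pick the first track with a provider; a null result maps to
    // INVALID_STATE_ERR in the real code.
    const TrackStub* firstProviderTrack(const std::vector<TrackStub>& audioTracks)
    {
        for (const auto& track : audioTracks) {
            if (track.audioSourceProvider())
                return &track;
        }
        return nullptr;
    }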
-PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+Ref<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
// FIXME: Add support for an optional argument which specifies the number of channels.
// FIXME: The default should probably be stereo instead of mono.
- return MediaStreamAudioDestinationNode::create(this, 1);
+ return MediaStreamAudioDestinationNode::create(*this, 1);
}
#endif
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
-{
- // Set number of input/output channels to stereo by default.
- return createScriptProcessor(bufferSize, 2, 2, ec);
-}
-
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
-{
- // Set number of output channels to stereo by default.
- return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
-}
-
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
+ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
{
ASSERT(isMainThread());
lazyInitialize();
- RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+ auto node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
- if (!node.get()) {
- ec = INDEX_SIZE_ERR;
- return nullptr;
- }
+ if (!node)
+ return Exception { INDEX_SIZE_ERR };
- refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
- return node;
+ refNode(*node); // context keeps reference until we stop making javascript rendering callbacks
+ return node.releaseNonNull();
}
-PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+Ref<BiquadFilterNode> AudioContext::createBiquadFilter()
{
ASSERT(isMainThread());
lazyInitialize();
- return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
+ return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
+Ref<WaveShaperNode> AudioContext::createWaveShaper()
{
ASSERT(isMainThread());
lazyInitialize();
- return WaveShaperNode::create(this);
+ return WaveShaperNode::create(*this);
}
-PassRefPtr<PannerNode> AudioContext::createPanner()
+Ref<PannerNode> AudioContext::createPanner()
{
ASSERT(isMainThread());
lazyInitialize();
- return PannerNode::create(this, m_destinationNode->sampleRate());
+ return PannerNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<ConvolverNode> AudioContext::createConvolver()
+Ref<ConvolverNode> AudioContext::createConvolver()
{
ASSERT(isMainThread());
lazyInitialize();
- return ConvolverNode::create(this, m_destinationNode->sampleRate());
+ return ConvolverNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+Ref<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
ASSERT(isMainThread());
lazyInitialize();
- return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
+ return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
+Ref<AnalyserNode> AudioContext::createAnalyser()
{
ASSERT(isMainThread());
lazyInitialize();
- return AnalyserNode::create(this, m_destinationNode->sampleRate());
+ return AnalyserNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<GainNode> AudioContext::createGain()
+Ref<GainNode> AudioContext::createGain()
{
ASSERT(isMainThread());
lazyInitialize();
- return GainNode::create(this, m_destinationNode->sampleRate());
+ return GainNode::create(*this, m_destinationNode->sampleRate());
}
-PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
-{
- const double defaultMaxDelayTime = 1;
- return createDelay(defaultMaxDelayTime, ec);
-}
-
-PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
+ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
{
ASSERT(isMainThread());
lazyInitialize();
- RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
- if (ec)
- return nullptr;
- return node;
+ return DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime);
}
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
-{
- const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
- return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
-}
-
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
+ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
{
ASSERT(isMainThread());
lazyInitialize();
-
- RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
-
- if (!node.get()) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
-
- return node;
+ auto node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
+ if (!node)
+ return Exception { INDEX_SIZE_ERR };
+ return node.releaseNonNull();
}
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
-{
- const unsigned ChannelMergerDefaultNumberOfInputs = 6;
- return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
-}
-
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
+ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
{
ASSERT(isMainThread());
lazyInitialize();
-
- RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
-
- if (!node.get()) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
-
- return node;
+ auto node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
+ if (!node)
+ return Exception { INDEX_SIZE_ERR };
+ return node.releaseNonNull();
}
-PassRefPtr<OscillatorNode> AudioContext::createOscillator()
+Ref<OscillatorNode> AudioContext::createOscillator()
{
ASSERT(isMainThread());
lazyInitialize();
- RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+ Ref<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
// Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
// When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
- refNode(node.get());
+ refNode(node);
return node;
}
-PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
+ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
{
ASSERT(isMainThread());
-
- if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
-
+ if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
+ return Exception { INDEX_SIZE_ERR };
lazyInitialize();
- return PeriodicWave::create(sampleRate(), real, imag);
+ return PeriodicWave::create(sampleRate(), real, imaginary);
}
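createPeriodicWave() now rejects mismatched, empty, or oversized coefficient arrays with INDEX_SIZE_ERR before touching the wave table. The check can be read in isolation as follows (kMaxPeriodicWaveLength is a placeholder, not the real constant's value):

    #include <cstddef>
    #include <vector>

    constexpr size_t kMaxPeriodicWaveLength = 4096; // placeholder value

    bool periodicWaveArgumentsValid(const std::vector<float>& real, const std::vector<float>& imaginary)
    {
        return real.size() == imaginary.size()
            && real.size() <= kMaxPeriodicWaveLength
            && !real.empty();
    }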
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
@@ -623,40 +577,36 @@ void AudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
ASSERT(isAudioThread() || isAudioThreadFinished());
- for (unsigned i = 0; i < m_finishedNodes.size(); i++)
- derefNode(m_finishedNodes[i]);
+ for (auto& node : m_finishedNodes)
+ derefNode(*node);
m_finishedNodes.clear();
}
-void AudioContext::refNode(AudioNode* node)
+void AudioContext::refNode(AudioNode& node)
{
ASSERT(isMainThread());
AutoLocker locker(*this);
- node->ref(AudioNode::RefTypeConnection);
- m_referencedNodes.append(node);
+ node.ref(AudioNode::RefTypeConnection);
+ m_referencedNodes.append(&node);
}
-void AudioContext::derefNode(AudioNode* node)
+void AudioContext::derefNode(AudioNode& node)
{
ASSERT(isGraphOwner());
- node->deref(AudioNode::RefTypeConnection);
+ node.deref(AudioNode::RefTypeConnection);
- for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
- if (node == m_referencedNodes[i]) {
- m_referencedNodes.remove(i);
- break;
- }
- }
+ ASSERT(m_referencedNodes.contains(&node));
+ m_referencedNodes.removeFirst(&node);
}
void AudioContext::derefUnfinishedSourceNodes()
{
ASSERT(isMainThread() && isAudioThreadFinished());
- for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
- m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
+ for (auto& node : m_referencedNodes)
+ node->deref(AudioNode::RefTypeConnection);
m_referencedNodes.clear();
}
@@ -788,10 +738,8 @@ void AudioContext::handlePostRenderTasks()
void AudioContext::handleDeferredFinishDerefs()
{
ASSERT(isAudioThread() && isGraphOwner());
- for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
- AudioNode* node = m_deferredFinishDerefList[i];
+ for (auto& node : m_deferredFinishDerefList)
node->finishDeref(AudioNode::RefTypeConnection);
- }
m_deferredFinishDerefList.clear();
}
@@ -826,36 +774,23 @@ void AudioContext::scheduleNodeDeletion()
m_isDeletionScheduled = true;
- // Don't let ourself get deleted before the callback.
- // See matching deref() in deleteMarkedNodesDispatch().
- ref();
- callOnMainThread(deleteMarkedNodesDispatch, this);
+ callOnMainThread([protectedThis = makeRef(*this)]() mutable {
+ protectedThis->deleteMarkedNodes();
+ });
}
}
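The lambda form replaces a manual ref()/deref() pair around a C-style callback: capturing a strong reference keeps the context alive until the deferred work runs, with no early-return path that can leak the ref. An analogue using std::shared_ptr (Context and this inline callOnMainThread are stand-ins, not the WebKit types):

    #include <functional>
    #include <memory>

    // Trivial inline "scheduler" standing in for WebKit's callOnMainThread.
    static void callOnMainThread(std::function<void()>&& task) { task(); }

    struct Context : std::enable_shared_from_this<Context> {
        void deleteMarkedNodes() { /* ... */ }

        void scheduleNodeDeletion()
        {
            // The lambda owns a strong reference, so the Context cannot
            // be destroyed before the deferred call runs.
            callOnMainThread([protectedThis = shared_from_this()] {
                protectedThis->deleteMarkedNodes();
            });
        }
    };

For shared_from_this() to be valid, the object would have to be created via std::make_shared<Context>(), much as the real AudioContext is always reference-counted.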
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
- AudioContext* context = reinterpret_cast<AudioContext*>(userData);
- ASSERT(context);
- if (!context)
- return;
-
- context->deleteMarkedNodes();
- context->deref();
-}
-
void AudioContext::deleteMarkedNodes()
{
ASSERT(isMainThread());
// Protect this object from being deleted before we release the mutex locked by AutoLocker.
- Ref<AudioContext> protect(*this);
+ Ref<AudioContext> protectedThis(*this);
{
AutoLocker locker(*this);
- while (size_t n = m_nodesToDelete.size()) {
- AudioNode* node = m_nodesToDelete[n - 1];
- m_nodesToDelete.removeLast();
+ while (m_nodesToDelete.size()) {
+ AudioNode* node = m_nodesToDelete.takeLast();
// Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
unsigned numberOfInputs = node->numberOfInputs();
@@ -897,8 +832,8 @@ void AudioContext::handleDirtyAudioSummingJunctions()
{
ASSERT(isGraphOwner());
- for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
- (*i)->updateRenderingState();
+ for (auto& junction : m_dirtySummingJunctions)
+ junction->updateRenderingState();
m_dirtySummingJunctions.clear();
}
@@ -907,8 +842,8 @@ void AudioContext::handleDirtyAudioNodeOutputs()
{
ASSERT(isGraphOwner());
- for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
- (*i)->updateRenderingState();
+ for (auto& output : m_dirtyAudioNodeOutputs)
+ output->updateRenderingState();
m_dirtyAudioNodeOutputs.clear();
}
@@ -937,11 +872,9 @@ void AudioContext::updateAutomaticPullNodes()
// Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
- unsigned j = 0;
- for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
- AudioNode* output = *i;
- m_renderingAutomaticPullNodes[j] = output;
- }
+ unsigned i = 0;
+ for (auto& output : m_automaticPullNodes)
+ m_renderingAutomaticPullNodes[i++] = output;
m_automaticPullNodesNeedUpdating = false;
}
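The copy exists so the audio thread can iterate a flat snapshot while the main thread mutates the HashSet under the graph lock. A sketch with standard containers (Node is an opaque placeholder type):

    #include <cstddef>
    #include <unordered_set>
    #include <vector>

    struct Node;

    // Rebuild the rendering snapshot from the authoritative set; called
    // only while holding the graph lock in the real code.
    void snapshotPullNodes(const std::unordered_set<Node*>& automaticPullNodes, std::vector<Node*>& renderingPullNodes)
    {
        renderingPullNodes.resize(automaticPullNodes.size());
        size_t i = 0;
        for (auto* node : automaticPullNodes)
            renderingPullNodes[i++] = node;
    }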
@@ -951,8 +884,8 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
ASSERT(isAudioThread());
- for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
- m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
+ for (auto& node : m_renderingAutomaticPullNodes)
+ node->processIfNecessary(framesToProcess);
}
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
@@ -960,24 +893,97 @@ ScriptExecutionContext* AudioContext::scriptExecutionContext() const
return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
}
-void AudioContext::startRendering()
+void AudioContext::nodeWillBeginPlayback()
+{
+ // Called by scheduled AudioNodes when clients schedule their start times.
+ // Prior to the introduction of suspend(), resume(), and stop(), starting
+ // a scheduled AudioNode would remove the user-gesture restriction, if present,
+ // and would thus unmute the context. Now that AudioContext stays in the
+ // "suspended" state if a user-gesture restriction is present, starting a
+ // scheduled AudioNode should set the state to "running", but only if the
+ // user-gesture restriction is set.
+ if (userGestureRequiredForAudioStart())
+ startRendering();
+}
+
+bool AudioContext::willBeginPlayback()
{
- if (ScriptController::processingUserGesture())
+ if (userGestureRequiredForAudioStart()) {
+ if (!ScriptController::processingUserGestureForMedia())
+ return false;
removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+ }
if (pageConsentRequiredForAudioStart()) {
Page* page = document()->page();
- if (page && !page->canStartMedia())
+ if (page && !page->canStartMedia()) {
document()->addMediaCanStartListener(this);
- else
- removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+ return false;
+ }
+ removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+ }
+
+ return m_mediaSession->clientWillBeginPlayback();
+}
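Each restriction can veto the transition independently, and the media session gets the final say. Condensed into a standalone sketch (PlaybackGate and its fields are hypothetical; the real predicates live in ScriptController, Page, and PlatformMediaSession):

    struct PlaybackGate {
        bool userGestureRequired;
        bool processingUserGesture;
        bool pageConsentRequired;
        bool pageCanStartMedia;

        bool willBeginPlayback()
        {
            if (userGestureRequired) {
                if (!processingUserGesture)
                    return false;
                userGestureRequired = false; // restriction lifted
            }
            if (pageConsentRequired) {
                if (!pageCanStartMedia)
                    return false; // real code also registers a media-can-start listener
                pageConsentRequired = false;
            }
            return true; // real code additionally consults the PlatformMediaSession
        }
    };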
+
+bool AudioContext::willPausePlayback()
+{
+ if (userGestureRequiredForAudioStart()) {
+ if (!ScriptController::processingUserGestureForMedia())
+ return false;
+ removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
}
+
+ if (pageConsentRequiredForAudioStart()) {
+ Page* page = document()->page();
+ if (page && !page->canStartMedia()) {
+ document()->addMediaCanStartListener(this);
+ return false;
+ }
+ removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+ }
+
+ return m_mediaSession->clientWillPausePlayback();
+}
+
+void AudioContext::startRendering()
+{
+ if (!willBeginPlayback())
+ return;
+
destination()->startRendering();
+ setState(State::Running);
}
-void AudioContext::mediaCanStart()
+void AudioContext::mediaCanStart(Document& document)
{
+ ASSERT_UNUSED(document, &document == this->document());
removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+ mayResumePlayback(true);
+}
+
+MediaProducer::MediaStateFlags AudioContext::mediaState() const
+{
+ if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
+ return MediaProducer::IsPlayingAudio;
+
+ return MediaProducer::IsNotPlaying;
+}
+
+void AudioContext::pageMutedStateDidChange()
+{
+ if (m_destinationNode && document()->page())
+ m_destinationNode->setMuted(document()->page()->isAudioMuted());
+}
+
+void AudioContext::isPlayingAudioDidChange()
+{
+ // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
+ // we could be on the audio I/O thread here and the call into WebCore could block.
+ callOnMainThread([protectedThis = makeRef(*this)] {
+ if (protectedThis->document())
+ protectedThis->document()->updateIsPlayingMedia();
+ });
}
void AudioContext::fireCompletionEvent()
@@ -987,6 +993,7 @@ void AudioContext::fireCompletionEvent()
return;
AudioBuffer* renderedBuffer = m_renderTarget.get();
+ setState(State::Closed);
ASSERT(renderedBuffer);
if (!renderedBuffer)
@@ -995,7 +1002,7 @@ void AudioContext::fireCompletionEvent()
// Avoid firing the event if the document has already gone away.
if (scriptExecutionContext()) {
// Call the offline rendering completion event listener.
- dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
+ m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
}
}
@@ -1009,6 +1016,127 @@ void AudioContext::decrementActiveSourceCount()
--m_activeSourceCount;
}
+void AudioContext::suspend(DOMPromise<void>&& promise)
+{
+ if (isOfflineContext()) {
+ promise.reject(INVALID_STATE_ERR);
+ return;
+ }
+
+ if (m_state == State::Suspended) {
+ promise.resolve();
+ return;
+ }
+
+ if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
+ promise.reject();
+ return;
+ }
+
+ addReaction(State::Suspended, WTFMove(promise));
+
+ if (!willPausePlayback())
+ return;
+
+ lazyInitialize();
+
+ m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+ setState(State::Suspended);
+ });
+}
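suspend() resolves immediately when already suspended, rejects for offline, closed, or interrupted contexts, and otherwise queues the promise and lets the destination node's asynchronous suspend flip the state. The decision ladder, reduced to callbacks (all names hypothetical; resolve/reject stand in for the DOMPromise):

    #include <functional>

    enum class CtxState { Suspended, Running, Interrupted, Closed };

    void suspendSketch(CtxState state, bool isOffline,
                       const std::function<void()>& resolve,
                       const std::function<void()>& reject,
                       const std::function<void(std::function<void()>)>& suspendDestination)
    {
        if (isOffline) {
            reject(); // INVALID_STATE_ERR in the real code
            return;
        }
        if (state == CtxState::Suspended) {
            resolve();
            return;
        }
        if (state == CtxState::Closed || state == CtxState::Interrupted) {
            reject();
            return;
        }
        // The real code queues the promise via addReaction(State::Suspended, ...)
        // and resolves it when setState() fires; here the completion handler
        // resolves it directly.
        suspendDestination([resolve] { resolve(); });
    }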
+
+void AudioContext::resume(DOMPromise<void>&& promise)
+{
+ if (isOfflineContext()) {
+ promise.reject(INVALID_STATE_ERR);
+ return;
+ }
+
+ if (m_state == State::Running) {
+ promise.resolve();
+ return;
+ }
+
+ if (m_state == State::Closed || !m_destinationNode) {
+ promise.reject();
+ return;
+ }
+
+ addReaction(State::Running, WTFMove(promise));
+
+ if (!willBeginPlayback())
+ return;
+
+ lazyInitialize();
+
+ m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+ setState(State::Running);
+ });
+}
+
+void AudioContext::close(DOMPromise<void>&& promise)
+{
+ if (isOfflineContext()) {
+ promise.reject(INVALID_STATE_ERR);
+ return;
+ }
+
+ if (m_state == State::Closed || !m_destinationNode) {
+ promise.resolve();
+ return;
+ }
+
+ addReaction(State::Closed, WTFMove(promise));
+
+ lazyInitialize();
+
+ m_destinationNode->close([this, protectedThis = makeRef(*this)] {
+ setState(State::Closed);
+ uninitialize();
+ });
+}
+
+void AudioContext::suspendPlayback()
+{
+ if (!m_destinationNode || m_state == State::Closed)
+ return;
+
+ if (m_state == State::Suspended) {
+ if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
+ setState(State::Interrupted);
+ return;
+ }
+
+ lazyInitialize();
+
+ m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+ bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
+ setState(interrupted ? State::Interrupted : State::Suspended);
+ });
+}
+
+void AudioContext::mayResumePlayback(bool shouldResume)
+{
+ if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
+ return;
+
+ if (!shouldResume) {
+ setState(State::Suspended);
+ return;
+ }
+
+ if (!willBeginPlayback())
+ return;
+
+ lazyInitialize();
+
+ m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+ setState(State::Running);
+ });
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.h b/Source/WebCore/Modules/webaudio/AudioContext.h
index 1e965d9ad..c631f1f19 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.h
+++ b/Source/WebCore/Modules/webaudio/AudioContext.h
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -22,8 +23,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioContext_h
-#define AudioContext_h
+#pragma once
#include "ActiveDOMObject.h"
#include "AsyncAudioDecoder.h"
@@ -31,12 +31,13 @@
#include "AudioDestinationNode.h"
#include "EventListener.h"
#include "EventTarget.h"
+#include "JSDOMPromise.h"
#include "MediaCanStartListener.h"
+#include "MediaProducer.h"
+#include "PlatformMediaSession.h"
#include <atomic>
#include <wtf/HashSet.h>
#include <wtf/MainThread.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
@@ -45,41 +46,39 @@
namespace WebCore {
+class AnalyserNode;
class AudioBuffer;
class AudioBufferCallback;
class AudioBufferSourceNode;
-class MediaElementAudioSourceNode;
-class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
-class HRTFDatabaseLoader;
-class HTMLMediaElement;
-class ChannelMergerNode;
-class ChannelSplitterNode;
-class GainNode;
-class PannerNode;
class AudioListener;
class AudioSummingJunction;
class BiquadFilterNode;
+class ChannelMergerNode;
+class ChannelSplitterNode;
+class ConvolverNode;
class DelayNode;
class Document;
-class ConvolverNode;
class DynamicsCompressorNode;
-class AnalyserNode;
-class WaveShaperNode;
-class ScriptProcessorNode;
+class GainNode;
+class GenericEventQueue;
+class HTMLMediaElement;
+class MediaElementAudioSourceNode;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
class OscillatorNode;
+class PannerNode;
class PeriodicWave;
+class ScriptProcessorNode;
+class WaveShaperNode;
// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
-class AudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData, public MediaCanStartListener {
+class AudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData, public MediaCanStartListener, public MediaProducer, private PlatformMediaSessionClient {
public:
// Create an AudioContext for rendering to the audio hardware.
- static PassRefPtr<AudioContext> create(Document&, ExceptionCode&);
-
- // Create an AudioContext for offline (non-realtime) rendering.
- static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
+ static RefPtr<AudioContext> create(Document&);
virtual ~AudioContext();
@@ -87,16 +86,10 @@ public:
bool isOfflineContext() { return m_isOfflineContext; }
- // Returns true when initialize() was called AND all asynchronous initialization has completed.
- bool isRunnable() const;
-
- HRTFDatabaseLoader* hrtfDatabaseLoader() const { return m_hrtfDatabaseLoader.get(); }
-
- // Document notification
- virtual void stop() override;
-
Document* document() const; // ASSERTs if document no longer exists.
+ const Document* hostingDocument() const override;
+
AudioDestinationNode* destination() { return m_destinationNode.get(); }
size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
double currentTime() const { return m_destinationNode->currentTime(); }
@@ -106,41 +99,46 @@ public:
void incrementActiveSourceCount();
void decrementActiveSourceCount();
- PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
- PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
+ ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+ ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
// Asynchronous audio file data decoding.
- void decodeAudioData(ArrayBuffer*, PassRefPtr<AudioBufferCallback>, PassRefPtr<AudioBufferCallback>, ExceptionCode& ec);
+ void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
AudioListener* listener() { return m_listener.get(); }
+ using ActiveDOMObject::suspend;
+ using ActiveDOMObject::resume;
+
+ void suspend(DOMPromise<void>&&);
+ void resume(DOMPromise<void>&&);
+ void close(DOMPromise<void>&&);
+
+ enum class State { Suspended, Running, Interrupted, Closed };
+ State state() const;
+
// The AudioNode create methods are called on the main thread (from JavaScript).
- PassRefPtr<AudioBufferSourceNode> createBufferSource();
+ Ref<AudioBufferSourceNode> createBufferSource();
#if ENABLE(VIDEO)
- PassRefPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionCode&);
+ ExceptionOr<Ref<MediaElementAudioSourceNode>> createMediaElementSource(HTMLMediaElement&);
#endif
#if ENABLE(MEDIA_STREAM)
- PassRefPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionCode&);
- PassRefPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination();
+ ExceptionOr<Ref<MediaStreamAudioSourceNode>> createMediaStreamSource(MediaStream&);
+ Ref<MediaStreamAudioDestinationNode> createMediaStreamDestination();
#endif
- PassRefPtr<GainNode> createGain();
- PassRefPtr<BiquadFilterNode> createBiquadFilter();
- PassRefPtr<WaveShaperNode> createWaveShaper();
- PassRefPtr<DelayNode> createDelay(ExceptionCode&);
- PassRefPtr<DelayNode> createDelay(double maxDelayTime, ExceptionCode&);
- PassRefPtr<PannerNode> createPanner();
- PassRefPtr<ConvolverNode> createConvolver();
- PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
- PassRefPtr<AnalyserNode> createAnalyser();
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionCode&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(ExceptionCode&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionCode&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionCode&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionCode&);
- PassRefPtr<OscillatorNode> createOscillator();
- PassRefPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode&);
+ Ref<GainNode> createGain();
+ Ref<BiquadFilterNode> createBiquadFilter();
+ Ref<WaveShaperNode> createWaveShaper();
+ ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
+ Ref<PannerNode> createPanner();
+ Ref<ConvolverNode> createConvolver();
+ Ref<DynamicsCompressorNode> createDynamicsCompressor();
+ Ref<AnalyserNode> createAnalyser();
+ ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
+ ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
+ ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
+ Ref<OscillatorNode> createOscillator();
+ ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
// When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
void notifyNodeFinishedProcessing(AudioNode*);
@@ -198,8 +196,8 @@ public:
// Returns true if this thread owns the context's lock.
bool isGraphOwner() const;
- // Returns the maximum numuber of channels we can support.
- static unsigned maxNumberOfChannels() { return MaxNumberOfChannels;}
+ // Returns the maximum number of channels we can support.
+ static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
class AutoLocker {
public:
@@ -234,14 +232,12 @@ public:
void removeMarkedSummingJunction(AudioSummingJunction*);
// EventTarget
- virtual EventTargetInterface eventTargetInterface() const override final { return AudioContextEventTargetInterfaceType; }
- virtual ScriptExecutionContext* scriptExecutionContext() const override final;
-
- DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
+ EventTargetInterface eventTargetInterface() const final { return AudioContextEventTargetInterfaceType; }
+ ScriptExecutionContext* scriptExecutionContext() const final;
// Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
- using ThreadSafeRefCounted<AudioContext>::ref;
- using ThreadSafeRefCounted<AudioContext>::deref;
+ using ThreadSafeRefCounted::ref;
+ using ThreadSafeRefCounted::deref;
void startRendering();
void fireCompletionEvent();
@@ -256,12 +252,14 @@ public:
};
typedef unsigned BehaviorRestrictions;
- bool userGestureRequiredForAudioStart() const { return m_restrictions & RequireUserGestureForAudioStartRestriction; }
- bool pageConsentRequiredForAudioStart() const { return m_restrictions & RequirePageConsentForAudioStartRestriction; }
-
+ BehaviorRestrictions behaviorRestrictions() const { return m_restrictions; }
void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
+ void isPlayingAudioDidChange();
+
+ void nodeWillBeginPlayback();
+
protected:
explicit AudioContext(Document&);
AudioContext(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
@@ -274,33 +272,62 @@ private:
void lazyInitialize();
void uninitialize();
- // ScriptExecutionContext calls stop twice.
- // We'd like to schedule only one stop action for them.
- bool m_isStopScheduled;
- static void stopDispatch(void* userData);
+ bool willBeginPlayback();
+ bool willPausePlayback();
+
+ bool userGestureRequiredForAudioStart() const { return m_restrictions & RequireUserGestureForAudioStartRestriction; }
+ bool pageConsentRequiredForAudioStart() const { return m_restrictions & RequirePageConsentForAudioStartRestriction; }
+
+ void setState(State);
+
void clear();
void scheduleNodeDeletion();
- static void deleteMarkedNodesDispatch(void* userData);
- virtual void mediaCanStart() override;
+ void mediaCanStart(Document&) override;
- bool m_isInitialized;
- bool m_isAudioThreadFinished;
+ // MediaProducer
+ MediaProducer::MediaStateFlags mediaState() const override;
+ void pageMutedStateDidChange() override;
// The context itself keeps a reference to all source nodes. The source nodes, then reference all nodes they're connected to.
// In turn, these nodes reference all nodes they're connected to. All nodes are ultimately connected to the AudioDestinationNode.
// When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
// uniquely connected to. See the AudioNode::ref() and AudioNode::deref() methods for more details.
- void refNode(AudioNode*);
- void derefNode(AudioNode*);
+ void refNode(AudioNode&);
+ void derefNode(AudioNode&);
+
+ // ActiveDOMObject API.
+ void stop() override;
+ bool canSuspendForDocumentSuspension() const override;
+ const char* activeDOMObjectName() const override;
// When the context goes away, there might still be some sources which haven't finished playing.
// Make sure to dereference them here.
void derefUnfinishedSourceNodes();
- RefPtr<AudioDestinationNode> m_destinationNode;
- RefPtr<AudioListener> m_listener;
+ // PlatformMediaSessionClient
+ PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::WebAudio; }
+ PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::WebAudio; }
+ PlatformMediaSession::CharacteristicsFlags characteristics() const override { return m_state == State::Running ? PlatformMediaSession::HasAudio : PlatformMediaSession::HasNothing; }
+ void mayResumePlayback(bool shouldResume) override;
+ void suspendPlayback() override;
+ bool canReceiveRemoteControlCommands() const override { return false; }
+ void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
+ bool supportsSeeking() const override { return false; }
+ bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
+ String sourceApplicationIdentifier() const override;
+ bool canProduceAudio() const final { return true; }
+
+ // EventTarget
+ void refEventTarget() override { ref(); }
+ void derefEventTarget() override { deref(); }
+
+ void handleDirtyAudioSummingJunctions();
+ void handleDirtyAudioNodeOutputs();
+
+ void addReaction(State, DOMPromise<void>&&);
+ void updateAutomaticPullNodes();
// Only accessed in the audio thread.
Vector<AudioNode*> m_finishedNodes;
@@ -318,42 +345,39 @@ private:
// They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
Vector<AudioNode*> m_nodesToDelete;
- bool m_isDeletionScheduled;
+
+ bool m_isDeletionScheduled { false };
+ bool m_isStopScheduled { false };
+ bool m_isInitialized { false };
+ bool m_isAudioThreadFinished { false };
+ bool m_automaticPullNodesNeedUpdating { false };
+ bool m_isOfflineContext { false };
// Only accessed when the graph lock is held.
HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
- void handleDirtyAudioSummingJunctions();
- void handleDirtyAudioNodeOutputs();
// For the sake of thread safety, we maintain a separate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
// It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
HashSet<AudioNode*> m_automaticPullNodes;
Vector<AudioNode*> m_renderingAutomaticPullNodes;
- // m_automaticPullNodesNeedUpdating keeps track if m_automaticPullNodes is modified.
- bool m_automaticPullNodesNeedUpdating;
- void updateAutomaticPullNodes();
-
- unsigned m_connectionCount;
-
- // Graph locking.
- Mutex m_contextGraphMutex;
- volatile ThreadIdentifier m_audioThread;
- volatile ThreadIdentifier m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == UndefinedThreadIdentifier
-
// Only accessed in the audio thread.
Vector<AudioNode*> m_deferredFinishDerefList;
-
- // HRTF Database loader
- RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
+ Vector<Vector<DOMPromise<void>>> m_stateReactions;
- // EventTarget
- virtual void refEventTarget() override { ref(); }
- virtual void derefEventTarget() override { deref(); }
+ std::unique_ptr<PlatformMediaSession> m_mediaSession;
+ std::unique_ptr<GenericEventQueue> m_eventQueue;
RefPtr<AudioBuffer> m_renderTarget;
-
- bool m_isOfflineContext;
+ RefPtr<AudioDestinationNode> m_destinationNode;
+ RefPtr<AudioListener> m_listener;
+
+ unsigned m_connectionCount { 0 };
+
+ // Graph locking.
+ Lock m_contextGraphMutex;
+ volatile ThreadIdentifier m_audioThread { 0 };
+ volatile ThreadIdentifier m_graphOwnerThread; // If the lock is held, this is the thread that owns it; otherwise it is UndefinedThreadIdentifier.
AsyncAudioDecoder m_audioDecoder;
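
(For orientation: a minimal standalone sketch, not part of the patch, of the double-buffering scheme described by the comment on m_renderingAutomaticPullNodes above. std::mutex, std::unordered_set, and std::vector stand in for the graph lock and WTF's HashSet/Vector; names are illustrative.)

    #include <cstddef>
    #include <mutex>
    #include <unordered_set>
    #include <vector>

    struct Node { void processIfNecessary(size_t) { } };

    struct Graph {
        std::mutex graphMutex;
        std::unordered_set<Node*> automaticPullNodes;   // modified by the main thread
        std::vector<Node*> renderingAutomaticPullNodes; // read only by the audio thread
        bool needsUpdate { false };

        void addAutomaticPullNode(Node* node)
        {
            std::lock_guard<std::mutex> lock(graphMutex);
            automaticPullNodes.insert(node);
            needsUpdate = true; // picked up at the next quantum boundary
        }

        // Called with the graph lock held, at the very start or end of a render quantum.
        void updateAutomaticPullNodes()
        {
            if (!needsUpdate)
                return;
            renderingAutomaticPullNodes.assign(automaticPullNodes.begin(), automaticPullNodes.end());
            needsUpdate = false;
        }

        void processAutomaticPullNodes(size_t framesToProcess)
        {
            for (auto* node : renderingAutomaticPullNodes)
                node->processIfNecessary(framesToProcess); // no lock needed on this copy
        }
    };
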
@@ -362,11 +386,28 @@ private:
enum { MaxNumberOfChannels = 32 };
// Number of AudioBufferSourceNodes that are active (playing).
- std::atomic<int> m_activeSourceCount;
+ std::atomic<int> m_activeSourceCount { 0 };
- BehaviorRestrictions m_restrictions;
+ BehaviorRestrictions m_restrictions { NoRestrictions };
+
+ State m_state { State::Suspended };
};
-} // WebCore
+// FIXME: Find out why these ==/!= functions are needed and remove them if possible.
-#endif // AudioContext_h
+inline bool operator==(const AudioContext& lhs, const AudioContext& rhs)
+{
+ return &lhs == &rhs;
+}
+
+inline bool operator!=(const AudioContext& lhs, const AudioContext& rhs)
+{
+ return &lhs != &rhs;
+}
+
+inline AudioContext::State AudioContext::state() const
+{
+ return m_state;
+}
+
+} // WebCore
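
(To make the new promise plumbing concrete: a simplified, self-contained model of the state-reaction pattern suggested by addReaction(), setState(), and m_stateReactions above. Plain callbacks stand in for DOMPromise<void>; this is an illustration, not WebCore's actual code.)

    #include <array>
    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    enum class State { Suspended, Running, Interrupted, Closed };

    class ContextModel {
    public:
        // Queue a reaction (e.g. a promise resolution) for a target state.
        void addReaction(State target, std::function<void()>&& resolve)
        {
            m_reactions[static_cast<size_t>(target)].push_back(std::move(resolve));
        }

        // Entering a state resolves everything that was queued for it.
        void setState(State newState)
        {
            m_state = newState;
            auto& pending = m_reactions[static_cast<size_t>(newState)];
            for (auto& resolve : pending)
                resolve();
            pending.clear();
        }

    private:
        State m_state { State::Suspended };
        std::array<std::vector<std::function<void()>>, 4> m_reactions;
    };
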
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.idl b/Source/WebCore/Modules/webaudio/AudioContext.idl
index 8d684299f..e5226a3f1 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.idl
+++ b/Source/WebCore/Modules/webaudio/AudioContext.idl
@@ -23,72 +23,78 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+enum AudioContextState {
+ "suspended",
+ "running",
+ "interrupted",
+ "closed"
+};
+
[
- EnabledBySetting=WebAudio,
- Conditional=WEB_AUDIO,
ActiveDOMObject,
- CustomConstructor,
- EventTarget,
+ Conditional=WEB_AUDIO,
+ Constructor,
+ ConstructorCallWith=Document,
+ EnabledBySetting=WebAudio,
+ ExportMacro=WEBCORE_EXPORT,
InterfaceName=webkitAudioContext,
-] interface AudioContext {
+] interface AudioContext : EventTarget {
// All rendered audio ultimately connects to destination, which represents the audio hardware.
readonly attribute AudioDestinationNode destination;
// All scheduled times are relative to this time in seconds.
- readonly attribute double currentTime;
+ readonly attribute unrestricted double currentTime;
// All AudioNodes in the context run at this sample-rate (sample-frames per second).
- readonly attribute float sampleRate;
+ readonly attribute unrestricted float sampleRate;
// All panning is relative to this listener.
readonly attribute AudioListener listener;
+ Promise<void> suspend();
+ Promise<void> resume();
+ Promise<void> close();
+
+ readonly attribute AudioContextState state;
+ attribute EventHandler onstatechange;
+
// Number of AudioBufferSourceNodes that are currently playing.
readonly attribute unsigned long activeSourceCount;
- [RaisesException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
- [RaisesException] AudioBuffer createBuffer(ArrayBuffer? buffer, boolean mixToMono);
+ [MayThrowException] AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
+ [MayThrowException] AudioBuffer createBuffer(ArrayBuffer buffer, boolean mixToMono);
// Asynchronous audio file data decoding.
- [RaisesException] void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback successCallback, optional AudioBufferCallback errorCallback);
+ // FIXME: successCallback should be optional and the callbacks should not be nullable. This should also return a Promise.
+ void decodeAudioData(ArrayBuffer audioData, AudioBufferCallback? successCallback, optional AudioBufferCallback? errorCallback);
// Sources
AudioBufferSourceNode createBufferSource();
-#if defined(ENABLE_VIDEO) && ENABLE_VIDEO
- [RaisesException] MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
-#endif
+ [Conditional=VIDEO, MayThrowException] MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
-#if defined(ENABLE_MEDIA_STREAM) && ENABLE_MEDIA_STREAM
- [RaisesException] MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
- MediaStreamAudioDestinationNode createMediaStreamDestination();
-#endif
+ [Conditional=MEDIA_STREAM, MayThrowException] MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+ [Conditional=MEDIA_STREAM] MediaStreamAudioDestinationNode createMediaStreamDestination();
// Processing nodes
GainNode createGain();
- [RaisesException] DelayNode createDelay(optional double maxDelayTime);
+ [MayThrowException] DelayNode createDelay(optional unrestricted double maxDelayTime = 1);
BiquadFilterNode createBiquadFilter();
WaveShaperNode createWaveShaper();
PannerNode createPanner();
ConvolverNode createConvolver();
DynamicsCompressorNode createDynamicsCompressor();
AnalyserNode createAnalyser();
- [RaisesException] ScriptProcessorNode createScriptProcessor(unsigned long bufferSize, optional unsigned long numberOfInputChannels, optional unsigned long numberOfOutputChannels);
+ [MayThrowException] ScriptProcessorNode createScriptProcessor(unsigned long bufferSize, optional unsigned long numberOfInputChannels = 2, optional unsigned long numberOfOutputChannels = 2);
OscillatorNode createOscillator();
- [RaisesException] PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+ [MayThrowException] PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
// Channel splitting and merging
- [RaisesException] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs);
- [RaisesException] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs);
+ [MayThrowException] ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+ [MayThrowException] ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
// Offline rendering
- // void prepareOfflineBufferRendering(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate);
- attribute EventListener oncomplete;
+ // void prepareOfflineBufferRendering(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate);
+ attribute EventHandler oncomplete;
void startRendering();
-
- [Conditional=LEGACY_WEB_AUDIO, ImplementedAs=createGain] GainNode createGainNode();
- [Conditional=LEGACY_WEB_AUDIO, ImplementedAs=createDelay, RaisesException] DelayNode createDelayNode(optional double maxDelayTime);
-
- [Conditional=LEGACY_WEB_AUDIO, ImplementedAs=createScriptProcessor, RaisesException] ScriptProcessorNode createJavaScriptNode(unsigned long bufferSize, optional unsigned long numberOfInputChannels, optional unsigned long numberOfOutputChannels);
-
};
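
(A hypothetical sketch of how the Promise-returning suspend() above can be routed through the destination node, based on willPausePlayback()/setState() in the header diff and the suspend(Function&&) hook added to AudioDestinationNode below. The callback types and gating logic are stand-ins, not the patch's exact behavior.)

    #include <functional>
    #include <utility>

    enum class State { Suspended, Running, Interrupted, Closed };

    struct DestinationModel {
        // A real destination stops the hardware render callback before completing.
        void suspend(std::function<void()>&& completionHandler) { completionHandler(); }
    };

    struct ContextModel {
        State state { State::Running };
        DestinationModel destination;

        bool willPausePlayback() { return true; } // media-session gating elided

        // resolvePromise(true) resolves, resolvePromise(false) rejects.
        void suspend(std::function<void(bool)>&& resolvePromise)
        {
            if (state == State::Closed) {
                resolvePromise(false); // cannot suspend a closed context
                return;
            }
            if (!willPausePlayback())
                return; // left pending until the media session allows pausing
            destination.suspend([this, resolve = std::move(resolvePromise)] {
                state = State::Suspended;
                resolve(true);
            });
        }
    };
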
diff --git a/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
index cbe89a9a8..511281851 100644
--- a/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioDestinationNode.cpp
@@ -36,9 +36,12 @@
namespace WebCore {
-AudioDestinationNode::AudioDestinationNode(AudioContext* context, float sampleRate)
+AudioDestinationNode::AudioDestinationNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
, m_currentSampleFrame(0)
+ , m_isSilent(true)
+ , m_isEffectivelyPlayingAudio(false)
+ , m_muted(false)
{
addInput(std::make_unique<AudioNodeInput>(this));
@@ -50,36 +53,30 @@ AudioDestinationNode::~AudioDestinationNode()
uninitialize();
}
-void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames)
+void AudioDestinationNode::render(AudioBus*, AudioBus* destinationBus, size_t numberOfFrames)
{
// We don't want denormals slowing down any of the audio processing
// since they can very seriously hurt performance.
// This will take care of all AudioNodes because they all process within this scope.
DenormalDisabler denormalDisabler;
- context()->setAudioThread(currentThread());
+ context().setAudioThread(currentThread());
- if (!context()->isRunnable()) {
+ if (!context().isInitialized()) {
destinationBus->zero();
+ setIsSilent(true);
return;
}
- if (context()->userGestureRequiredForAudioStart()) {
- destinationBus->zero();
- return;
- }
-
- if (context()->pageConsentRequiredForAudioStart()) {
+ ASSERT(numberOfFrames);
+ if (!numberOfFrames) {
destinationBus->zero();
+ setIsSilent(true);
return;
}
// Let the context take care of any business at the start of each render quantum.
- context()->handlePreRenderTasks();
-
- // Prepare the local audio input provider for this render quantum.
- if (sourceBus)
- m_localAudioInputProvider.set(sourceBus);
+ context().handlePreRenderTasks();
// This will cause the node(s) connected to us to process, which in turn will pull on their input(s),
// all the way backwards through the rendering graph.
@@ -93,13 +90,44 @@ void AudioDestinationNode::render(AudioBus* sourceBus, AudioBus* destinationBus,
}
// Process nodes which need a little extra help because they are not connected to anything, but still need to process.
- context()->processAutomaticPullNodes(numberOfFrames);
+ context().processAutomaticPullNodes(numberOfFrames);
// Let the context take care of any business at the end of each render quantum.
- context()->handlePostRenderTasks();
+ context().handlePostRenderTasks();
// Advance current sample-frame.
m_currentSampleFrame += numberOfFrames;
+
+ setIsSilent(destinationBus->isSilent());
+
+ // We handle mute after the call to setIsSilent() because the muted state does
+ // not affect the audio destination node's effective playing state.
+ if (m_muted)
+ destinationBus->zero();
+}
+
+void AudioDestinationNode::isPlayingDidChange()
+{
+ updateIsEffectivelyPlayingAudio();
+}
+
+void AudioDestinationNode::setIsSilent(bool isSilent)
+{
+ if (m_isSilent == isSilent)
+ return;
+
+ m_isSilent = isSilent;
+ updateIsEffectivelyPlayingAudio();
+}
+
+void AudioDestinationNode::updateIsEffectivelyPlayingAudio()
+{
+ bool isEffectivelyPlayingAudio = isPlaying() && !m_isSilent;
+ if (m_isEffectivelyPlayingAudio == isEffectivelyPlayingAudio)
+ return;
+
+ m_isEffectivelyPlayingAudio = isEffectivelyPlayingAudio;
+ context().isPlayingAudioDidChange();
}
} // namespace WebCore
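
(A tiny standalone model of the logic above, as a sanity check: audible output requires both a playing destination and a non-silent bus, and the context is notified only when that combined state actually changes. Names are illustrative.)

    #include <cassert>

    struct DestinationModel {
        bool playing { false };
        bool silent { true };
        bool effectivelyPlaying { false };
        int notifications { 0 };

        void update()
        {
            bool now = playing && !silent;
            if (now == effectivelyPlaying)
                return; // only notify the context on real changes
            effectivelyPlaying = now;
            ++notifications; // stands in for context().isPlayingAudioDidChange()
        }
    };

    int main()
    {
        DestinationModel d;
        d.playing = true;
        d.update(); // still silent: no notification
        d.silent = false;
        d.update(); // now audible: exactly one notification
        d.update(); // redundant update: still one
        assert(d.notifications == 1 && d.effectivelyPlaying);
    }
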
diff --git a/Source/WebCore/Modules/webaudio/AudioDestinationNode.h b/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
index e742c605e..9ebaf1399 100644
--- a/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioDestinationNode.h
@@ -22,83 +22,63 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioDestinationNode_h
-#define AudioDestinationNode_h
+#pragma once
#include "AudioBuffer.h"
#include "AudioBus.h"
#include "AudioIOCallback.h"
#include "AudioNode.h"
#include "AudioSourceProvider.h"
+#include <wtf/Function.h>
namespace WebCore {
-class AudioBus;
class AudioContext;
-
+
class AudioDestinationNode : public AudioNode, public AudioIOCallback {
public:
- AudioDestinationNode(AudioContext*, float sampleRate);
+ AudioDestinationNode(AudioContext&, float sampleRate);
virtual ~AudioDestinationNode();
// AudioNode
- virtual void process(size_t) override { }; // we're pulled by hardware so this is never called
- virtual void reset() override { m_currentSampleFrame = 0; }
+ void process(size_t) override { }; // we're pulled by hardware so this is never called
+ void reset() override { m_currentSampleFrame = 0; }
// The audio hardware calls render() to get the next render quantum of audio into destinationBus.
// It will optionally give us local/live audio input in sourceBus (if it's not 0).
- virtual void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames) override;
+ void render(AudioBus* sourceBus, AudioBus* destinationBus, size_t numberOfFrames) override;
size_t currentSampleFrame() const { return m_currentSampleFrame; }
double currentTime() const { return currentSampleFrame() / static_cast<double>(sampleRate()); }
- virtual unsigned long maxChannelCount() const { return 0; }
+ virtual unsigned maxChannelCount() const { return 0; }
// Enable local/live input for the specified device.
virtual void enableInput(const String& inputDeviceId) = 0;
virtual void startRendering() = 0;
+ virtual void resume(WTF::Function<void ()>&&) { }
+ virtual void suspend(WTF::Function<void ()>&&) { }
+ virtual void close(WTF::Function<void ()>&&) { }
- AudioSourceProvider* localAudioInputProvider() { return &m_localAudioInputProvider; }
-
-protected:
- // LocalAudioInputProvider allows us to expose an AudioSourceProvider for local/live audio input.
- // If there is local/live audio input, we call set() with the audio input data every render quantum.
- class LocalAudioInputProvider : public AudioSourceProvider {
- public:
- LocalAudioInputProvider()
- : m_sourceBus(AudioBus::create(2, AudioNode::ProcessingSizeInFrames)) // FIXME: handle non-stereo local input.
- {
- }
-
- void set(AudioBus* bus)
- {
- if (bus)
- m_sourceBus->copyFrom(*bus);
- }
-
- // AudioSourceProvider.
- virtual void provideInput(AudioBus* destinationBus, size_t numberOfFrames) override
- {
- bool isGood = destinationBus && destinationBus->length() == numberOfFrames && m_sourceBus->length() == numberOfFrames;
- ASSERT(isGood);
- if (isGood)
- destinationBus->copyFrom(*m_sourceBus);
- }
+ virtual bool isPlaying() { return false; }
+ void isPlayingDidChange() override;
+ bool isPlayingAudio() const { return m_isEffectivelyPlayingAudio; }
+ void setMuted(bool muted) { m_muted = muted; }
- private:
- RefPtr<AudioBus> m_sourceBus;
- };
+protected:
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ void setIsSilent(bool);
+ void updateIsEffectivelyPlayingAudio();
// Counts the number of sample-frames processed by the destination.
size_t m_currentSampleFrame;
- LocalAudioInputProvider m_localAudioInputProvider;
+ bool m_isSilent;
+ bool m_isEffectivelyPlayingAudio;
+ bool m_muted;
};
} // namespace WebCore
-
-#endif // AudioDestinationNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioListener.cpp b/Source/WebCore/Modules/webaudio/AudioListener.cpp
index 2f1ef764d..476d92d68 100644
--- a/Source/WebCore/Modules/webaudio/AudioListener.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioListener.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/Modules/webaudio/AudioListener.h b/Source/WebCore/Modules/webaudio/AudioListener.h
index 8b5d8ad1a..12e79779d 100644
--- a/Source/WebCore/Modules/webaudio/AudioListener.h
+++ b/Source/WebCore/Modules/webaudio/AudioListener.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,11 +26,10 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioListener_h
-#define AudioListener_h
+#pragma once
#include "FloatPoint3D.h"
-#include <wtf/PassRefPtr.h>
+#include <wtf/Ref.h>
#include <wtf/RefCounted.h>
namespace WebCore {
@@ -39,9 +38,9 @@ namespace WebCore {
class AudioListener : public RefCounted<AudioListener> {
public:
- static PassRefPtr<AudioListener> create()
+ static Ref<AudioListener> create()
{
- return adoptRef(new AudioListener());
+ return adoptRef(*new AudioListener);
}
// Position
@@ -89,6 +88,4 @@ private:
double m_speedOfSound;
};
-} // WebCore
-
-#endif // AudioListener_h
+} // namespace WebCore
diff --git a/Source/WebCore/Modules/webaudio/AudioListener.idl b/Source/WebCore/Modules/webaudio/AudioListener.idl
index 8ec2a9073..17018b52b 100644
--- a/Source/WebCore/Modules/webaudio/AudioListener.idl
+++ b/Source/WebCore/Modules/webaudio/AudioListener.idl
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,10 +30,10 @@
Conditional=WEB_AUDIO,
ImplementationLacksVTable,
] interface AudioListener {
- attribute float dopplerFactor; // same as OpenAL (default 1.0)
- attribute float speedOfSound; // in meters / second (default 343.3)
+ attribute unrestricted float dopplerFactor; // same as OpenAL (default 1.0)
+ attribute unrestricted float speedOfSound; // in meters / second (default 343.3)
- void setPosition(float x, float y, float z);
- void setOrientation(float x, float y, float z, float xUp, float yUp, float zUp);
- void setVelocity(float x, float y, float z);
+ void setPosition(unrestricted float x, unrestricted float y, unrestricted float z);
+ void setOrientation(unrestricted float x, unrestricted float y, unrestricted float z, unrestricted float xUp, unrestricted float yUp, unrestricted float zUp);
+ void setVelocity(unrestricted float x, unrestricted float y, unrestricted float z);
};
diff --git a/Source/WebCore/Modules/webaudio/AudioNode.cpp b/Source/WebCore/Modules/webaudio/AudioNode.cpp
index 514ec7a3e..d0d42335f 100644
--- a/Source/WebCore/Modules/webaudio/AudioNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioNode.cpp
@@ -42,7 +42,7 @@
namespace WebCore {
-AudioNode::AudioNode(AudioContext* context, float sampleRate)
+AudioNode::AudioNode(AudioContext& context, float sampleRate)
: m_isInitialized(false)
, m_nodeType(NodeTypeUnknown)
, m_context(context)
@@ -67,6 +67,7 @@ AudioNode::AudioNode(AudioContext* context, float sampleRate)
AudioNode::~AudioNode()
{
+ ASSERT(isMainThread());
#if DEBUG_AUDIONODE_REFERENCES
--s_nodeCount[nodeType()];
fprintf(stderr, "%p: %d: AudioNode::~AudioNode() %d %d\n", this, nodeType(), m_normalRefCount.load(), m_connectionRefCount);
@@ -100,12 +101,12 @@ void AudioNode::lazyInitialize()
void AudioNode::addInput(std::unique_ptr<AudioNodeInput> input)
{
- m_inputs.append(std::move(input));
+ m_inputs.append(WTFMove(input));
}
void AudioNode::addOutput(std::unique_ptr<AudioNodeOutput> output)
{
- m_outputs.append(std::move(output));
+ m_outputs.append(WTFMove(output));
}
AudioNodeInput* AudioNode::input(unsigned i)
@@ -122,117 +123,103 @@ AudioNodeOutput* AudioNode::output(unsigned i)
return nullptr;
}
-void AudioNode::connect(AudioNode* destination, unsigned outputIndex, unsigned inputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::connect(AudioNode& destination, unsigned outputIndex, unsigned inputIndex)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
-
- if (!destination) {
- ec = SYNTAX_ERR;
- return;
- }
+ AudioContext::AutoLocker locker(context());
// Sanity check input and output indices.
- if (outputIndex >= numberOfOutputs()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (outputIndex >= numberOfOutputs())
+ return Exception { INDEX_SIZE_ERR };
- if (destination && inputIndex >= destination->numberOfInputs()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (inputIndex >= destination.numberOfInputs())
+ return Exception { INDEX_SIZE_ERR };
- if (context() != destination->context()) {
- ec = SYNTAX_ERR;
- return;
- }
+ if (context() != destination.context())
+ return Exception { SYNTAX_ERR };
- AudioNodeInput* input = destination->input(inputIndex);
- AudioNodeOutput* output = this->output(outputIndex);
+ auto* input = destination.input(inputIndex);
+ auto* output = this->output(outputIndex);
input->connect(output);
// Let context know that a connection has been made.
- context()->incrementConnectionCount();
+ context().incrementConnectionCount();
+
+ return { };
}
-void AudioNode::connect(AudioParam* param, unsigned outputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::connect(AudioParam& param, unsigned outputIndex)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
- if (!param) {
- ec = SYNTAX_ERR;
- return;
- }
+ if (outputIndex >= numberOfOutputs())
+ return Exception { INDEX_SIZE_ERR };
- if (outputIndex >= numberOfOutputs()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (context() != param.context())
+ return Exception { SYNTAX_ERR };
- if (context() != param->context()) {
- ec = SYNTAX_ERR;
- return;
- }
+ auto* output = this->output(outputIndex);
+ param.connect(output);
- AudioNodeOutput* output = this->output(outputIndex);
- param->connect(output);
+ return { };
}
-void AudioNode::disconnect(unsigned outputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::disconnect(unsigned outputIndex)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
// Sanity check input and output indices.
- if (outputIndex >= numberOfOutputs()) {
- ec = INDEX_SIZE_ERR;
- return;
- }
+ if (outputIndex >= numberOfOutputs())
+ return Exception { INDEX_SIZE_ERR };
- AudioNodeOutput* output = this->output(outputIndex);
+ auto* output = this->output(outputIndex);
output->disconnectAll();
+
+ return { };
}
-unsigned long AudioNode::channelCount()
+unsigned AudioNode::channelCount()
{
return m_channelCount;
}
-void AudioNode::setChannelCount(unsigned long channelCount, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::setChannelCount(unsigned channelCount)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
- if (channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()) {
- if (m_channelCount != channelCount) {
- m_channelCount = channelCount;
- if (m_channelCountMode != Max)
- updateChannelsForInputs();
- }
- } else
- ec = INVALID_STATE_ERR;
+ if (!(channelCount > 0 && channelCount <= AudioContext::maxNumberOfChannels()))
+ return Exception { INVALID_STATE_ERR };
+
+ if (m_channelCount == channelCount)
+ return { };
+
+ m_channelCount = channelCount;
+ if (m_channelCountMode != Max)
+ updateChannelsForInputs();
+ return { };
}
String AudioNode::channelCountMode()
{
switch (m_channelCountMode) {
case Max:
- return "max";
+ return ASCIILiteral("max");
case ClampedMax:
- return "clamped-max";
+ return ASCIILiteral("clamped-max");
case Explicit:
- return "explicit";
+ return ASCIILiteral("explicit");
}
ASSERT_NOT_REACHED();
- return "";
+ return emptyString();
}
-void AudioNode::setChannelCountMode(const String& mode, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::setChannelCountMode(const String& mode)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
ChannelCountMode oldMode = m_channelCountMode;
@@ -243,41 +230,45 @@ void AudioNode::setChannelCountMode(const String& mode, ExceptionCode& ec)
else if (mode == "explicit")
m_channelCountMode = Explicit;
else
- ec = INVALID_STATE_ERR;
+ return Exception { INVALID_STATE_ERR };
if (m_channelCountMode != oldMode)
updateChannelsForInputs();
+
+ return { };
}
String AudioNode::channelInterpretation()
{
switch (m_channelInterpretation) {
case AudioBus::Speakers:
- return "speakers";
+ return ASCIILiteral("speakers");
case AudioBus::Discrete:
- return "discrete";
+ return ASCIILiteral("discrete");
}
ASSERT_NOT_REACHED();
- return "";
+ return emptyString();
}
-void AudioNode::setChannelInterpretation(const String& interpretation, ExceptionCode& ec)
+ExceptionOr<void> AudioNode::setChannelInterpretation(const String& interpretation)
{
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
if (interpretation == "speakers")
m_channelInterpretation = AudioBus::Speakers;
else if (interpretation == "discrete")
m_channelInterpretation = AudioBus::Discrete;
else
- ec = INVALID_STATE_ERR;
+ return Exception { INVALID_STATE_ERR };
+
+ return { };
}
void AudioNode::updateChannelsForInputs()
{
- for (unsigned i = 0; i < m_inputs.size(); ++i)
- input(i)->changedOutputs();
+ for (auto& input : m_inputs)
+ input->changedOutputs();
}
EventTargetInterface AudioNode::eventTargetInterface() const
@@ -287,12 +278,12 @@ EventTargetInterface AudioNode::eventTargetInterface() const
ScriptExecutionContext* AudioNode::scriptExecutionContext() const
{
- return const_cast<AudioNode*>(this)->context()->scriptExecutionContext();
+ return const_cast<AudioNode*>(this)->context().scriptExecutionContext();
}
void AudioNode::processIfNecessary(size_t framesToProcess)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
if (!isInitialized())
return;
@@ -301,7 +292,7 @@ void AudioNode::processIfNecessary(size_t framesToProcess)
// This handles the "fanout" problem where an output is connected to multiple inputs.
// The first time we're called during this time slice we process, but after that we don't want to re-process,
// instead our output(s) will already have the results cached in their bus;
- double currentTime = context()->currentTime();
+ double currentTime = context().currentTime();
if (m_lastProcessingTime != currentTime) {
m_lastProcessingTime = currentTime; // important to first update this time because of feedback loops in the rendering graph
@@ -309,7 +300,7 @@ void AudioNode::processIfNecessary(size_t framesToProcess)
bool silentInputs = inputsAreSilent();
if (!silentInputs)
- m_lastNonSilentTime = (context()->currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);
+ m_lastNonSilentTime = (context().currentSampleFrame() + framesToProcess) / static_cast<double>(m_sampleRate);
if (silentInputs && propagatesSilence())
silenceOutputs();
@@ -322,9 +313,9 @@ void AudioNode::processIfNecessary(size_t framesToProcess)
void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
- for (const std::unique_ptr<AudioNodeInput>& savedInput : m_inputs) {
+ for (auto& savedInput : m_inputs) {
if (input == savedInput.get()) {
input->updateInternalBus();
return;
@@ -336,22 +327,22 @@ void AudioNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
bool AudioNode::propagatesSilence() const
{
- return m_lastNonSilentTime + latencyTime() + tailTime() < context()->currentTime();
+ return m_lastNonSilentTime + latencyTime() + tailTime() < context().currentTime();
}
void AudioNode::pullInputs(size_t framesToProcess)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// Process all of the AudioNodes connected to our inputs.
- for (unsigned i = 0; i < m_inputs.size(); ++i)
- input(i)->pull(0, framesToProcess);
+ for (auto& input : m_inputs)
+ input->pull(0, framesToProcess);
}
bool AudioNode::inputsAreSilent()
{
- for (unsigned i = 0; i < m_inputs.size(); ++i) {
- if (!input(i)->bus()->isSilent())
+ for (auto& input : m_inputs) {
+ if (!input->bus()->isSilent())
return false;
}
return true;
@@ -359,25 +350,25 @@ bool AudioNode::inputsAreSilent()
void AudioNode::silenceOutputs()
{
- for (unsigned i = 0; i < m_outputs.size(); ++i)
- output(i)->bus()->zero();
+ for (auto& output : m_outputs)
+ output->bus()->zero();
}
void AudioNode::unsilenceOutputs()
{
- for (unsigned i = 0; i < m_outputs.size(); ++i)
- output(i)->bus()->clearSilentFlag();
+ for (auto& output : m_outputs)
+ output->bus()->clearSilentFlag();
}
void AudioNode::enableOutputsIfNecessary()
{
if (m_isDisabled && m_connectionRefCount > 0) {
ASSERT(isMainThread());
- AudioContext::AutoLocker locker(*context());
+ AudioContext::AutoLocker locker(context());
m_isDisabled = false;
- for (unsigned i = 0; i < m_outputs.size(); ++i)
- output(i)->enable();
+ for (auto& output : m_outputs)
+ output->enable();
}
}
@@ -401,8 +392,8 @@ void AudioNode::disableOutputsIfNecessary()
// longer any active connections.
if (nodeType() != NodeTypeConvolver && nodeType() != NodeTypeDelay) {
m_isDisabled = true;
- for (unsigned i = 0; i < m_outputs.size(); ++i)
- output(i)->disable();
+ for (auto& output : m_outputs)
+ output->disable();
}
}
}
@@ -438,11 +429,11 @@ void AudioNode::deref(RefType refType)
bool hasLock = false;
bool mustReleaseLock = false;
- if (context()->isAudioThread()) {
+ if (context().isAudioThread()) {
// Real-time audio thread must not contend lock (to avoid glitches).
- hasLock = context()->tryLock(mustReleaseLock);
+ hasLock = context().tryLock(mustReleaseLock);
} else {
- context()->lock(mustReleaseLock);
+ context().lock(mustReleaseLock);
hasLock = true;
}
@@ -451,24 +442,24 @@ void AudioNode::deref(RefType refType)
finishDeref(refType);
if (mustReleaseLock)
- context()->unlock();
+ context().unlock();
} else {
// We were unable to get the lock, so put this in a list to finish up later.
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
ASSERT(refType == RefTypeConnection);
- context()->addDeferredFinishDeref(this);
+ context().addDeferredFinishDeref(this);
}
// Once AudioContext::uninitialize() is called there's no more chances for deleteMarkedNodes() to get called, so we call here.
// We can't call in AudioContext::~AudioContext() since it will never be called as long as any AudioNode is alive
// because AudioNodes keep a reference to the context.
- if (context()->isAudioThreadFinished())
- context()->deleteMarkedNodes();
+ if (context().isAudioThreadFinished())
+ context().deleteMarkedNodes();
}
void AudioNode::finishDeref(RefType refType)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
switch (refType) {
case RefTypeNormal:
@@ -491,11 +482,11 @@ void AudioNode::finishDeref(RefType refType)
if (!m_normalRefCount) {
if (!m_isMarkedForDeletion) {
// All references are gone - we need to go away.
- for (unsigned i = 0; i < m_outputs.size(); ++i)
- output(i)->disconnectAll(); // This will deref() nodes we're connected to.
+ for (auto& output : m_outputs)
+ output->disconnectAll(); // This will deref() nodes we're connected to.
// Mark for deletion at end of each render quantum or when context shuts down.
- context()->markForDeletion(this);
+ context().markForDeletion(this);
m_isMarkedForDeletion = true;
}
} else if (refType == RefTypeConnection)
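
(The locking discipline in deref() above deserves a spelled-out model: the real-time audio thread must never block on the graph lock, so it try-locks and defers contended work to a list drained later under the lock. std::mutex stands in for the context's graph lock.)

    #include <mutex>
    #include <vector>

    struct GraphLockModel {
        std::mutex graphMutex;
        std::vector<int> deferredFinishDerefs; // touched only on the audio thread

        void deref(bool isAudioThread, int node)
        {
            if (isAudioThread) {
                // The audio thread must not contend the lock (to avoid glitches).
                if (graphMutex.try_lock()) {
                    finishDeref(node);
                    graphMutex.unlock();
                } else {
                    deferredFinishDerefs.push_back(node); // finished later, under the lock
                }
                return;
            }
            std::lock_guard<std::mutex> lock(graphMutex); // other threads may block
            finishDeref(node);
        }

        void finishDeref(int) { }
    };
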
diff --git a/Source/WebCore/Modules/webaudio/AudioNode.h b/Source/WebCore/Modules/webaudio/AudioNode.h
index 1536056a0..72f8a4af7 100644
--- a/Source/WebCore/Modules/webaudio/AudioNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioNode.h
@@ -22,15 +22,12 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioNode_h
-#define AudioNode_h
+#pragma once
#include "AudioBus.h"
#include "EventTarget.h"
-#include <memory>
+#include "ExceptionOr.h"
#include <wtf/Forward.h>
-#include <wtf/RefPtr.h>
-#include <wtf/Vector.h>
#define DEBUG_AUDIONODE_REFERENCES 0
@@ -41,8 +38,6 @@ class AudioNodeInput;
class AudioNodeOutput;
class AudioParam;
-typedef int ExceptionCode;
-
// An AudioNode is the basic building block for handling audio within an AudioContext.
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output.
@@ -53,11 +48,11 @@ class AudioNode : public EventTargetWithInlineData {
public:
enum { ProcessingSizeInFrames = 128 };
- AudioNode(AudioContext*, float sampleRate);
+ AudioNode(AudioContext&, float sampleRate);
virtual ~AudioNode();
- AudioContext* context() { return m_context.get(); }
- const AudioContext* context() const { return m_context.get(); }
+ AudioContext& context() { return m_context.get(); }
+ const AudioContext& context() const { return m_context.get(); }
enum NodeType {
NodeTypeUnknown,
@@ -126,9 +121,9 @@ public:
AudioNodeOutput* output(unsigned);
// Called from main thread by corresponding JavaScript methods.
- virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionCode&);
- void connect(AudioParam*, unsigned outputIndex, ExceptionCode&);
- virtual void disconnect(unsigned outputIndex, ExceptionCode&);
+ virtual ExceptionOr<void> connect(AudioNode&, unsigned outputIndex, unsigned inputIndex);
+ ExceptionOr<void> connect(AudioParam&, unsigned outputIndex);
+ virtual ExceptionOr<void> disconnect(unsigned outputIndex);
virtual float sampleRate() const { return m_sampleRate; }
@@ -166,21 +161,21 @@ public:
void enableOutputsIfNecessary();
void disableOutputsIfNecessary();
- unsigned long channelCount();
- virtual void setChannelCount(unsigned long, ExceptionCode&);
+ unsigned channelCount();
+ virtual ExceptionOr<void> setChannelCount(unsigned);
String channelCountMode();
- void setChannelCountMode(const String&, ExceptionCode&);
+ ExceptionOr<void> setChannelCountMode(const String&);
String channelInterpretation();
- void setChannelInterpretation(const String&, ExceptionCode&);
+ ExceptionOr<void> setChannelInterpretation(const String&);
ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }
// EventTarget
- virtual EventTargetInterface eventTargetInterface() const override;
- virtual ScriptExecutionContext* scriptExecutionContext() const override final;
+ EventTargetInterface eventTargetInterface() const override;
+ ScriptExecutionContext* scriptExecutionContext() const final;
protected:
// Inputs and outputs must be created before the AudioNode is initialized.
@@ -198,7 +193,7 @@ protected:
private:
volatile bool m_isInitialized;
NodeType m_nodeType;
- RefPtr<AudioContext> m_context;
+ Ref<AudioContext> m_context;
float m_sampleRate;
Vector<std::unique_ptr<AudioNodeInput>> m_inputs;
Vector<std::unique_ptr<AudioNodeOutput>> m_outputs;
@@ -218,8 +213,8 @@ private:
static int s_nodeCount[NodeTypeEnd];
#endif
- virtual void refEventTarget() override { ref(); }
- virtual void derefEventTarget() override { deref(); }
+ void refEventTarget() override { ref(); }
+ void derefEventTarget() override { deref(); }
protected:
unsigned m_channelCount;
@@ -228,5 +223,3 @@ protected:
};
} // namespace WebCore
-
-#endif // AudioNode_h
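
(The recurring mechanical change in this file: ExceptionCode& out-parameters become ExceptionOr<void> return values, so a failure is an early return of a value rather than a write through a reference. A minimal model, with std::optional standing in for WebCore's richer ExceptionOr.)

    #include <optional>

    enum ExceptionCode { INDEX_SIZE_ERR = 1, INVALID_STATE_ERR = 11 };

    using ExceptionOrVoid = std::optional<ExceptionCode>; // nullopt means success

    ExceptionOrVoid setChannelCount(unsigned& channelCount, unsigned newCount, unsigned maxChannels)
    {
        if (!newCount || newCount > maxChannels)
            return INVALID_STATE_ERR; // was: ec = INVALID_STATE_ERR; return;
        channelCount = newCount;
        return std::nullopt;
    }

    // Caller side: the bindings inspect the return value instead of an out-param,
    // e.g. (hypothetical helper name):
    //     if (auto exception = setChannelCount(count, 3, 32))
    //         propagateException(*exception);
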
diff --git a/Source/WebCore/Modules/webaudio/AudioNode.idl b/Source/WebCore/Modules/webaudio/AudioNode.idl
index 9167e5789..2ba0500c7 100644
--- a/Source/WebCore/Modules/webaudio/AudioNode.idl
+++ b/Source/WebCore/Modules/webaudio/AudioNode.idl
@@ -24,30 +24,17 @@
[
Conditional=WEB_AUDIO,
- JSGenerateToJSObject,
- JSGenerateToNativeObject,
GenerateIsReachable=Impl,
- EventTarget,
] interface AudioNode : EventTarget {
readonly attribute AudioContext context;
readonly attribute unsigned long numberOfInputs;
readonly attribute unsigned long numberOfOutputs;
- [SetterRaisesException] attribute unsigned long channelCount;
+ [SetterMayThrowException] attribute unsigned long channelCount;
+ [SetterMayThrowException] attribute DOMString channelCountMode;
+ [SetterMayThrowException] attribute DOMString channelInterpretation;
- [SetterRaisesException] attribute DOMString channelCountMode;
-
- [SetterRaisesException] attribute DOMString channelInterpretation;
-
- [RaisesException] void connect(AudioNode? destination, [Default=Undefined] optional unsigned long output, [Default=Undefined] optional unsigned long input);
-
- [RaisesException] void connect(AudioParam? destination, [Default=Undefined] optional unsigned long output);
-
- [RaisesException] void disconnect([Default=Undefined] optional unsigned long output);
-
- void addEventListener(DOMString type, EventListener listener, optional boolean useCapture);
-
- void removeEventListener(DOMString type, EventListener listener, optional boolean useCapture);
-
- [RaisesException] boolean dispatchEvent(Event event);
+ [MayThrowException] void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
+ [MayThrowException] void connect(AudioParam destination, optional unsigned long output = 0);
+ [MayThrowException] void disconnect(optional unsigned long output = 0);
};
diff --git a/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp b/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp
index c62333d2b..ccaa51d82 100644
--- a/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioNodeInput.cpp
@@ -45,7 +45,7 @@ AudioNodeInput::AudioNodeInput(AudioNode* node)
void AudioNodeInput::connect(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output && node());
if (!output || !node())
@@ -64,7 +64,7 @@ void AudioNodeInput::connect(AudioNodeOutput* output)
void AudioNodeInput::disconnect(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output && node());
if (!output || !node())
@@ -90,7 +90,7 @@ void AudioNodeInput::disconnect(AudioNodeOutput* output)
void AudioNodeInput::disable(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output && node());
if (!output || !node())
@@ -108,7 +108,7 @@ void AudioNodeInput::disable(AudioNodeOutput* output)
void AudioNodeInput::enable(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output && node());
if (!output || !node())
@@ -132,7 +132,7 @@ void AudioNodeInput::didUpdate()
void AudioNodeInput::updateInternalBus()
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
unsigned numberOfInputChannels = numberOfChannels();
@@ -151,8 +151,7 @@ unsigned AudioNodeInput::numberOfChannels() const
// Find the number of channels of the connection with the largest number of channels.
unsigned maxChannels = 1; // one channel is the minimum allowed
- for (HashSet<AudioNodeOutput*>::iterator i = m_outputs.begin(); i != m_outputs.end(); ++i) {
- AudioNodeOutput* output = *i;
+ for (auto& output : m_outputs) {
// Use output()->numberOfChannels() instead of output->bus()->numberOfChannels(),
// because the calling of AudioNodeOutput::bus() is not safe here.
maxChannels = std::max(maxChannels, output->numberOfChannels());
@@ -166,7 +165,7 @@ unsigned AudioNodeInput::numberOfChannels() const
AudioBus* AudioNodeInput::bus()
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// Handle single connection specially to allow for in-place processing.
if (numberOfRenderingConnections() == 1 && node()->internalChannelCountMode() == AudioNode::Max)
@@ -178,14 +177,14 @@ AudioBus* AudioNodeInput::bus()
AudioBus* AudioNodeInput::internalSummingBus()
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
return m_internalSummingBus.get();
}
void AudioNodeInput::sumAllConnections(AudioBus* summingBus, size_t framesToProcess)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// We shouldn't be calling this method if there's only one connection, since it's less efficient.
ASSERT(numberOfRenderingConnections() > 1 || node()->internalChannelCountMode() != AudioNode::Max);
@@ -198,8 +197,7 @@ void AudioNodeInput::sumAllConnections(AudioBus* summingBus, size_t framesToProc
AudioBus::ChannelInterpretation interpretation = node()->internalChannelInterpretation();
- for (unsigned i = 0; i < numberOfRenderingConnections(); ++i) {
- AudioNodeOutput* output = renderingOutput(i);
+ for (auto& output : m_renderingOutputs) {
ASSERT(output);
// Render audio from this output.
@@ -212,7 +210,7 @@ void AudioNodeInput::sumAllConnections(AudioBus* summingBus, size_t framesToProc
AudioBus* AudioNodeInput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// Handle single connection case.
if (numberOfRenderingConnections() == 1 && node()->internalChannelCountMode() == AudioNode::Max) {
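
(The fast path cut off above, as a simplified standalone model: with exactly one connection the input passes the connected output's bus straight through, allowing in-place processing; with several connections it mixes them into an internal summing bus. Buses are reduced to float vectors of equal length.)

    #include <cstddef>
    #include <vector>

    struct Bus {
        std::vector<float> samples;
        void zero() { samples.assign(samples.size(), 0.f); }
        void sumFrom(const Bus& other) // assumes equal lengths
        {
            for (size_t i = 0; i < samples.size(); ++i)
                samples[i] += other.samples[i];
        }
    };

    struct Output {
        Bus bus;
        Bus* pull(size_t) { return &bus; } // would process the owning node first
    };

    struct Input {
        std::vector<Output*> renderingOutputs;
        Bus summingBus;

        Bus* pull(size_t framesToProcess)
        {
            if (renderingOutputs.size() == 1) // single connection: in-place
                return renderingOutputs[0]->pull(framesToProcess);

            summingBus.zero(); // fan-in: mix every connection
            for (auto* output : renderingOutputs)
                summingBus.sumFrom(*output->pull(framesToProcess));
            return &summingBus;
        }
    };
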
diff --git a/Source/WebCore/Modules/webaudio/AudioNodeInput.h b/Source/WebCore/Modules/webaudio/AudioNodeInput.h
index 5afcded37..3e8cc40ed 100644
--- a/Source/WebCore/Modules/webaudio/AudioNodeInput.h
+++ b/Source/WebCore/Modules/webaudio/AudioNodeInput.h
@@ -22,14 +22,12 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioNodeInput_h
-#define AudioNodeInput_h
+#pragma once
#include "AudioBus.h"
#include "AudioNode.h"
#include "AudioSummingJunction.h"
#include <wtf/HashSet.h>
-#include <wtf/Vector.h>
namespace WebCore {
@@ -45,8 +43,8 @@ public:
explicit AudioNodeInput(AudioNode*);
// AudioSummingJunction
- virtual bool canUpdateState() override { return !node()->isMarkedForDeletion(); }
- virtual void didUpdate() override;
+ bool canUpdateState() override { return !node()->isMarkedForDeletion(); }
+ void didUpdate() override;
// Can be called from any thread.
AudioNode* node() const { return m_node; }
@@ -95,5 +93,3 @@ private:
};
} // namespace WebCore
-
-#endif // AudioNodeInput_h
diff --git a/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp b/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp
index f9a4516f9..b810c27f4 100644
--- a/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioNodeOutput.cpp
@@ -53,16 +53,16 @@ AudioNodeOutput::AudioNodeOutput(AudioNode* node, unsigned numberOfChannels)
void AudioNodeOutput::setNumberOfChannels(unsigned numberOfChannels)
{
ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
m_desiredNumberOfChannels = numberOfChannels;
- if (context()->isAudioThread()) {
+ if (context().isAudioThread()) {
// If we're in the audio thread then we can take care of it right away (we should be at the very start or end of a rendering quantum).
updateNumberOfChannels();
} else {
// Let the context take care of it in the audio thread in the pre and post render tasks.
- context()->markAudioNodeOutputDirty(this);
+ context().markAudioNodeOutputDirty(this);
}
}
@@ -83,7 +83,7 @@ void AudioNodeOutput::updateRenderingState()
void AudioNodeOutput::updateNumberOfChannels()
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
if (m_numberOfChannels != m_desiredNumberOfChannels) {
m_numberOfChannels = m_desiredNumberOfChannels;
@@ -94,12 +94,11 @@ void AudioNodeOutput::updateNumberOfChannels()
void AudioNodeOutput::propagateChannelCount()
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
if (isChannelCountKnown()) {
// Announce to any nodes we're connected to that we changed our channel count for its input.
- for (InputsIterator i = m_inputs.begin(); i != m_inputs.end(); ++i) {
- AudioNodeInput* input = *i;
+ for (auto& input : m_inputs) {
AudioNode* connectionNode = input->node();
connectionNode->checkNumberOfChannelsForInput(input);
}
@@ -108,7 +107,7 @@ void AudioNodeOutput::propagateChannelCount()
AudioBus* AudioNodeOutput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
ASSERT(m_renderingFanOutCount > 0 || m_renderingParamFanOutCount > 0);
// Causes our AudioNode to process if it hasn't already for this render quantum.
@@ -127,19 +126,19 @@ AudioBus* AudioNodeOutput::pull(AudioBus* inPlaceBus, size_t framesToProcess)
AudioBus* AudioNodeOutput::bus() const
{
- ASSERT(const_cast<AudioNodeOutput*>(this)->context()->isAudioThread());
+ ASSERT(const_cast<AudioNodeOutput*>(this)->context().isAudioThread());
return m_isInPlace ? m_inPlaceBus.get() : m_internalBus.get();
}
unsigned AudioNodeOutput::fanOutCount()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
return m_inputs.size();
}
unsigned AudioNodeOutput::paramFanOutCount()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
return m_params.size();
}
@@ -155,7 +154,7 @@ unsigned AudioNodeOutput::renderingParamFanOutCount() const
void AudioNodeOutput::addInput(AudioNodeInput* input)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(input);
if (!input)
@@ -166,7 +165,7 @@ void AudioNodeOutput::addInput(AudioNodeInput* input)
void AudioNodeOutput::removeInput(AudioNodeInput* input)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(input);
if (!input)
@@ -177,7 +176,7 @@ void AudioNodeOutput::removeInput(AudioNodeInput* input)
void AudioNodeOutput::disconnectAllInputs()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
// AudioNodeInput::disconnect() changes m_inputs by calling removeInput().
while (!m_inputs.isEmpty()) {
@@ -188,7 +187,7 @@ void AudioNodeOutput::disconnectAllInputs()
void AudioNodeOutput::addParam(AudioParam* param)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(param);
if (!param)
@@ -199,7 +198,7 @@ void AudioNodeOutput::addParam(AudioParam* param)
void AudioNodeOutput::removeParam(AudioParam* param)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(param);
if (!param)
@@ -210,7 +209,7 @@ void AudioNodeOutput::removeParam(AudioParam* param)
void AudioNodeOutput::disconnectAllParams()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
// AudioParam::disconnect() changes m_params by calling removeParam().
while (!m_params.isEmpty()) {
@@ -227,26 +226,22 @@ void AudioNodeOutput::disconnectAll()
void AudioNodeOutput::disable()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
if (m_isEnabled) {
- for (InputsIterator i = m_inputs.begin(); i != m_inputs.end(); ++i) {
- AudioNodeInput* input = *i;
+ for (auto& input : m_inputs)
input->disable(this);
- }
m_isEnabled = false;
}
}
void AudioNodeOutput::enable()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
if (!m_isEnabled) {
- for (InputsIterator i = m_inputs.begin(); i != m_inputs.end(); ++i) {
- AudioNodeInput* input = *i;
+ for (auto& input : m_inputs)
input->enable(this);
- }
m_isEnabled = true;
}
}
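
(A simplified model of the deferred channel-count change in setNumberOfChannels() above: the audio thread sits at a render-quantum boundary and can resize immediately, while other threads record the desired count and mark the output dirty so the context applies it during pre/post-render tasks. Types are stand-ins.)

    #include <unordered_set>

    struct OutputModel;

    struct ContextModel {
        bool onAudioThread { false };
        std::unordered_set<OutputModel*> dirtyOutputs; // drained in pre/post-render tasks
        void markDirty(OutputModel* output) { dirtyOutputs.insert(output); }
    };

    struct OutputModel {
        unsigned numberOfChannels { 2 };
        unsigned desiredNumberOfChannels { 2 };

        void setNumberOfChannels(ContextModel& context, unsigned channels)
        {
            desiredNumberOfChannels = channels;
            if (context.onAudioThread)
                updateNumberOfChannels(); // safe: quantum boundary
            else
                context.markDirty(this);  // deferred to the audio thread
        }

        void updateNumberOfChannels()
        {
            if (numberOfChannels != desiredNumberOfChannels)
                numberOfChannels = desiredNumberOfChannels;
        }
    };
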
diff --git a/Source/WebCore/Modules/webaudio/AudioNodeOutput.h b/Source/WebCore/Modules/webaudio/AudioNodeOutput.h
index ece31e4ef..e88a730ef 100644
--- a/Source/WebCore/Modules/webaudio/AudioNodeOutput.h
+++ b/Source/WebCore/Modules/webaudio/AudioNodeOutput.h
@@ -22,15 +22,13 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioNodeOutput_h
-#define AudioNodeOutput_h
+#pragma once
#include "AudioBus.h"
#include "AudioNode.h"
#include "AudioParam.h"
#include <wtf/HashSet.h>
#include <wtf/RefPtr.h>
-#include <wtf/Vector.h>
namespace WebCore {
@@ -47,7 +45,7 @@ public:
// Can be called from any thread.
AudioNode* node() const { return m_node; }
- AudioContext* context() { return m_node->context(); }
+ AudioContext& context() { return m_node->context(); }
// Causes our AudioNode to process if it hasn't already for this render quantum.
// It returns the bus containing the processed audio for this output, returning inPlaceBus if in-place processing was possible.
@@ -149,5 +147,3 @@ private:
};
} // namespace WebCore
-
-#endif // AudioNodeOutput_h
diff --git a/Source/WebCore/Modules/webaudio/AudioParam.cpp b/Source/WebCore/Modules/webaudio/AudioParam.cpp
index 3e4899f50..0ff0e2043 100644
--- a/Source/WebCore/Modules/webaudio/AudioParam.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioParam.cpp
@@ -43,7 +43,7 @@ const double AudioParam::SnapThreshold = 0.001;
float AudioParam::value()
{
// Update value for timeline.
- if (context() && context()->isAudioThread()) {
+ if (context().isAudioThread()) {
bool hasValue;
float timelineValue = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), hasValue);
@@ -72,9 +72,8 @@ bool AudioParam::smooth()
// If values have been explicitly scheduled on the timeline, then use the exact value.
// Smoothing effectively is performed by the timeline.
bool useTimelineValue = false;
- if (context())
- m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
-
+ m_value = m_timeline.valueForContextTime(context(), narrowPrecisionToFloat(m_value), useTimelineValue);
+
if (m_smoothedValue == m_value) {
// Smoothed value has already approached and snapped to value.
return true;
@@ -103,7 +102,7 @@ float AudioParam::finalValue()
void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfValues)
{
- bool isSafe = context() && context()->isAudioThread() && values && numberOfValues;
+ bool isSafe = context().isAudioThread() && values && numberOfValues;
ASSERT(isSafe);
if (!isSafe)
return;
@@ -113,7 +112,7 @@ void AudioParam::calculateSampleAccurateValues(float* values, unsigned numberOfV
void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bool sampleAccurate)
{
- bool isGood = context() && context()->isAudioThread() && values && numberOfValues;
+ bool isGood = context().isAudioThread() && values && numberOfValues;
ASSERT(isGood);
if (!isGood)
return;
@@ -139,8 +138,7 @@ void AudioParam::calculateFinalValues(float* values, unsigned numberOfValues, bo
RefPtr<AudioBus> summingBus = AudioBus::create(1, numberOfValues, false);
summingBus->setChannelMemory(0, values, numberOfValues);
- for (unsigned i = 0; i < numberOfRenderingConnections(); ++i) {
- AudioNodeOutput* output = renderingOutput(i);
+ for (auto& output : m_renderingOutputs) {
ASSERT(output);
// Render audio from this output.
@@ -155,8 +153,8 @@ void AudioParam::calculateTimelineValues(float* values, unsigned numberOfValues)
{
// Calculate values for this render quantum.
// Normally numberOfValues will equal AudioNode::ProcessingSizeInFrames (the render quantum size).
- double sampleRate = context()->sampleRate();
- double startTime = context()->currentTime();
+ double sampleRate = context().sampleRate();
+ double startTime = context().currentTime();
double endTime = startTime + numberOfValues / sampleRate;
// Note we're running control rate at the sample-rate.
@@ -166,7 +164,7 @@ void AudioParam::calculateTimelineValues(float* values, unsigned numberOfValues)
void AudioParam::connect(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output);
if (!output)
@@ -181,7 +179,7 @@ void AudioParam::connect(AudioNodeOutput* output)
void AudioParam::disconnect(AudioNodeOutput* output)
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
ASSERT(output);
if (!output)
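
(A simplified model of calculateFinalValues() above: the parameter fills the output array from its intrinsic value — a real timeline would write sample-accurate automation instead — and then sums every connected audio-rate output on top, which is what wrapping the values array in a summing bus accomplishes.)

    #include <cstddef>
    #include <vector>

    struct OutputModel {
        std::vector<float> rendered;
        const std::vector<float>& pull(size_t) { return rendered; }
    };

    void calculateFinalValues(std::vector<float>& values, float intrinsicValue,
        std::vector<OutputModel*>& renderingOutputs)
    {
        for (auto& v : values)
            v = intrinsicValue; // timeline automation would go here

        // Audio-rate connections are mixed in, sample by sample.
        for (auto* output : renderingOutputs) {
            const auto& rendered = output->pull(values.size());
            for (size_t i = 0; i < values.size(); ++i)
                values[i] += rendered[i];
        }
    }
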
diff --git a/Source/WebCore/Modules/webaudio/AudioParam.h b/Source/WebCore/Modules/webaudio/AudioParam.h
index f8c79340d..b9ee132d4 100644
--- a/Source/WebCore/Modules/webaudio/AudioParam.h
+++ b/Source/WebCore/Modules/webaudio/AudioParam.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,15 +26,13 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioParam_h
-#define AudioParam_h
+#pragma once
#include "AudioContext.h"
#include "AudioParamTimeline.h"
#include "AudioSummingJunction.h"
#include <runtime/Float32Array.h>
#include <sys/types.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/text/WTFString.h>
@@ -47,14 +45,14 @@ public:
static const double DefaultSmoothingConstant;
static const double SnapThreshold;
- static PassRefPtr<AudioParam> create(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
+ static Ref<AudioParam> create(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
{
- return adoptRef(new AudioParam(context, name, defaultValue, minValue, maxValue, units));
+ return adoptRef(*new AudioParam(context, name, defaultValue, minValue, maxValue, units));
}
// AudioSummingJunction
- virtual bool canUpdateState() override { return true; }
- virtual void didUpdate() override { }
+ bool canUpdateState() override { return true; }
+ void didUpdate() override { }
// Intrinsic value.
float value();
@@ -89,7 +87,7 @@ public:
void linearRampToValueAtTime(float value, float time) { m_timeline.linearRampToValueAtTime(value, time); }
void exponentialRampToValueAtTime(float value, float time) { m_timeline.exponentialRampToValueAtTime(value, time); }
void setTargetAtTime(float target, float time, float timeConstant) { m_timeline.setTargetAtTime(target, time, timeConstant); }
- void setValueCurveAtTime(Float32Array* curve, float time, float duration) { m_timeline.setValueCurveAtTime(curve, time, duration); }
+ void setValueCurveAtTime(const RefPtr<Float32Array>& curve, float time, float duration) { m_timeline.setValueCurveAtTime(curve.get(), time, duration); }
void cancelScheduledValues(float startTime) { m_timeline.cancelScheduledValues(startTime); }
bool hasSampleAccurateValues() { return m_timeline.hasValues() || numberOfRenderingConnections(); }
@@ -103,7 +101,7 @@ public:
void disconnect(AudioNodeOutput*);
protected:
- AudioParam(AudioContext* context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
+ AudioParam(AudioContext& context, const String& name, double defaultValue, double minValue, double maxValue, unsigned units = 0)
: AudioSummingJunction(context)
, m_name(name)
, m_value(defaultValue)
@@ -136,5 +134,3 @@ private:
};
} // namespace WebCore
-
-#endif // AudioParam_h
diff --git a/Source/WebCore/Modules/webaudio/AudioParam.idl b/Source/WebCore/Modules/webaudio/AudioParam.idl
index 605462b46..cda277762 100644
--- a/Source/WebCore/Modules/webaudio/AudioParam.idl
+++ b/Source/WebCore/Modules/webaudio/AudioParam.idl
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -29,10 +29,10 @@
[
Conditional=WEB_AUDIO,
] interface AudioParam {
- attribute float value;
- readonly attribute float minValue;
- readonly attribute float maxValue;
- readonly attribute float defaultValue;
+ attribute unrestricted float value;
+ readonly attribute unrestricted float minValue;
+ readonly attribute unrestricted float maxValue;
+ readonly attribute unrestricted float defaultValue;
readonly attribute DOMString name;
@@ -40,20 +40,17 @@
readonly attribute unsigned short units;
// Parameter automation.
- void setValueAtTime(float value, float time);
- void linearRampToValueAtTime(float value, float time);
- void exponentialRampToValueAtTime(float value, float time);
+ void setValueAtTime(unrestricted float value, unrestricted float time);
+ void linearRampToValueAtTime(unrestricted float value, unrestricted float time);
+ void exponentialRampToValueAtTime(unrestricted float value, unrestricted float time);
// Exponentially approach the target with a rate having the given time constant.
- void setTargetAtTime(float target, float time, float timeConstant);
+ void setTargetAtTime(unrestricted float target, unrestricted float time, unrestricted float timeConstant);
// Sets an array of arbitrary parameter values starting at time for the given duration.
// The number of values will be scaled to fit into the desired duration.
- void setValueCurveAtTime(Float32Array values, float time, float duration);
+ void setValueCurveAtTime(Float32Array? values, unrestricted float time, unrestricted float duration); // FIXME: values should not be nullable.
// Cancels all scheduled parameter changes with times greater than or equal to startTime.
- void cancelScheduledValues(float startTime);
-
- [Conditional=LEGACY_WEB_AUDIO, ImplementedAs=setTargetAtTime] void setTargetValueAtTime(float targetValue, float time, float timeConstant);
-
+ void cancelScheduledValues(unrestricted float startTime);
};
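As a concrete illustration of the automation surface above, here is a minimal C++ sketch of scheduling a fade-out on a parameter. The variable names are hypothetical and an AudioContext& named `context` is assumed in scope; the factory and method signatures are the ones shown in the AudioParam.h hunk.

    // The new reference-based factory returns Ref<>, which can never be null.
    Ref<AudioParam> gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
    gain->setValueAtTime(1.0f, 0.0f);          // full volume at t = 0s
    gain->linearRampToValueAtTime(0.0f, 2.0f); // ramp to silence by t = 2s
    gain->cancelScheduledValues(1.5f);         // drop events scheduled at t >= 1.5s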
diff --git a/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp b/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
index 1c7e4e5f8..fdc1d3c7c 100644
--- a/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioParamTimeline.cpp
@@ -80,19 +80,21 @@ void AudioParamTimeline::insertEvent(const ParamEvent& event)
if (!isValid)
return;
- std::lock_guard<std::mutex> lock(m_eventsMutex);
+ std::lock_guard<Lock> lock(m_eventsMutex);
unsigned i = 0;
float insertTime = event.time();
- for (i = 0; i < m_events.size(); ++i) {
+ for (auto& paramEvent : m_events) {
// Overwrite same event type and time.
- if (m_events[i].time() == insertTime && m_events[i].type() == event.type()) {
- m_events[i] = event;
+ if (paramEvent.time() == insertTime && paramEvent.type() == event.type()) {
+ paramEvent = event;
return;
}
- if (m_events[i].time() > insertTime)
+ if (paramEvent.time() > insertTime)
break;
+
+ ++i;
}
m_events.insert(i, event);
@@ -100,7 +102,7 @@ void AudioParamTimeline::insertEvent(const ParamEvent& event)
void AudioParamTimeline::cancelScheduledValues(float startTime)
{
- std::lock_guard<std::mutex> lock(m_eventsMutex);
+ std::lock_guard<Lock> lock(m_eventsMutex);
// Remove all events starting at startTime.
for (unsigned i = 0; i < m_events.size(); ++i) {
@@ -111,13 +113,11 @@ void AudioParamTimeline::cancelScheduledValues(float startTime)
}
}
-float AudioParamTimeline::valueForContextTime(AudioContext* context, float defaultValue, bool& hasValue)
+float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
- ASSERT(context);
-
{
- std::unique_lock<std::mutex> lock(m_eventsMutex, std::try_to_lock);
- if (!lock.owns_lock() || !context || !m_events.size() || context->currentTime() < m_events[0].time()) {
+ std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
+ if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
hasValue = false;
return defaultValue;
}
@@ -125,8 +125,8 @@ float AudioParamTimeline::valueForContextTime(AudioContext* context, float defau
// Ask for just a single value.
float value;
- double sampleRate = context->sampleRate();
- double startTime = context->currentTime();
+ double sampleRate = context.sampleRate();
+ double startTime = context.currentTime();
double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);
@@ -138,7 +138,7 @@ float AudioParamTimeline::valueForContextTime(AudioContext* context, float defau
float AudioParamTimeline::valuesForTimeRange(double startTime, double endTime, float defaultValue, float* values, unsigned numberOfValues, double sampleRate, double controlRate)
{
// We can't contend the lock in the realtime audio thread.
- std::unique_lock<std::mutex> lock(m_eventsMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
if (!lock.owns_lock()) {
if (values) {
for (unsigned i = 0; i < numberOfValues; ++i)
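The std::try_to_lock pattern in the hunks above is the crux of the std::mutex-to-WTF::Lock migration: the realtime audio thread must never block waiting on the main thread. A minimal sketch of the same discipline (class and member names are hypothetical):

    #include <mutex>
    #include <wtf/Lock.h>

    class TimelineSketch {
    public:
        float valueOrDefault(float defaultValue)
        {
            // Try-lock only: if the main thread is mutating the event list,
            // bail out with the default rather than stalling rendering.
            std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
            if (!lock.owns_lock())
                return defaultValue;
            return m_lastValue; // shared state is read only under the lock
        }

    private:
        Lock m_eventsMutex;
        float m_lastValue { 0 };
    };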
diff --git a/Source/WebCore/Modules/webaudio/AudioParamTimeline.h b/Source/WebCore/Modules/webaudio/AudioParamTimeline.h
index 4e9f0a4a3..148391731 100644
--- a/Source/WebCore/Modules/webaudio/AudioParamTimeline.h
+++ b/Source/WebCore/Modules/webaudio/AudioParamTimeline.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,13 +26,11 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioParamTimeline_h
-#define AudioParamTimeline_h
+#pragma once
#include "AudioContext.h"
-#include <mutex>
#include <runtime/Float32Array.h>
-#include <wtf/PassRefPtr.h>
+#include <wtf/Lock.h>
#include <wtf/RefCounted.h>
#include <wtf/Vector.h>
@@ -53,7 +51,7 @@ public:
// hasValue is set to true if a valid timeline value is returned;
// otherwise, defaultValue is returned.
- float valueForContextTime(AudioContext*, float defaultValue, bool& hasValue);
+ float valueForContextTime(AudioContext&, float defaultValue, bool& hasValue);
// Given the time range, calculates parameter values into the values buffer
// and returns the last parameter value calculated for "values" or the defaultValue if none were calculated.
@@ -76,13 +74,13 @@ private:
LastType
};
- ParamEvent(Type type, float value, float time, float timeConstant, float duration, PassRefPtr<Float32Array> curve)
+ ParamEvent(Type type, float value, float time, float timeConstant, float duration, RefPtr<Float32Array>&& curve)
: m_type(type)
, m_value(value)
, m_time(time)
, m_timeConstant(timeConstant)
, m_duration(duration)
- , m_curve(curve)
+ , m_curve(WTFMove(curve))
{
}
@@ -107,9 +105,7 @@ private:
Vector<ParamEvent> m_events;
- std::mutex m_eventsMutex;
+ Lock m_eventsMutex;
};
} // namespace WebCore
-
-#endif // AudioParamTimeline_h
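The ParamEvent constructor change is an instance of the recurring PassRefPtr-to-RefPtr&& idiom in this patch: ownership is now transferred by an explicit move at the call site. A hypothetical call site (the enum value and variables are illustrative only):

    RefPtr<Float32Array> curve = Float32Array::create(256);
    // After WTFMove(), `curve` is null and the event owns the array;
    // the old PassRefPtr made the same transfer implicit and easy to miss.
    ParamEvent event(ParamEvent::SetValueCurve, 0, time, 0, duration, WTFMove(curve));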
diff --git a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.cpp b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.cpp
index 2df9b1c9f..257250710 100644
--- a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.cpp
@@ -33,24 +33,15 @@
namespace WebCore {
-PassRefPtr<AudioProcessingEvent> AudioProcessingEvent::create()
-{
- return adoptRef(new AudioProcessingEvent);
-}
-
-PassRefPtr<AudioProcessingEvent> AudioProcessingEvent::create(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer)
-{
- return adoptRef(new AudioProcessingEvent(inputBuffer, outputBuffer));
-}
-
AudioProcessingEvent::AudioProcessingEvent()
{
}
-AudioProcessingEvent::AudioProcessingEvent(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer)
+AudioProcessingEvent::AudioProcessingEvent(RefPtr<AudioBuffer>&& inputBuffer, RefPtr<AudioBuffer>&& outputBuffer, double playbackTime)
: Event(eventNames().audioprocessEvent, true, false)
- , m_inputBuffer(inputBuffer)
- , m_outputBuffer(outputBuffer)
+ , m_inputBuffer(WTFMove(inputBuffer))
+ , m_outputBuffer(WTFMove(outputBuffer))
+ , m_playbackTime(playbackTime)
{
}
diff --git a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.h b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.h
index 4b1f9eba8..cc8cce3aa 100644
--- a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.h
+++ b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.h
@@ -22,12 +22,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioProcessingEvent_h
-#define AudioProcessingEvent_h
+#pragma once
#include "AudioBuffer.h"
#include "Event.h"
-#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
namespace WebCore {
@@ -36,24 +34,31 @@ class AudioBuffer;
class AudioProcessingEvent : public Event {
public:
- static PassRefPtr<AudioProcessingEvent> create();
- static PassRefPtr<AudioProcessingEvent> create(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer);
+ static Ref<AudioProcessingEvent> create(RefPtr<AudioBuffer>&& inputBuffer, RefPtr<AudioBuffer>&& outputBuffer, double playbackTime)
+ {
+ return adoptRef(*new AudioProcessingEvent(WTFMove(inputBuffer), WTFMove(outputBuffer), playbackTime));
+ }
+
+ static Ref<AudioProcessingEvent> createForBindings()
+ {
+ return adoptRef(*new AudioProcessingEvent);
+ }
virtual ~AudioProcessingEvent();
AudioBuffer* inputBuffer() { return m_inputBuffer.get(); }
AudioBuffer* outputBuffer() { return m_outputBuffer.get(); }
+ double playbackTime() const { return m_playbackTime; }
- virtual EventInterface eventInterface() const override;
+ EventInterface eventInterface() const override;
private:
AudioProcessingEvent();
- AudioProcessingEvent(PassRefPtr<AudioBuffer> inputBuffer, PassRefPtr<AudioBuffer> outputBuffer);
+ AudioProcessingEvent(RefPtr<AudioBuffer>&& inputBuffer, RefPtr<AudioBuffer>&& outputBuffer, double playbackTime);
RefPtr<AudioBuffer> m_inputBuffer;
RefPtr<AudioBuffer> m_outputBuffer;
+ double m_playbackTime;
};
} // namespace WebCore
-
-#endif // AudioProcessingEvent_h
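The create()/createForBindings() split above follows a common WebKit pattern: the engine constructs the event fully populated, while the bindings-only factory exists so JavaScript construction yields an empty event. A hypothetical engine-side call, with both buffers already held in local RefPtrs:

    Ref<AudioProcessingEvent> event = AudioProcessingEvent::create(
        WTFMove(inputBuffer), WTFMove(outputBuffer), context.currentTime());
    // playbackTime is carried along and exposed via the new IDL attribute below.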
diff --git a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.idl b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.idl
index 5b498d2eb..d46b138c3 100644
--- a/Source/WebCore/Modules/webaudio/AudioProcessingEvent.idl
+++ b/Source/WebCore/Modules/webaudio/AudioProcessingEvent.idl
@@ -26,6 +26,7 @@
Conditional=WEB_AUDIO,
JSGenerateToJSObject
] interface AudioProcessingEvent : Event {
+ readonly attribute unrestricted double playbackTime;
readonly attribute AudioBuffer inputBuffer;
readonly attribute AudioBuffer outputBuffer;
};
diff --git a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
index 0dc2a829a..e2cfc9009 100644
--- a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.cpp
@@ -31,6 +31,7 @@
#include "AudioContext.h"
#include "AudioUtilities.h"
#include "Event.h"
+#include "EventNames.h"
#include "ScriptController.h"
#include <algorithm>
#include <wtf/MathExtras.h>
@@ -43,24 +44,14 @@ namespace WebCore {
const double AudioScheduledSourceNode::UnknownTime = -1;
-AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext* context, float sampleRate)
+AudioScheduledSourceNode::AudioScheduledSourceNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
- , m_playbackState(UNSCHEDULED_STATE)
- , m_startTime(0)
, m_endTime(UnknownTime)
- , m_hasEndedListener(false)
{
}
-void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
- AudioBus* outputBus,
- size_t& quantumFrameOffset,
- size_t& nonSilentFramesToProcess)
+void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize, AudioBus& outputBus, size_t& quantumFrameOffset, size_t& nonSilentFramesToProcess)
{
- ASSERT(outputBus);
- if (!outputBus)
- return;
-
ASSERT(quantumFrameSize == AudioNode::ProcessingSizeInFrames);
if (quantumFrameSize != AudioNode::ProcessingSizeInFrames)
return;
@@ -71,7 +62,7 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
// quantumEndFrame : End frame of the current time quantum.
// startFrame : Start frame for this source.
// endFrame : End frame for this source.
- size_t quantumStartFrame = context()->currentSampleFrame();
+ size_t quantumStartFrame = context().currentSampleFrame();
size_t quantumEndFrame = quantumStartFrame + quantumFrameSize;
size_t startFrame = AudioUtilities::timeToSampleFrame(m_startTime, sampleRate);
size_t endFrame = m_endTime == UnknownTime ? 0 : AudioUtilities::timeToSampleFrame(m_endTime, sampleRate);
@@ -82,7 +73,7 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
if (m_playbackState == UNSCHEDULED_STATE || m_playbackState == FINISHED_STATE || startFrame >= quantumEndFrame) {
// Output silence.
- outputBus->zero();
+ outputBus.zero();
nonSilentFramesToProcess = 0;
return;
}
@@ -91,7 +82,7 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
if (m_playbackState == SCHEDULED_STATE) {
// Increment the active source count only if we're transitioning from SCHEDULED_STATE to PLAYING_STATE.
m_playbackState = PLAYING_STATE;
- context()->incrementActiveSourceCount();
+ context().incrementActiveSourceCount();
}
quantumFrameOffset = startFrame > quantumStartFrame ? startFrame - quantumStartFrame : 0;
@@ -100,15 +91,15 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
if (!nonSilentFramesToProcess) {
// Output silence.
- outputBus->zero();
+ outputBus.zero();
return;
}
// Handle silence before we start playing.
// Zero any initial frames representing silence leading up to a rendering start time in the middle of the quantum.
if (quantumFrameOffset) {
- for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
- memset(outputBus->channel(i)->mutableData(), 0, sizeof(float) * quantumFrameOffset);
+ for (unsigned i = 0; i < outputBus.numberOfChannels(); ++i)
+ memset(outputBus.channel(i)->mutableData(), 0, sizeof(float) * quantumFrameOffset);
}
// Handle silence after we're done playing.
@@ -127,89 +118,81 @@ void AudioScheduledSourceNode::updateSchedulingInfo(size_t quantumFrameSize,
else
nonSilentFramesToProcess -= framesToZero;
- for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
- memset(outputBus->channel(i)->mutableData() + zeroStartFrame, 0, sizeof(float) * framesToZero);
+ for (unsigned i = 0; i < outputBus.numberOfChannels(); ++i)
+ memset(outputBus.channel(i)->mutableData() + zeroStartFrame, 0, sizeof(float) * framesToZero);
}
finish();
}
-
- return;
}
-void AudioScheduledSourceNode::start(double when, ExceptionCode& ec)
+ExceptionOr<void> AudioScheduledSourceNode::start(double when)
{
ASSERT(isMainThread());
- if (ScriptController::processingUserGesture())
- context()->removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+ context().nodeWillBeginPlayback();
- if (m_playbackState != UNSCHEDULED_STATE) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (m_playbackState != UNSCHEDULED_STATE)
+ return Exception { INVALID_STATE_ERR };
+ if (!std::isfinite(when) || when < 0)
+ return Exception { INVALID_STATE_ERR };
m_startTime = when;
m_playbackState = SCHEDULED_STATE;
+
+ return { };
}
-void AudioScheduledSourceNode::stop(double when, ExceptionCode& ec)
+ExceptionOr<void> AudioScheduledSourceNode::stop(double when)
{
ASSERT(isMainThread());
- if (!(m_playbackState == SCHEDULED_STATE || m_playbackState == PLAYING_STATE) || (m_endTime != UnknownTime)) {
- ec = INVALID_STATE_ERR;
- return;
- }
-
- when = std::max<double>(0, when);
- m_endTime = when;
-}
-#if ENABLE(LEGACY_WEB_AUDIO)
-void AudioScheduledSourceNode::noteOn(double when, ExceptionCode& ec)
-{
- start(when, ec);
-}
+ if (m_playbackState == UNSCHEDULED_STATE || m_endTime != UnknownTime)
+ return Exception { INVALID_STATE_ERR };
+ if (!std::isfinite(when) || when < 0)
+ return Exception { INVALID_STATE_ERR };
-void AudioScheduledSourceNode::noteOff(double when, ExceptionCode& ec)
-{
- stop(when, ec);
-}
-#endif
+ m_endTime = when;
-void AudioScheduledSourceNode::setOnended(PassRefPtr<EventListener> listener)
-{
- m_hasEndedListener = listener;
- setAttributeEventListener(eventNames().endedEvent, listener);
+ return { };
}
void AudioScheduledSourceNode::finish()
{
if (m_playbackState != FINISHED_STATE) {
// Let the context dereference this AudioNode.
- context()->notifyNodeFinishedProcessing(this);
+ context().notifyNodeFinishedProcessing(this);
m_playbackState = FINISHED_STATE;
- context()->decrementActiveSourceCount();
+ context().decrementActiveSourceCount();
}
- if (m_hasEndedListener)
- callOnMainThread(&AudioScheduledSourceNode::notifyEndedDispatch, this);
+ if (m_hasEndedListener) {
+ callOnMainThread([strongThis = makeRef(*this)] () mutable {
+ strongThis->dispatchEvent(Event::create(eventNames().endedEvent, false, false));
+ });
+ }
}
-void AudioScheduledSourceNode::notifyEndedDispatch(void* userData)
+bool AudioScheduledSourceNode::addEventListener(const AtomicString& eventType, Ref<EventListener>&& listener, const AddEventListenerOptions& options)
{
- static_cast<AudioScheduledSourceNode*>(userData)->notifyEnded();
+ bool success = AudioNode::addEventListener(eventType, WTFMove(listener), options);
+ if (success && eventType == eventNames().endedEvent)
+ m_hasEndedListener = hasEventListeners(eventNames().endedEvent);
+ return success;
}
-void AudioScheduledSourceNode::notifyEnded()
+bool AudioScheduledSourceNode::removeEventListener(const AtomicString& eventType, EventListener& listener, const ListenerOptions& options)
{
- EventListener* listener = onended();
- if (!listener)
- return;
+ bool success = AudioNode::removeEventListener(eventType, listener, options);
+ if (success && eventType == eventNames().endedEvent)
+ m_hasEndedListener = hasEventListeners(eventNames().endedEvent);
+ return success;
+}
- RefPtr<Event> event = Event::create(eventNames().endedEvent, false, false);
- event->setTarget(this);
- listener->handleEvent(context()->scriptExecutionContext(), event.get());
+void AudioScheduledSourceNode::removeAllEventListeners()
+{
+ m_hasEndedListener = false;
+ AudioNode::removeAllEventListeners();
}
} // namespace WebCore
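The ExceptionOr<void> return type adopted by start() and stop() above replaces the old ExceptionCode out-parameter, so a caller now propagates failure through the return value. A hypothetical call site, following the hasException() pattern used elsewhere in this patch:

    auto result = node.start(when);
    if (result.hasException())
        return result.releaseException(); // forwards INVALID_STATE_ERR upward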
diff --git a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
index 05bff2b37..f6a1b2c1b 100644
--- a/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioScheduledSourceNode.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,22 +26,18 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioScheduledSourceNode_h
-#define AudioScheduledSourceNode_h
+#pragma once
#include "AudioNode.h"
-#include "ExceptionCode.h"
namespace WebCore {
-class AudioBus;
-
class AudioScheduledSourceNode : public AudioNode {
public:
// These are the possible states an AudioScheduledSourceNode can be in:
//
// UNSCHEDULED_STATE - Initial playback state. Created, but not yet scheduled.
- // SCHEDULED_STATE - Scheduled to play (via noteOn() or noteGrainOn()), but not yet playing.
+ // SCHEDULED_STATE - Scheduled to play but not yet playing.
// PLAYING_STATE - Generating sound.
// FINISHED_STATE - Finished generating sound.
//
@@ -55,58 +51,45 @@ public:
FINISHED_STATE = 3
};
- AudioScheduledSourceNode(AudioContext*, float sampleRate);
-
- // Scheduling.
- void start(double when, ExceptionCode&);
- void stop(double when, ExceptionCode&);
+ AudioScheduledSourceNode(AudioContext&, float sampleRate);
-#if ENABLE(LEGACY_WEB_AUDIO)
- void noteOn(double when, ExceptionCode&);
- void noteOff(double when, ExceptionCode&);
-#endif
+ ExceptionOr<void> start(double when);
+ ExceptionOr<void> stop(double when);
unsigned short playbackState() const { return static_cast<unsigned short>(m_playbackState); }
bool isPlayingOrScheduled() const { return m_playbackState == PLAYING_STATE || m_playbackState == SCHEDULED_STATE; }
bool hasFinished() const { return m_playbackState == FINISHED_STATE; }
- EventListener* onended() { return getAttributeEventListener(eventNames().endedEvent); }
- void setOnended(PassRefPtr<EventListener> listener);
-
protected:
// Get frame information for the current time quantum.
// We handle the transition into PLAYING_STATE and FINISHED_STATE here,
// zeroing out portions of the outputBus which are outside the range of startFrame and endFrame.
- //
// Each frame time is relative to the context's currentSampleFrame().
- // quantumFrameOffset : Offset frame in this time quantum to start rendering.
- // nonSilentFramesToProcess : Number of frames rendering non-silence (will be <= quantumFrameSize).
- void updateSchedulingInfo(size_t quantumFrameSize,
- AudioBus* outputBus,
- size_t& quantumFrameOffset,
- size_t& nonSilentFramesToProcess);
+ // quantumFrameOffset: Offset frame in this time quantum to start rendering.
+ // nonSilentFramesToProcess: Number of frames rendering non-silence (will be <= quantumFrameSize).
+ void updateSchedulingInfo(size_t quantumFrameSize, AudioBus& outputBus, size_t& quantumFrameOffset, size_t& nonSilentFramesToProcess);
// Called when we have no more sound to play or the noteOff() time has been reached.
virtual void finish();
- static void notifyEndedDispatch(void*);
- void notifyEnded();
-
- PlaybackState m_playbackState;
+ PlaybackState m_playbackState { UNSCHEDULED_STATE };
// m_startTime is the time to start playing based on the context's timeline (0 or a time less than the context's current time means "now").
- double m_startTime; // in seconds
+ double m_startTime { 0 }; // in seconds
// m_endTime is the time to stop playing based on the context's timeline (0 or a time less than the context's current time means "now").
// If it hasn't been set explicitly, then the sound will not stop playing (if looping) or will stop when the end of the AudioBuffer
// has been reached.
double m_endTime; // in seconds
- bool m_hasEndedListener;
+ bool m_hasEndedListener { false };
static const double UnknownTime;
+
+private:
+ bool addEventListener(const AtomicString& eventType, Ref<EventListener>&&, const AddEventListenerOptions&) override;
+ bool removeEventListener(const AtomicString& eventType, EventListener&, const ListenerOptions&) override;
+ void removeAllEventListeners() override;
};
} // namespace WebCore
-
-#endif // AudioScheduledSourceNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp b/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
index ed417fadb..ee600a629 100644
--- a/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioSummingJunction.cpp
@@ -34,7 +34,7 @@
namespace WebCore {
-AudioSummingJunction::AudioSummingJunction(AudioContext* context)
+AudioSummingJunction::AudioSummingJunction(AudioContext& context)
: m_context(context)
, m_renderingStateNeedUpdating(false)
{
@@ -42,30 +42,29 @@ AudioSummingJunction::AudioSummingJunction(AudioContext* context)
AudioSummingJunction::~AudioSummingJunction()
{
- if (m_renderingStateNeedUpdating && m_context.get())
- m_context->removeMarkedSummingJunction(this);
+ if (m_renderingStateNeedUpdating)
+ context().removeMarkedSummingJunction(this);
}
void AudioSummingJunction::changedOutputs()
{
- ASSERT(context()->isGraphOwner());
+ ASSERT(context().isGraphOwner());
if (!m_renderingStateNeedUpdating && canUpdateState()) {
- context()->markSummingJunctionDirty(this);
+ context().markSummingJunctionDirty(this);
m_renderingStateNeedUpdating = true;
}
}
void AudioSummingJunction::updateRenderingState()
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
if (m_renderingStateNeedUpdating && canUpdateState()) {
// Copy from m_outputs to m_renderingOutputs.
m_renderingOutputs.resize(m_outputs.size());
- unsigned j = 0;
- for (HashSet<AudioNodeOutput*>::iterator i = m_outputs.begin(); i != m_outputs.end(); ++i, ++j) {
- AudioNodeOutput* output = *i;
- m_renderingOutputs[j] = output;
+ unsigned i = 0;
+ for (auto& output : m_outputs) {
+ m_renderingOutputs[i++] = output;
output->updateRenderingState();
}
diff --git a/Source/WebCore/Modules/webaudio/AudioSummingJunction.h b/Source/WebCore/Modules/webaudio/AudioSummingJunction.h
index b3b4f35fe..c94ff6d6a 100644
--- a/Source/WebCore/Modules/webaudio/AudioSummingJunction.h
+++ b/Source/WebCore/Modules/webaudio/AudioSummingJunction.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioSummingJunction_h
-#define AudioSummingJunction_h
+#pragma once
#include "AudioBus.h"
#include <wtf/HashSet.h>
@@ -38,11 +37,11 @@ class AudioNodeOutput;
class AudioSummingJunction {
public:
- explicit AudioSummingJunction(AudioContext*);
+ explicit AudioSummingJunction(AudioContext&);
virtual ~AudioSummingJunction();
// Can be called from any thread.
- AudioContext* context() { return m_context.get(); }
+ AudioContext& context() { return m_context.get(); }
// This must be called whenever we modify m_outputs.
void changedOutputs();
@@ -61,7 +60,7 @@ public:
virtual void didUpdate() = 0;
protected:
- RefPtr<AudioContext> m_context;
+ Ref<AudioContext> m_context;
// m_outputs contains the AudioNodeOutputs representing current connections which are not disabled.
// The rendering code should never use this directly, but instead uses m_renderingOutputs.
@@ -82,5 +81,3 @@ protected:
};
} // namespace WebCore
-
-#endif // AudioSummingJunction_h
diff --git a/Source/WebCore/Modules/webaudio/BiquadDSPKernel.cpp b/Source/WebCore/Modules/webaudio/BiquadDSPKernel.cpp
index 37ef51e05..5561dfd64 100644
--- a/Source/WebCore/Modules/webaudio/BiquadDSPKernel.cpp
+++ b/Source/WebCore/Modules/webaudio/BiquadDSPKernel.cpp
@@ -76,35 +76,35 @@ void BiquadDSPKernel::updateCoefficientsIfNecessary(bool useSmoothing, bool forc
// Configure the biquad with the new filter parameters for the appropriate type of filter.
switch (biquadProcessor()->type()) {
- case BiquadProcessor::LowPass:
+ case BiquadFilterType::Lowpass:
m_biquad.setLowpassParams(normalizedFrequency, value2);
break;
- case BiquadProcessor::HighPass:
+ case BiquadFilterType::Highpass:
m_biquad.setHighpassParams(normalizedFrequency, value2);
break;
- case BiquadProcessor::BandPass:
+ case BiquadFilterType::Bandpass:
m_biquad.setBandpassParams(normalizedFrequency, value2);
break;
- case BiquadProcessor::LowShelf:
+ case BiquadFilterType::Lowshelf:
m_biquad.setLowShelfParams(normalizedFrequency, gain);
break;
- case BiquadProcessor::HighShelf:
+ case BiquadFilterType::Highshelf:
m_biquad.setHighShelfParams(normalizedFrequency, gain);
break;
- case BiquadProcessor::Peaking:
+ case BiquadFilterType::Peaking:
m_biquad.setPeakingParams(normalizedFrequency, value2, gain);
break;
- case BiquadProcessor::Notch:
+ case BiquadFilterType::Notch:
m_biquad.setNotchParams(normalizedFrequency, value2);
break;
- case BiquadProcessor::Allpass:
+ case BiquadFilterType::Allpass:
m_biquad.setAllpassParams(normalizedFrequency, value2);
break;
}
diff --git a/Source/WebCore/Modules/webaudio/BiquadDSPKernel.h b/Source/WebCore/Modules/webaudio/BiquadDSPKernel.h
index f3eb0f5b0..935552446 100644
--- a/Source/WebCore/Modules/webaudio/BiquadDSPKernel.h
+++ b/Source/WebCore/Modules/webaudio/BiquadDSPKernel.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BiquadDSPKernel_h
-#define BiquadDSPKernel_h
+#pragma once
#include "AudioDSPKernel.h"
#include "Biquad.h"
@@ -43,8 +42,8 @@ public:
}
// AudioDSPKernel
- virtual void process(const float* source, float* dest, size_t framesToProcess) override;
- virtual void reset() override { m_biquad.reset(); }
+ void process(const float* source, float* dest, size_t framesToProcess) override;
+ void reset() override { m_biquad.reset(); }
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
@@ -53,8 +52,8 @@ public:
float* magResponse,
float* phaseResponse);
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
protected:
Biquad m_biquad;
@@ -71,5 +70,3 @@ protected:
};
} // namespace WebCore
-
-#endif // BiquadDSPKernel_h
diff --git a/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp b/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
index b05b1445c..c5eaab004 100644
--- a/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
+++ b/Source/WebCore/Modules/webaudio/BiquadFilterNode.cpp
@@ -28,11 +28,9 @@
#include "BiquadFilterNode.h"
-#include "ExceptionCode.h"
-
namespace WebCore {
-BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
+BiquadFilterNode::BiquadFilterNode(AudioContext& context, float sampleRate)
: AudioBasicProcessorNode(context, sampleRate)
{
// Initially setup as lowpass filter.
@@ -40,78 +38,25 @@ BiquadFilterNode::BiquadFilterNode(AudioContext* context, float sampleRate)
setNodeType(NodeTypeBiquadFilter);
}
-String BiquadFilterNode::type() const
+BiquadFilterType BiquadFilterNode::type() const
{
- switch (const_cast<BiquadFilterNode*>(this)->biquadProcessor()->type()) {
- case BiquadProcessor::LowPass:
- return "lowpass";
- case BiquadProcessor::HighPass:
- return "highpass";
- case BiquadProcessor::BandPass:
- return "bandpass";
- case BiquadProcessor::LowShelf:
- return "lowshelf";
- case BiquadProcessor::HighShelf:
- return "highshelf";
- case BiquadProcessor::Peaking:
- return "peaking";
- case BiquadProcessor::Notch:
- return "notch";
- case BiquadProcessor::Allpass:
- return "allpass";
- default:
- ASSERT_NOT_REACHED();
- return "lowpass";
- }
+ return const_cast<BiquadFilterNode*>(this)->biquadProcessor()->type();
}
-void BiquadFilterNode::setType(const String& type)
+void BiquadFilterNode::setType(BiquadFilterType type)
{
- if (type == "lowpass")
- setType(BiquadProcessor::LowPass);
- else if (type == "highpass")
- setType(BiquadProcessor::HighPass);
- else if (type == "bandpass")
- setType(BiquadProcessor::BandPass);
- else if (type == "lowshelf")
- setType(BiquadProcessor::LowShelf);
- else if (type == "highshelf")
- setType(BiquadProcessor::HighShelf);
- else if (type == "peaking")
- setType(BiquadProcessor::Peaking);
- else if (type == "notch")
- setType(BiquadProcessor::Notch);
- else if (type == "allpass")
- setType(BiquadProcessor::Allpass);
- else
- ASSERT_NOT_REACHED();
-}
-
-bool BiquadFilterNode::setType(unsigned type)
-{
- if (type > BiquadProcessor::Allpass)
- return false;
-
- biquadProcessor()->setType(static_cast<BiquadProcessor::FilterType>(type));
- return true;
+ biquadProcessor()->setType(type);
}
-void BiquadFilterNode::getFrequencyResponse(const Float32Array* frequencyHz,
- Float32Array* magResponse,
- Float32Array* phaseResponse)
+void BiquadFilterNode::getFrequencyResponse(const RefPtr<Float32Array>& frequencyHz, const RefPtr<Float32Array>& magResponse, const RefPtr<Float32Array>& phaseResponse)
{
if (!frequencyHz || !magResponse || !phaseResponse)
return;
- int n = std::min(frequencyHz->length(),
- std::min(magResponse->length(), phaseResponse->length()));
+ int n = std::min(frequencyHz->length(), std::min(magResponse->length(), phaseResponse->length()));
- if (n) {
- biquadProcessor()->getFrequencyResponse(n,
- frequencyHz->data(),
- magResponse->data(),
- phaseResponse->data());
- }
+ if (n)
+ biquadProcessor()->getFrequencyResponse(n, frequencyHz->data(), magResponse->data(), phaseResponse->data());
}
} // namespace WebCore
diff --git a/Source/WebCore/Modules/webaudio/BiquadFilterNode.h b/Source/WebCore/Modules/webaudio/BiquadFilterNode.h
index dde2aec77..986d28607 100644
--- a/Source/WebCore/Modules/webaudio/BiquadFilterNode.h
+++ b/Source/WebCore/Modules/webaudio/BiquadFilterNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BiquadFilterNode_h
-#define BiquadFilterNode_h
+#pragma once
#include "AudioBasicProcessorNode.h"
#include "BiquadProcessor.h"
@@ -31,29 +30,16 @@
namespace WebCore {
class AudioParam;
-
+
class BiquadFilterNode : public AudioBasicProcessorNode {
public:
- // These must be defined as in the .idl file and must match those in the BiquadProcessor class.
- enum {
- LOWPASS = 0,
- HIGHPASS = 1,
- BANDPASS = 2,
- LOWSHELF = 3,
- HIGHSHELF = 4,
- PEAKING = 5,
- NOTCH = 6,
- ALLPASS = 7
- };
-
- static PassRefPtr<BiquadFilterNode> create(AudioContext* context, float sampleRate)
+ static Ref<BiquadFilterNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new BiquadFilterNode(context, sampleRate));
+ return adoptRef(*new BiquadFilterNode(context, sampleRate));
}
- String type() const;
- bool setType(unsigned); // Returns true on success.
- void setType(const String&);
+ BiquadFilterType type() const;
+ void setType(BiquadFilterType);
AudioParam* frequency() { return biquadProcessor()->parameter1(); }
AudioParam* q() { return biquadProcessor()->parameter2(); }
@@ -62,16 +48,12 @@ public:
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
- void getFrequencyResponse(const Float32Array* frequencyHz,
- Float32Array* magResponse,
- Float32Array* phaseResponse);
+ void getFrequencyResponse(const RefPtr<Float32Array>& frequencyHz, const RefPtr<Float32Array>& magResponse, const RefPtr<Float32Array>& phaseResponse);
private:
- BiquadFilterNode(AudioContext*, float sampleRate);
+ BiquadFilterNode(AudioContext&, float sampleRate);
BiquadProcessor* biquadProcessor() { return static_cast<BiquadProcessor*>(processor()); }
};
} // namespace WebCore
-
-#endif // BiquadFilterNode_h
diff --git a/Source/WebCore/Modules/webaudio/BiquadFilterNode.idl b/Source/WebCore/Modules/webaudio/BiquadFilterNode.idl
index 40424ef51..c54bd49ad 100644
--- a/Source/WebCore/Modules/webaudio/BiquadFilterNode.idl
+++ b/Source/WebCore/Modules/webaudio/BiquadFilterNode.idl
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2011, Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,26 +25,29 @@
[
Conditional=WEB_AUDIO,
+ ImplementedAs=BiquadFilterType
+] enum BiquadFilterType {
+ "lowpass",
+ "highpass",
+ "bandpass",
+ "lowshelf",
+ "highshelf",
+ "peaking",
+ "notch",
+ "allpass"
+};
+
+[
+ Conditional=WEB_AUDIO,
JSGenerateToJSObject,
] interface BiquadFilterNode : AudioNode {
- // Filter type.
- const unsigned short LOWPASS = 0;
- const unsigned short HIGHPASS = 1;
- const unsigned short BANDPASS = 2;
- const unsigned short LOWSHELF = 3;
- const unsigned short HIGHSHELF = 4;
- const unsigned short PEAKING = 5;
- const unsigned short NOTCH = 6;
- const unsigned short ALLPASS = 7;
-
- [CustomSetter] attribute DOMString type;
+ attribute BiquadFilterType type;
readonly attribute AudioParam frequency; // in Hertz
readonly attribute AudioParam detune; // in Cents
readonly attribute AudioParam Q; // Quality factor
readonly attribute AudioParam gain; // in Decibels
- void getFrequencyResponse(Float32Array frequencyHz,
- Float32Array magResponse,
- Float32Array phaseResponse);
+ // FIXME: the parameters should not be nullable.
+ void getFrequencyResponse(Float32Array? frequencyHz, Float32Array? magResponse, Float32Array? phaseResponse);
};
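With the IDL enum above, the generated bindings translate the eight strings directly into the BiquadFilterType enum class (defined in the BiquadProcessor.h hunk below), which is what let the hand-written string parser in BiquadFilterNode.cpp be deleted. A hypothetical C++ call site:

    // `filter` is illustrative; type() and setType() are the new accessors.
    filter->setType(BiquadFilterType::Highpass);
    ASSERT(filter->type() == BiquadFilterType::Highpass);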
diff --git a/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp b/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
index 9c98e3dd5..0e4e3685b 100644
--- a/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
+++ b/Source/WebCore/Modules/webaudio/BiquadProcessor.cpp
@@ -32,9 +32,9 @@
namespace WebCore {
-BiquadProcessor::BiquadProcessor(AudioContext* context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
+BiquadProcessor::BiquadProcessor(AudioContext& context, float sampleRate, size_t numberOfChannels, bool autoInitialize)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
- , m_type(LowPass)
+ , m_type(BiquadFilterType::Lowpass)
, m_parameter1(0)
, m_parameter2(0)
, m_parameter3(0)
@@ -111,7 +111,7 @@ void BiquadProcessor::process(const AudioBus* source, AudioBus* destination, siz
m_kernels[i]->process(source->channel(i)->data(), destination->channel(i)->mutableData(), framesToProcess);
}
-void BiquadProcessor::setType(FilterType type)
+void BiquadProcessor::setType(BiquadFilterType type)
{
if (type != m_type) {
m_type = type;
@@ -119,10 +119,7 @@ void BiquadProcessor::setType(FilterType type)
}
}
-void BiquadProcessor::getFrequencyResponse(int nFrequencies,
- const float* frequencyHz,
- float* magResponse,
- float* phaseResponse)
+void BiquadProcessor::getFrequencyResponse(int nFrequencies, const float* frequencyHz, float* magResponse, float* phaseResponse)
{
// Compute the frequency response on a separate temporary kernel
// to avoid interfering with the processing running in the audio
diff --git a/Source/WebCore/Modules/webaudio/BiquadProcessor.h b/Source/WebCore/Modules/webaudio/BiquadProcessor.h
index db6de3e1a..5079f1375 100644
--- a/Source/WebCore/Modules/webaudio/BiquadProcessor.h
+++ b/Source/WebCore/Modules/webaudio/BiquadProcessor.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BiquadProcessor_h
-#define BiquadProcessor_h
+#pragma once
#include "AudioDSPKernel.h"
#include "AudioDSPKernelProcessor.h"
@@ -37,26 +36,26 @@ namespace WebCore {
// BiquadProcessor is an AudioDSPKernelProcessor which uses Biquad objects to implement several common filters.
+enum class BiquadFilterType {
+ Lowpass,
+ Highpass,
+ Bandpass,
+ Lowshelf,
+ Highshelf,
+ Peaking,
+ Notch,
+ Allpass
+};
+
class BiquadProcessor : public AudioDSPKernelProcessor {
public:
- enum FilterType {
- LowPass = 0,
- HighPass = 1,
- BandPass = 2,
- LowShelf = 3,
- HighShelf = 4,
- Peaking = 5,
- Notch = 6,
- Allpass = 7
- };
-
- BiquadProcessor(AudioContext*, float sampleRate, size_t numberOfChannels, bool autoInitialize);
+ BiquadProcessor(AudioContext&, float sampleRate, size_t numberOfChannels, bool autoInitialize);
virtual ~BiquadProcessor();
- virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
+ std::unique_ptr<AudioDSPKernel> createKernel() override;
- virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
+ void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
// Get the magnitude and phase response of the filter at the given
// set of frequencies (in Hz). The phase response is in radians.
@@ -75,11 +74,11 @@ public:
AudioParam* parameter3() { return m_parameter3.get(); }
AudioParam* parameter4() { return m_parameter4.get(); }
- FilterType type() const { return m_type; }
- void setType(FilterType);
+ BiquadFilterType type() const { return m_type; }
+ void setType(BiquadFilterType);
private:
- FilterType m_type;
+ BiquadFilterType m_type;
RefPtr<AudioParam> m_parameter1;
RefPtr<AudioParam> m_parameter2;
@@ -94,5 +93,3 @@ private:
};
} // namespace WebCore
-
-#endif // BiquadProcessor_h
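A side benefit of the enum class conversion, visible in the BiquadDSPKernel.cpp hunk earlier: a switch covering all eight enumerators needs no default case, so the compiler can warn when a new filter type is added. A minimal sketch (debugName() is hypothetical):

    static const char* debugName(BiquadFilterType type)
    {
        switch (type) {
        case BiquadFilterType::Lowpass: return "lowpass";
        case BiquadFilterType::Highpass: return "highpass";
        case BiquadFilterType::Bandpass: return "bandpass";
        case BiquadFilterType::Lowshelf: return "lowshelf";
        case BiquadFilterType::Highshelf: return "highshelf";
        case BiquadFilterType::Peaking: return "peaking";
        case BiquadFilterType::Notch: return "notch";
        case BiquadFilterType::Allpass: return "allpass";
        }
        ASSERT_NOT_REACHED();
        return "lowpass";
    }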
diff --git a/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp b/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
index 9f1989a92..b83c66769 100644
--- a/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ChannelMergerNode.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -40,15 +40,15 @@ const unsigned DefaultNumberOfOutputChannels = 1;
namespace WebCore {
-PassRefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext* context, float sampleRate, unsigned numberOfInputs)
+RefPtr<ChannelMergerNode> ChannelMergerNode::create(AudioContext& context, float sampleRate, unsigned numberOfInputs)
{
if (!numberOfInputs || numberOfInputs > AudioContext::maxNumberOfChannels())
return nullptr;
- return adoptRef(new ChannelMergerNode(context, sampleRate, numberOfInputs));
+ return adoptRef(*new ChannelMergerNode(context, sampleRate, numberOfInputs));
}
-ChannelMergerNode::ChannelMergerNode(AudioContext* context, float sampleRate, unsigned numberOfInputs)
+ChannelMergerNode::ChannelMergerNode(AudioContext& context, float sampleRate, unsigned numberOfInputs)
: AudioNode(context, sampleRate)
, m_desiredNumberOfOutputChannels(DefaultNumberOfOutputChannels)
{
@@ -104,7 +104,7 @@ void ChannelMergerNode::reset()
// number of channels of our output.
void ChannelMergerNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
// Count how many channels we have all together from all of the inputs.
unsigned numberOfOutputChannels = 0;
diff --git a/Source/WebCore/Modules/webaudio/ChannelMergerNode.h b/Source/WebCore/Modules/webaudio/ChannelMergerNode.h
index 0cc783e45..3071b5605 100644
--- a/Source/WebCore/Modules/webaudio/ChannelMergerNode.h
+++ b/Source/WebCore/Modules/webaudio/ChannelMergerNode.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,11 +26,9 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ChannelMergerNode_h
-#define ChannelMergerNode_h
+#pragma once
#include "AudioNode.h"
-#include <wtf/PassRefPtr.h>
namespace WebCore {
@@ -38,24 +36,22 @@ class AudioContext;
class ChannelMergerNode : public AudioNode {
public:
- static PassRefPtr<ChannelMergerNode> create(AudioContext*, float sampleRate, unsigned numberOfInputs);
+ static RefPtr<ChannelMergerNode> create(AudioContext&, float sampleRate, unsigned numberOfInputs);
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
// Called in the audio thread (pre-rendering task) when the number of channels for an input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+ void checkNumberOfChannelsForInput(AudioNodeInput*) override;
private:
unsigned m_desiredNumberOfOutputChannels;
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
- ChannelMergerNode(AudioContext*, float sampleRate, unsigned numberOfInputs);
+ ChannelMergerNode(AudioContext&, float sampleRate, unsigned numberOfInputs);
};
} // namespace WebCore
-
-#endif // ChannelMergerNode_h
diff --git a/Source/WebCore/Modules/webaudio/ChannelMergerNode.idl b/Source/WebCore/Modules/webaudio/ChannelMergerNode.idl
index 8158d7c22..caf989d80 100644
--- a/Source/WebCore/Modules/webaudio/ChannelMergerNode.idl
+++ b/Source/WebCore/Modules/webaudio/ChannelMergerNode.idl
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
diff --git a/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp b/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
index 88ed29d6c..74eff1d78 100644
--- a/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ChannelSplitterNode.cpp
@@ -34,15 +34,15 @@
namespace WebCore {
-PassRefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
+RefPtr<ChannelSplitterNode> ChannelSplitterNode::create(AudioContext& context, float sampleRate, unsigned numberOfOutputs)
{
if (!numberOfOutputs || numberOfOutputs > AudioContext::maxNumberOfChannels())
return nullptr;
- return adoptRef(new ChannelSplitterNode(context, sampleRate, numberOfOutputs));
+ return adoptRef(*new ChannelSplitterNode(context, sampleRate, numberOfOutputs));
}
-ChannelSplitterNode::ChannelSplitterNode(AudioContext* context, float sampleRate, unsigned numberOfOutputs)
+ChannelSplitterNode::ChannelSplitterNode(AudioContext& context, float sampleRate, unsigned numberOfOutputs)
: AudioNode(context, sampleRate)
{
addInput(std::make_unique<AudioNodeInput>(this));
diff --git a/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h b/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
index 9d07279e2..acddc6c70 100644
--- a/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
+++ b/Source/WebCore/Modules/webaudio/ChannelSplitterNode.h
@@ -22,11 +22,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ChannelSplitterNode_h
-#define ChannelSplitterNode_h
+#pragma once
#include "AudioNode.h"
-#include <wtf/PassRefPtr.h>
namespace WebCore {
@@ -34,19 +32,17 @@ class AudioContext;
class ChannelSplitterNode : public AudioNode {
public:
- static PassRefPtr<ChannelSplitterNode> create(AudioContext*, float sampleRate, unsigned numberOfOutputs);
+ static RefPtr<ChannelSplitterNode> create(AudioContext&, float sampleRate, unsigned numberOfOutputs);
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
private:
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
- ChannelSplitterNode(AudioContext*, float sampleRate, unsigned numberOfOutputs);
+ ChannelSplitterNode(AudioContext&, float sampleRate, unsigned numberOfOutputs);
};
} // namespace WebCore
-
-#endif // ChannelSplitterNode_h
diff --git a/Source/WebCore/Modules/webaudio/ConvolverNode.cpp b/Source/WebCore/Modules/webaudio/ConvolverNode.cpp
index c5ac5ea52..03e0a009b 100644
--- a/Source/WebCore/Modules/webaudio/ConvolverNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ConvolverNode.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,12 +29,9 @@
#include "ConvolverNode.h"
-#include "AudioBuffer.h"
-#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "Reverb.h"
-#include <wtf/MainThread.h>
// Note about empirical tuning:
// The maximum FFT size affects reverb performance and accuracy.
@@ -45,9 +43,8 @@ const size_t MaxFFTSize = 32768;
namespace WebCore {
-ConvolverNode::ConvolverNode(AudioContext* context, float sampleRate)
+ConvolverNode::ConvolverNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
- , m_normalize(true)
{
addInput(std::make_unique<AudioNodeInput>(this));
addOutput(std::make_unique<AudioNodeOutput>(this, 2));
@@ -73,7 +70,7 @@ void ConvolverNode::process(size_t framesToProcess)
ASSERT(outputBus);
// Synchronize with possible dynamic changes to the impulse response.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of setting a new impulse response.
outputBus->zero();
@@ -93,7 +90,7 @@ void ConvolverNode::process(size_t framesToProcess)
void ConvolverNode::reset()
{
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
if (m_reverb)
m_reverb->reset();
}
@@ -102,7 +99,7 @@ void ConvolverNode::initialize()
{
if (isInitialized())
return;
-
+
AudioNode::initialize();
}
@@ -115,40 +112,46 @@ void ConvolverNode::uninitialize()
AudioNode::uninitialize();
}
-void ConvolverNode::setBuffer(AudioBuffer* buffer)
+ExceptionOr<void> ConvolverNode::setBuffer(AudioBuffer* buffer)
{
ASSERT(isMainThread());
if (!buffer)
- return;
+ return { };
+
+ if (buffer->sampleRate() != context().sampleRate())
+ return Exception { NOT_SUPPORTED_ERR };
unsigned numberOfChannels = buffer->numberOfChannels();
size_t bufferLength = buffer->length();
- // The current implementation supports up to four channel impulse responses, which are interpreted as true-stereo (see Reverb class).
- bool isBufferGood = numberOfChannels > 0 && numberOfChannels <= 4 && bufferLength;
- ASSERT(isBufferGood);
- if (!isBufferGood)
- return;
+ // The current implementation supports only 1-, 2-, or 4-channel impulse responses, with the
+ // 4-channel response being interpreted as true-stereo (see Reverb class).
+ bool isChannelCountGood = (numberOfChannels == 1 || numberOfChannels == 2 || numberOfChannels == 4) && bufferLength;
+
+ if (!isChannelCountGood)
+ return Exception { NOT_SUPPORTED_ERR };
// Wrap the AudioBuffer by an AudioBus. It's an efficient pointer set and not a memcpy().
// This memory is simply used in the Reverb constructor and no reference to it is kept for later use in that class.
- RefPtr<AudioBus> bufferBus = AudioBus::create(numberOfChannels, bufferLength, false);
+ auto bufferBus = AudioBus::create(numberOfChannels, bufferLength, false);
for (unsigned i = 0; i < numberOfChannels; ++i)
- bufferBus->setChannelMemory(i, buffer->getChannelData(i)->data(), bufferLength);
+ bufferBus->setChannelMemory(i, buffer->channelData(i)->data(), bufferLength);
bufferBus->setSampleRate(buffer->sampleRate());
// Create the reverb with the given impulse response.
- bool useBackgroundThreads = !context()->isOfflineContext();
+ bool useBackgroundThreads = !context().isOfflineContext();
auto reverb = std::make_unique<Reverb>(bufferBus.get(), AudioNode::ProcessingSizeInFrames, MaxFFTSize, 2, useBackgroundThreads, m_normalize);
{
// Synchronize with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
- m_reverb = std::move(reverb);
+ std::lock_guard<Lock> lock(m_processMutex);
+ m_reverb = WTFMove(reverb);
m_buffer = buffer;
}
+
+ return { };
}
AudioBuffer* ConvolverNode::buffer()
diff --git a/Source/WebCore/Modules/webaudio/ConvolverNode.h b/Source/WebCore/Modules/webaudio/ConvolverNode.h
index 4417a0121..c0074d9a6 100644
--- a/Source/WebCore/Modules/webaudio/ConvolverNode.h
+++ b/Source/WebCore/Modules/webaudio/ConvolverNode.h
@@ -22,57 +22,50 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ConvolverNode_h
-#define ConvolverNode_h
+#pragma once
#include "AudioNode.h"
-#include <memory>
-#include <mutex>
-#include <wtf/RefPtr.h>
+#include <wtf/Lock.h>
namespace WebCore {
class AudioBuffer;
class Reverb;
-class ConvolverNode : public AudioNode {
+class ConvolverNode final : public AudioNode {
public:
- static PassRefPtr<ConvolverNode> create(AudioContext* context, float sampleRate)
+ static Ref<ConvolverNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new ConvolverNode(context, sampleRate));
+ return adoptRef(*new ConvolverNode(context, sampleRate));
}
virtual ~ConvolverNode();
- // AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
- virtual void initialize() override;
- virtual void uninitialize() override;
-
- // Impulse responses
- void setBuffer(AudioBuffer*);
+ ExceptionOr<void> setBuffer(AudioBuffer*);
AudioBuffer* buffer();
bool normalize() const { return m_normalize; }
void setNormalize(bool normalize) { m_normalize = normalize; }
private:
- ConvolverNode(AudioContext*, float sampleRate);
+ ConvolverNode(AudioContext&, float sampleRate);
+
+ double tailTime() const final;
+ double latencyTime() const final;
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ void process(size_t framesToProcess) final;
+ void reset() final;
+ void initialize() final;
+ void uninitialize() final;
std::unique_ptr<Reverb> m_reverb;
RefPtr<AudioBuffer> m_buffer;
// This synchronizes dynamic changes to the convolution impulse response with process().
- mutable std::mutex m_processMutex;
+ mutable Lock m_processMutex;
- // Normalize the impulse response or not. Must default to true.
- bool m_normalize;
+ // Normalize the impulse response or not.
+ bool m_normalize { true };
};
} // namespace WebCore
-
-#endif // ConvolverNode_h
diff --git a/Source/WebCore/Modules/webaudio/ConvolverNode.idl b/Source/WebCore/Modules/webaudio/ConvolverNode.idl
index 5451d4e58..e661b5766 100644
--- a/Source/WebCore/Modules/webaudio/ConvolverNode.idl
+++ b/Source/WebCore/Modules/webaudio/ConvolverNode.idl
@@ -27,6 +27,6 @@
Conditional=WEB_AUDIO,
JSGenerateToJSObject
] interface ConvolverNode : AudioNode {
- attribute AudioBuffer buffer;
+ [SetterMayThrowException] attribute AudioBuffer? buffer;
attribute boolean normalize;
};
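The [SetterMayThrowException] annotation wires the ExceptionOr-returning ConvolverNode::setBuffer() into the bindings: assigning a buffer whose sample rate differs from the context's, or whose channel count is not 1, 2, or 4, now raises NOT_SUPPORTED_ERR instead of returning silently. A hypothetical call site:

    auto result = convolver.setBuffer(impulseResponse.get());
    if (result.hasException())
        return result.releaseException(); // NOT_SUPPORTED_ERR on bad rate/channels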
diff --git a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
index 6b4f9b85a..b2542d326 100644
--- a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.cpp
@@ -28,17 +28,18 @@
#include "DefaultAudioDestinationNode.h"
-#include "ExceptionCode.h"
+#include "AudioContext.h"
+#include "AudioDestination.h"
#include "Logging.h"
+#include "ScriptExecutionContext.h"
#include <wtf/MainThread.h>
const unsigned EnabledInputChannels = 2;
namespace WebCore {
-DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext* context)
+DefaultAudioDestinationNode::DefaultAudioDestinationNode(AudioContext& context)
: AudioDestinationNode(context, AudioDestination::hardwareSampleRate())
- , m_numberOfInputChannels(0)
{
// Node-specific default mixing rules.
m_channelCount = 2;
@@ -104,12 +105,38 @@ void DefaultAudioDestinationNode::startRendering()
m_destination->start();
}
-unsigned long DefaultAudioDestinationNode::maxChannelCount() const
+void DefaultAudioDestinationNode::resume(Function<void ()>&& function)
+{
+ ASSERT(isInitialized());
+ if (isInitialized())
+ m_destination->start();
+ if (auto scriptExecutionContext = context().scriptExecutionContext())
+ scriptExecutionContext->postTask(WTFMove(function));
+}
+
+void DefaultAudioDestinationNode::suspend(Function<void ()>&& function)
+{
+ ASSERT(isInitialized());
+ if (isInitialized())
+ m_destination->stop();
+ if (auto scriptExecutionContext = context().scriptExecutionContext())
+ scriptExecutionContext->postTask(WTFMove(function));
+}
+
+void DefaultAudioDestinationNode::close(Function<void()>&& function)
+{
+ ASSERT(isInitialized());
+ uninitialize();
+ if (auto scriptExecutionContext = context().scriptExecutionContext())
+ scriptExecutionContext->postTask(WTFMove(function));
+}
+
+unsigned DefaultAudioDestinationNode::maxChannelCount() const
{
return AudioDestination::maxChannelCount();
}
-void DefaultAudioDestinationNode::setChannelCount(unsigned long channelCount, ExceptionCode& ec)
+ExceptionOr<void> DefaultAudioDestinationNode::setChannelCount(unsigned channelCount)
{
// The channelCount for the input to this node controls the actual number of channels we
// send to the audio hardware. It can only be set depending on the maximum number of
@@ -117,20 +144,27 @@ void DefaultAudioDestinationNode::setChannelCount(unsigned long channelCount, Ex
ASSERT(isMainThread());
- if (!maxChannelCount() || channelCount > maxChannelCount()) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (!maxChannelCount() || channelCount > maxChannelCount())
+ return Exception { INVALID_STATE_ERR };
- unsigned long oldChannelCount = this->channelCount();
- AudioNode::setChannelCount(channelCount, ec);
+ auto oldChannelCount = this->channelCount();
+ auto result = AudioNode::setChannelCount(channelCount);
+ if (result.hasException())
+ return result;
- if (!ec && this->channelCount() != oldChannelCount && isInitialized()) {
+ if (this->channelCount() != oldChannelCount && isInitialized()) {
// Re-create destination.
m_destination->stop();
createDestination();
m_destination->start();
}
+
+ return { };
+}
+
+bool DefaultAudioDestinationNode::isPlaying()
+{
+ return m_destination && m_destination->isPlaying();
}
} // namespace WebCore
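The new resume()/suspend()/close() overrides share one shape: apply the hardware state change synchronously, then deliver the caller's completion handler as a posted task instead of invoking it inline. A sketch of that shape; TaskQueue is a hypothetical stand-in for ScriptExecutionContext::postTask:

```cpp
#include <functional>
#include <queue>
#include <utility>

// Hypothetical stand-in for ScriptExecutionContext's task posting.
struct TaskQueue {
    std::queue<std::function<void()>> tasks;
    void postTask(std::function<void()>&& task) { tasks.push(std::move(task)); }
};

struct DestinationLike {
    bool started { false };
    TaskQueue* taskQueue { nullptr };

    void resume(std::function<void()>&& completionHandler)
    {
        started = true; // corresponds to m_destination->start()
        if (taskQueue) // the script execution context may already be gone
            taskQueue->postTask(std::move(completionHandler));
    }

    void suspend(std::function<void()>&& completionHandler)
    {
        started = false; // corresponds to m_destination->stop()
        if (taskQueue)
            taskQueue->postTask(std::move(completionHandler));
    }
};
```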
diff --git a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
index 574fed222..d8f6d478a 100644
--- a/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/DefaultAudioDestinationNode.h
@@ -22,45 +22,42 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DefaultAudioDestinationNode_h
-#define DefaultAudioDestinationNode_h
+#pragma once
-#include "AudioDestination.h"
#include "AudioDestinationNode.h"
-#include <memory>
namespace WebCore {
-class AudioContext;
+class AudioDestination;
-class DefaultAudioDestinationNode : public AudioDestinationNode {
+class DefaultAudioDestinationNode final : public AudioDestinationNode {
public:
- static PassRefPtr<DefaultAudioDestinationNode> create(AudioContext* context)
+ static Ref<DefaultAudioDestinationNode> create(AudioContext& context)
{
- return adoptRef(new DefaultAudioDestinationNode(context));
+ return adoptRef(*new DefaultAudioDestinationNode(context));
}
virtual ~DefaultAudioDestinationNode();
- // AudioNode
- virtual void initialize() override;
- virtual void uninitialize() override;
- virtual void setChannelCount(unsigned long, ExceptionCode&) override;
-
- // AudioDestinationNode
- virtual void enableInput(const String& inputDeviceId) override;
- virtual void startRendering() override;
- virtual unsigned long maxChannelCount() const override;
-
private:
- explicit DefaultAudioDestinationNode(AudioContext*);
+ explicit DefaultAudioDestinationNode(AudioContext&);
void createDestination();
+ void initialize() final;
+ void uninitialize() final;
+ ExceptionOr<void> setChannelCount(unsigned) final;
+
+ void enableInput(const String& inputDeviceId) final;
+ void startRendering() final;
+ void resume(Function<void ()>&&) final;
+ void suspend(Function<void ()>&&) final;
+ void close(Function<void ()>&&) final;
+ unsigned maxChannelCount() const final;
+ bool isPlaying() final;
+
std::unique_ptr<AudioDestination> m_destination;
String m_inputDeviceId;
- unsigned m_numberOfInputChannels;
+ unsigned m_numberOfInputChannels { 0 };
};
} // namespace WebCore
-
-#endif // DefaultAudioDestinationNode_h
diff --git a/Source/WebCore/Modules/webaudio/DelayDSPKernel.h b/Source/WebCore/Modules/webaudio/DelayDSPKernel.h
index f231d4423..b249afb00 100644
--- a/Source/WebCore/Modules/webaudio/DelayDSPKernel.h
+++ b/Source/WebCore/Modules/webaudio/DelayDSPKernel.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DelayDSPKernel_h
-#define DelayDSPKernel_h
+#pragma once
#include "AudioArray.h"
#include "AudioDSPKernel.h"
@@ -38,15 +37,15 @@ public:
explicit DelayDSPKernel(DelayProcessor*);
DelayDSPKernel(double maxDelayTime, float sampleRate);
- virtual void process(const float* source, float* destination, size_t framesToProcess) override;
- virtual void reset() override;
+ void process(const float* source, float* destination, size_t framesToProcess) override;
+ void reset() override;
double maxDelayTime() const { return m_maxDelayTime; }
void setDelayFrames(double numberOfFrames) { m_desiredDelayFrames = numberOfFrames; }
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
private:
AudioFloatArray m_buffer;
@@ -64,5 +63,3 @@ private:
};
} // namespace WebCore
-
-#endif // DelayDSPKernel_h
diff --git a/Source/WebCore/Modules/webaudio/DelayNode.cpp b/Source/WebCore/Modules/webaudio/DelayNode.cpp
index 0b0452ceb..7eae8091c 100644
--- a/Source/WebCore/Modules/webaudio/DelayNode.cpp
+++ b/Source/WebCore/Modules/webaudio/DelayNode.cpp
@@ -28,24 +28,29 @@
#include "DelayNode.h"
+#include "DelayProcessor.h"
+
namespace WebCore {
const double maximumAllowedDelayTime = 180;
-DelayNode::DelayNode(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
+inline DelayNode::DelayNode(AudioContext& context, float sampleRate, double maxDelayTime)
: AudioBasicProcessorNode(context, sampleRate)
{
- if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime) {
- ec = NOT_SUPPORTED_ERR;
- return;
- }
m_processor = std::make_unique<DelayProcessor>(context, sampleRate, 1, maxDelayTime);
setNodeType(NodeTypeDelay);
}
+ExceptionOr<Ref<DelayNode>> DelayNode::create(AudioContext& context, float sampleRate, double maxDelayTime)
+{
+ if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime)
+ return Exception { NOT_SUPPORTED_ERR };
+ return adoptRef(*new DelayNode(context, sampleRate, maxDelayTime));
+}
+
AudioParam* DelayNode::delayTime()
{
- return delayProcessor()->delayTime();
+ return static_cast<DelayProcessor&>(*m_processor).delayTime();
}
} // namespace WebCore
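DelayNode shows the recurring ExceptionOr migration in its simplest form: validation moves out of the constructor and into the static create(), so a node can no longer be half-constructed with a pending error code. A standalone sketch of the same shape, with std::expected (C++23) standing in for WebCore::ExceptionOr:

```cpp
#include <expected>
#include <memory>

enum class ExceptionCode { NotSupportedErr };

class DelayLike {
public:
    static std::expected<std::unique_ptr<DelayLike>, ExceptionCode> create(double maxDelayTime)
    {
        constexpr double maximumAllowedDelayTime = 180;
        if (maxDelayTime <= 0 || maxDelayTime >= maximumAllowedDelayTime)
            return std::unexpected(ExceptionCode::NotSupportedErr);
        // The constructor now runs only with validated arguments.
        return std::unique_ptr<DelayLike>(new DelayLike(maxDelayTime));
    }

private:
    explicit DelayLike(double maxDelayTime)
        : m_maxDelayTime(maxDelayTime)
    {
    }

    double m_maxDelayTime;
};
```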
diff --git a/Source/WebCore/Modules/webaudio/DelayNode.h b/Source/WebCore/Modules/webaudio/DelayNode.h
index da61e69e6..06ca9a7a6 100644
--- a/Source/WebCore/Modules/webaudio/DelayNode.h
+++ b/Source/WebCore/Modules/webaudio/DelayNode.h
@@ -22,33 +22,20 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DelayNode_h
-#define DelayNode_h
+#pragma once
#include "AudioBasicProcessorNode.h"
-#include "DelayProcessor.h"
-#include "ExceptionCode.h"
-#include <wtf/PassRefPtr.h>
namespace WebCore {
-class AudioParam;
-
class DelayNode : public AudioBasicProcessorNode {
public:
- static PassRefPtr<DelayNode> create(AudioContext* context, float sampleRate, double maxDelayTime, ExceptionCode& ec)
- {
- return adoptRef(new DelayNode(context, sampleRate, maxDelayTime, ec));
- }
+ static ExceptionOr<Ref<DelayNode>> create(AudioContext&, float sampleRate, double maxDelayTime);
AudioParam* delayTime();
private:
- DelayNode(AudioContext*, float sampleRate, double maxDelayTime, ExceptionCode&);
-
- DelayProcessor* delayProcessor() { return static_cast<DelayProcessor*>(processor()); }
+ DelayNode(AudioContext&, float sampleRate, double maxDelayTime);
};
} // namespace WebCore
-
-#endif // DelayNode_h
diff --git a/Source/WebCore/Modules/webaudio/DelayProcessor.cpp b/Source/WebCore/Modules/webaudio/DelayProcessor.cpp
index a5cc77559..a75ba6b7c 100644
--- a/Source/WebCore/Modules/webaudio/DelayProcessor.cpp
+++ b/Source/WebCore/Modules/webaudio/DelayProcessor.cpp
@@ -32,7 +32,7 @@
namespace WebCore {
-DelayProcessor::DelayProcessor(AudioContext* context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
+DelayProcessor::DelayProcessor(AudioContext& context, float sampleRate, unsigned numberOfChannels, double maxDelayTime)
: AudioDSPKernelProcessor(sampleRate, numberOfChannels)
, m_maxDelayTime(maxDelayTime)
{
diff --git a/Source/WebCore/Modules/webaudio/DelayProcessor.h b/Source/WebCore/Modules/webaudio/DelayProcessor.h
index 042395869..d08257703 100644
--- a/Source/WebCore/Modules/webaudio/DelayProcessor.h
+++ b/Source/WebCore/Modules/webaudio/DelayProcessor.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DelayProcessor_h
-#define DelayProcessor_h
+#pragma once
#include "AudioDSPKernelProcessor.h"
#include "AudioParam.h"
@@ -36,10 +35,10 @@ class AudioDSPKernel;
class DelayProcessor : public AudioDSPKernelProcessor {
public:
- DelayProcessor(AudioContext*, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
+ DelayProcessor(AudioContext&, float sampleRate, unsigned numberOfChannels, double maxDelayTime);
virtual ~DelayProcessor();
- virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
+ std::unique_ptr<AudioDSPKernel> createKernel() override;
AudioParam* delayTime() const { return m_delayTime.get(); }
@@ -51,5 +50,3 @@ private:
};
} // namespace WebCore
-
-#endif // DelayProcessor_h
diff --git a/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp b/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
index 7b80242e4..a1ae66a28 100644
--- a/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.cpp
@@ -38,7 +38,7 @@ static const unsigned defaultNumberOfOutputChannels = 2;
namespace WebCore {
-DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* context, float sampleRate)
+DynamicsCompressorNode::DynamicsCompressorNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
{
addInput(std::make_unique<AudioNodeInput>(this));
diff --git a/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h b/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
index 123c0ca92..7f9d83558 100644
--- a/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
+++ b/Source/WebCore/Modules/webaudio/DynamicsCompressorNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DynamicsCompressorNode_h
-#define DynamicsCompressorNode_h
+#pragma once
#include "AudioNode.h"
#include "AudioParam.h"
@@ -35,18 +34,18 @@ class DynamicsCompressor;
class DynamicsCompressorNode : public AudioNode {
public:
- static PassRefPtr<DynamicsCompressorNode> create(AudioContext* context, float sampleRate)
+ static Ref<DynamicsCompressorNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new DynamicsCompressorNode(context, sampleRate));
+ return adoptRef(*new DynamicsCompressorNode(context, sampleRate));
}
virtual ~DynamicsCompressorNode();
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
- virtual void initialize() override;
- virtual void uninitialize() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
+ void initialize() override;
+ void uninitialize() override;
// Static compression curve parameters.
AudioParam* threshold() { return m_threshold.get(); }
@@ -59,10 +58,10 @@ public:
AudioParam* reduction() { return m_reduction.get(); }
private:
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
- DynamicsCompressorNode(AudioContext*, float sampleRate);
+ DynamicsCompressorNode(AudioContext&, float sampleRate);
std::unique_ptr<DynamicsCompressor> m_dynamicsCompressor;
RefPtr<AudioParam> m_threshold;
@@ -74,5 +73,3 @@ private:
};
} // namespace WebCore
-
-#endif // DynamicsCompressorNode_h
diff --git a/Source/WebCore/Modules/webaudio/GainNode.cpp b/Source/WebCore/Modules/webaudio/GainNode.cpp
index d322c133a..fba5ec459 100644
--- a/Source/WebCore/Modules/webaudio/GainNode.cpp
+++ b/Source/WebCore/Modules/webaudio/GainNode.cpp
@@ -34,7 +34,7 @@
namespace WebCore {
-GainNode::GainNode(AudioContext* context, float sampleRate)
+GainNode::GainNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
, m_lastGain(1.0)
, m_sampleAccurateGainValues(AudioNode::ProcessingSizeInFrames) // FIXME: can probably share temp buffer in context
@@ -91,7 +91,7 @@ void GainNode::reset()
// uninitialize and then re-initialize with the new channel count.
void GainNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
{
- ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+ ASSERT(context().isAudioThread() && context().isGraphOwner());
ASSERT(input && input == this->input(0));
if (input != this->input(0))
diff --git a/Source/WebCore/Modules/webaudio/GainNode.h b/Source/WebCore/Modules/webaudio/GainNode.h
index 3a8c5dfd8..94352b0d7 100644
--- a/Source/WebCore/Modules/webaudio/GainNode.h
+++ b/Source/WebCore/Modules/webaudio/GainNode.h
@@ -22,12 +22,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GainNode_h
-#define GainNode_h
+#pragma once
#include "AudioNode.h"
#include "AudioParam.h"
-#include <wtf/PassRefPtr.h>
#include <wtf/Threading.h>
namespace WebCore {
@@ -39,26 +37,26 @@ class AudioContext;
class GainNode : public AudioNode {
public:
- static PassRefPtr<GainNode> create(AudioContext* context, float sampleRate)
+ static Ref<GainNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new GainNode(context, sampleRate));
+ return adoptRef(*new GainNode(context, sampleRate));
}
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
// Called in the main thread when the number of channels for the input may have changed.
- virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+ void checkNumberOfChannelsForInput(AudioNodeInput*) override;
// JavaScript interface
AudioParam* gain() { return m_gain.get(); }
private:
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
- GainNode(AudioContext*, float sampleRate);
+ GainNode(AudioContext&, float sampleRate);
float m_lastGain; // for de-zippering
RefPtr<AudioParam> m_gain;
@@ -67,5 +65,3 @@ private:
};
} // namespace WebCore
-
-#endif // GainNode_h
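GainNode keeps m_lastGain "for de-zippering". As an illustration of the general technique only (not WebKit's exact math), de-zippering ramps from the previous gain toward the new target across the render quantum instead of jumping, which avoids audible clicks ("zipper noise"):

```cpp
#include <cstddef>

// Illustrative linear-ramp de-zippering; WebKit's actual smoothing differs.
void applyGainWithDeZippering(float* samples, std::size_t frameCount,
    float& lastGain, float targetGain)
{
    if (!frameCount)
        return;
    float step = (targetGain - lastGain) / static_cast<float>(frameCount);
    float gain = lastGain;
    for (std::size_t i = 0; i < frameCount; ++i) {
        samples[i] *= gain; // gain glides sample by sample
        gain += step;
    }
    lastGain = targetGain;
}
```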
diff --git a/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp b/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
index 0e50258ac..77c7fbccf 100644
--- a/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.cpp
@@ -40,13 +40,13 @@ const unsigned maxSampleRate = 192000;
namespace WebCore {
-PassRefPtr<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext* context, HTMLMediaElement* mediaElement)
+Ref<MediaElementAudioSourceNode> MediaElementAudioSourceNode::create(AudioContext& context, HTMLMediaElement& mediaElement)
{
- return adoptRef(new MediaElementAudioSourceNode(context, mediaElement));
+ return adoptRef(*new MediaElementAudioSourceNode(context, mediaElement));
}
-MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context, HTMLMediaElement* mediaElement)
- : AudioNode(context, context->sampleRate())
+MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement& mediaElement)
+ : AudioNode(context, context.sampleRate())
, m_mediaElement(mediaElement)
, m_sourceNumberOfChannels(0)
, m_sourceSampleRate(0)
@@ -61,7 +61,7 @@ MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* context,
MediaElementAudioSourceNode::~MediaElementAudioSourceNode()
{
- m_mediaElement->setAudioSourceNode(0);
+ m_mediaElement->setAudioSourceNode(nullptr);
uninitialize();
}
@@ -92,7 +92,7 @@ void MediaElementAudioSourceNode::setFormat(size_t numberOfChannels, float sourc
{
// The context must be locked when changing the number of output channels.
- AudioContext::AutoLocker contextLocker(*context());
+ AudioContext::AutoLocker contextLocker(context());
// Do any necessary re-configuration to the output's number of channels.
output(0)->setNumberOfChannels(numberOfChannels);
@@ -104,7 +104,7 @@ void MediaElementAudioSourceNode::process(size_t numberOfFrames)
{
AudioBus* outputBus = output(0)->bus();
- if (!mediaElement() || !m_sourceNumberOfChannels || !m_sourceSampleRate) {
+ if (!m_sourceNumberOfChannels || !m_sourceSampleRate) {
outputBus->zero();
return;
}
@@ -112,14 +112,14 @@ void MediaElementAudioSourceNode::process(size_t numberOfFrames)
// Use a std::try_to_lock to avoid contention in the real-time audio thread.
// If we fail to acquire the lock then the HTMLMediaElement must be in the middle of
// reconfiguring its playback engine, so we output silence in this case.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// We failed to acquire the lock.
outputBus->zero();
return;
}
- if (AudioSourceProvider* provider = mediaElement()->audioSourceProvider()) {
+ if (AudioSourceProvider* provider = mediaElement().audioSourceProvider()) {
if (m_multiChannelResampler.get()) {
ASSERT(m_sourceSampleRate != sampleRate());
m_multiChannelResampler->process(provider, outputBus, numberOfFrames);
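setFormat() above also illustrates the ordering that matters in these nodes: the output's channel count is reconfigured only while the graph lock is held, so the rendering thread never observes a half-configured output. A sketch of that ordering with standard types; GraphLock/OutputLike are stand-ins for AudioContext::AutoLocker and AudioNodeOutput:

```cpp
#include <cstddef>
#include <mutex>

struct OutputLike {
    std::size_t channels { 2 };
    void setNumberOfChannels(std::size_t n) { channels = n; }
};

struct SourceNodeLike {
    std::mutex graphMutex; // stand-in for the AudioContext graph lock
    OutputLike output;

    void setFormat(std::size_t numberOfChannels)
    {
        // The graph must be locked when changing the number of output
        // channels, mirroring AudioContext::AutoLocker in the diff.
        std::lock_guard<std::mutex> contextLocker(graphMutex);
        output.setNumberOfChannels(numberOfChannels);
    }
};
```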
diff --git a/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h b/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
index 7f58ae7b2..0cafdc945 100644
--- a/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
+++ b/Source/WebCore/Modules/webaudio/MediaElementAudioSourceNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaElementAudioSourceNode_h
-#define MediaElementAudioSourceNode_h
+#pragma once
#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO)
@@ -32,8 +31,7 @@
#include "HTMLMediaElement.h"
#include "MultiChannelResampler.h"
#include <memory>
-#include <mutex>
-#include <wtf/PassRefPtr.h>
+#include <wtf/Lock.h>
namespace WebCore {
@@ -41,33 +39,33 @@ class AudioContext;
class MediaElementAudioSourceNode : public AudioNode, public AudioSourceProviderClient {
public:
- static PassRefPtr<MediaElementAudioSourceNode> create(AudioContext*, HTMLMediaElement*);
+ static Ref<MediaElementAudioSourceNode> create(AudioContext&, HTMLMediaElement&);
virtual ~MediaElementAudioSourceNode();
- HTMLMediaElement* mediaElement() { return m_mediaElement.get(); }
+ HTMLMediaElement& mediaElement() { return m_mediaElement; }
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
// AudioSourceProviderClient
- virtual void setFormat(size_t numberOfChannels, float sampleRate) override;
+ void setFormat(size_t numberOfChannels, float sampleRate) override;
void lock();
void unlock();
private:
- MediaElementAudioSourceNode(AudioContext*, HTMLMediaElement*);
+ MediaElementAudioSourceNode(AudioContext&, HTMLMediaElement&);
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
// As an audio source, we will never propagate silence.
- virtual bool propagatesSilence() const override { return false; }
+ bool propagatesSilence() const override { return false; }
- RefPtr<HTMLMediaElement> m_mediaElement;
- std::mutex m_processMutex;
+ Ref<HTMLMediaElement> m_mediaElement;
+ Lock m_processMutex;
unsigned m_sourceNumberOfChannels;
double m_sourceSampleRate;
@@ -78,5 +76,3 @@ private:
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO)
-
-#endif // MediaElementAudioSourceNode_h
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp
index 2103edf5c..1b9072ae2 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.cpp
@@ -23,42 +23,40 @@
*/
#include "config.h"
+#include "MediaStreamAudioDestinationNode.h"
#if ENABLE(WEB_AUDIO) && ENABLE(MEDIA_STREAM)
-#include "MediaStreamAudioDestinationNode.h"
-
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "MediaStream.h"
#include "MediaStreamAudioSource.h"
-#include "RTCPeerConnectionHandler.h"
#include <wtf/Locker.h>
namespace WebCore {
-PassRefPtr<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext* context, size_t numberOfChannels)
+Ref<MediaStreamAudioDestinationNode> MediaStreamAudioDestinationNode::create(AudioContext& context, size_t numberOfChannels)
{
- return adoptRef(new MediaStreamAudioDestinationNode(context, numberOfChannels));
+ return adoptRef(*new MediaStreamAudioDestinationNode(context, numberOfChannels));
}
-MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* context, size_t numberOfChannels)
- : AudioBasicInspectorNode(context, context->sampleRate(), numberOfChannels)
+MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext& context, size_t numberOfChannels)
+ : AudioBasicInspectorNode(context, context.sampleRate(), numberOfChannels)
, m_mixBus(AudioBus::create(numberOfChannels, ProcessingSizeInFrames))
{
setNodeType(NodeTypeMediaStreamAudioDestination);
m_source = MediaStreamAudioSource::create();
- Vector<RefPtr<MediaStreamSource>> audioSources;
- audioSources.append(m_source);
- m_stream = MediaStream::create(*context->scriptExecutionContext(), MediaStreamPrivate::create(audioSources, Vector<RefPtr<MediaStreamSource>>()));
+ Vector<Ref<RealtimeMediaSource>> audioSources;
+ audioSources.append(*m_source);
+ m_stream = MediaStream::create(*context.scriptExecutionContext(), MediaStreamPrivate::create(audioSources, { }));
- m_source->setAudioFormat(numberOfChannels, context->sampleRate());
+ m_source->setAudioFormat(numberOfChannels, context.sampleRate());
initialize();
}
-MediaStreamSource* MediaStreamAudioDestinationNode::mediaStreamSource()
+RealtimeMediaSource* MediaStreamAudioDestinationNode::mediaStreamSource()
{
return m_source.get();
}
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h b/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
index 7f300076f..f17c10f2a 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioDestinationNode.h
@@ -22,15 +22,13 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaStreamAudioDestinationNode_h
-#define MediaStreamAudioDestinationNode_h
+#pragma once
#if ENABLE(WEB_AUDIO) && ENABLE(MEDIA_STREAM)
#include "AudioBasicInspectorNode.h"
#include "AudioBus.h"
#include "MediaStream.h"
-#include <wtf/PassRefPtr.h>
namespace WebCore {
@@ -39,26 +37,26 @@ class MediaStreamAudioSource;
class MediaStreamAudioDestinationNode : public AudioBasicInspectorNode {
public:
- static PassRefPtr<MediaStreamAudioDestinationNode> create(AudioContext*, size_t numberOfChannels);
+ static Ref<MediaStreamAudioDestinationNode> create(AudioContext&, size_t numberOfChannels);
virtual ~MediaStreamAudioDestinationNode();
MediaStream* stream() { return m_stream.get(); }
// AudioNode.
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ void process(size_t framesToProcess) override;
+ void reset() override;
- MediaStreamSource* mediaStreamSource();
+ RealtimeMediaSource* mediaStreamSource();
private:
- MediaStreamAudioDestinationNode(AudioContext*, size_t numberOfChannels);
+ MediaStreamAudioDestinationNode(AudioContext&, size_t numberOfChannels);
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
// As an audio source, we will never propagate silence.
- virtual bool propagatesSilence() const override { return false; }
+ bool propagatesSilence() const override { return false; }
RefPtr<MediaStream> m_stream;
RefPtr<MediaStreamAudioSource> m_source;
@@ -68,5 +66,3 @@ private:
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO) && ENABLE(MEDIA_STREAM)
-
-#endif // MediaStreamAudioDestinationNode_h
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp b/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp
index ffabd5ed2..4c274aee5 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.cpp
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -24,27 +24,27 @@
*/
#include "config.h"
+#include "MediaStreamAudioSource.h"
#if ENABLE(MEDIA_STREAM)
-#include "MediaStreamAudioSource.h"
-
+#include "AudioSourceProvider.h"
#include "NotImplemented.h"
#include "UUID.h"
namespace WebCore {
-RefPtr<MediaStreamAudioSource> MediaStreamAudioSource::create()
+Ref<MediaStreamAudioSource> MediaStreamAudioSource::create()
{
- return adoptRef(new MediaStreamAudioSource());
+ return adoptRef(*new MediaStreamAudioSource());
}
MediaStreamAudioSource::MediaStreamAudioSource()
- : MediaStreamSource(ASCIILiteral("WebAudio-") + createCanonicalUUIDString(), MediaStreamSource::Audio, "MediaStreamAudioDestinationNode")
+ : RealtimeMediaSource(ASCIILiteral("WebAudio-") + createCanonicalUUIDString(), RealtimeMediaSource::Audio, "MediaStreamAudioDestinationNode")
{
}
-RefPtr<MediaStreamSourceCapabilities> MediaStreamAudioSource::capabilities() const
+RefPtr<RealtimeMediaSourceCapabilities> MediaStreamAudioSource::capabilities() const
{
// FIXME: implement this.
// https://bugs.webkit.org/show_bug.cgi?id=122430
@@ -52,24 +52,30 @@ RefPtr<MediaStreamSourceCapabilities> MediaStreamAudioSource::capabilities() con
return nullptr;
}
-const MediaStreamSourceStates& MediaStreamAudioSource::states()
+const RealtimeMediaSourceSettings& MediaStreamAudioSource::settings() const
{
// FIXME: implement this.
// https://bugs.webkit.org/show_bug.cgi?id=122430
notImplemented();
- return m_currentStates;
-
+ return m_currentSettings;
+}
+
+AudioSourceProvider* MediaStreamAudioSource::audioSourceProvider()
+{
+ // FIXME: implement this.
+ notImplemented();
+ return nullptr;
}
-void MediaStreamAudioSource::addAudioConsumer(PassRefPtr<AudioDestinationConsumer> consumer)
+void MediaStreamAudioSource::addAudioConsumer(AudioDestinationConsumer* consumer)
{
- MutexLocker locker(m_audioConsumersLock);
+ LockHolder locker(m_audioConsumersLock);
m_audioConsumers.append(consumer);
}
bool MediaStreamAudioSource::removeAudioConsumer(AudioDestinationConsumer* consumer)
{
- MutexLocker locker(m_audioConsumersLock);
+ LockHolder locker(m_audioConsumersLock);
size_t pos = m_audioConsumers.find(consumer);
if (pos != notFound) {
m_audioConsumers.remove(pos);
@@ -80,16 +86,16 @@ bool MediaStreamAudioSource::removeAudioConsumer(AudioDestinationConsumer* consu
void MediaStreamAudioSource::setAudioFormat(size_t numberOfChannels, float sampleRate)
{
- MutexLocker locker(m_audioConsumersLock);
- for (Vector<RefPtr<AudioDestinationConsumer>>::iterator it = m_audioConsumers.begin(); it != m_audioConsumers.end(); ++it)
- (*it)->setFormat(numberOfChannels, sampleRate);
+ LockHolder locker(m_audioConsumersLock);
+ for (auto& consumer : m_audioConsumers)
+ consumer->setFormat(numberOfChannels, sampleRate);
}
void MediaStreamAudioSource::consumeAudio(AudioBus* bus, size_t numberOfFrames)
{
- MutexLocker locker(m_audioConsumersLock);
- for (Vector<RefPtr<AudioDestinationConsumer>>::iterator it = m_audioConsumers.begin(); it != m_audioConsumers.end(); ++it)
- (*it)->consumeAudio(bus, numberOfFrames);
+ LockHolder locker(m_audioConsumersLock);
+ for (auto& consumer : m_audioConsumers)
+ consumer->consumeAudio(bus, numberOfFrames);
}
} // namespace WebCore
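The MutexLocker-to-LockHolder and iterator-to-range-for changes above are mechanical; the pattern they preserve is lock-then-fan-out over the registered consumers. A standalone analog with standard types standing in for WTF::Lock and AudioDestinationConsumer:

```cpp
#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

struct Consumer {
    virtual ~Consumer() = default;
    virtual void setFormat(std::size_t numberOfChannels, float sampleRate) = 0;
};

class AudioSourceLike {
public:
    void setAudioFormat(std::size_t numberOfChannels, float sampleRate)
    {
        // LockHolder analog: hold the lock for the whole fan-out.
        std::lock_guard<std::mutex> locker(m_consumersLock);
        for (auto& consumer : m_consumers)
            consumer->setFormat(numberOfChannels, sampleRate);
    }

private:
    std::mutex m_consumersLock;
    std::vector<std::shared_ptr<Consumer>> m_consumers;
};
```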
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.h b/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.h
index 5cebac1b7..a3eed5147 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.h
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioSource.h
@@ -10,10 +10,10 @@
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
- * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@@ -23,13 +23,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaStreamAudioSource_h
-#define MediaStreamAudioSource_h
+#pragma once
#if ENABLE(MEDIA_STREAM)
#include "AudioDestinationConsumer.h"
-#include "MediaStreamSource.h"
+#include "RealtimeMediaSource.h"
+#include <wtf/Lock.h>
#include <wtf/RefCounted.h>
#include <wtf/ThreadingPrimitives.h>
#include <wtf/Vector.h>
@@ -38,40 +38,38 @@
namespace WebCore {
class AudioBus;
-class MediaStreamSourceCapabilities;
+class RealtimeMediaSourceCapabilities;
-class MediaStreamAudioSource : public MediaStreamSource {
+class MediaStreamAudioSource final : public RealtimeMediaSource {
public:
- static RefPtr<MediaStreamAudioSource> create();
+ static Ref<MediaStreamAudioSource> create();
~MediaStreamAudioSource() { }
- virtual bool useIDForTrackID() const { return true; }
- virtual RefPtr<MediaStreamSourceCapabilities> capabilities() const;
- virtual const MediaStreamSourceStates& states();
-
+ RefPtr<RealtimeMediaSourceCapabilities> capabilities() const final;
+ const RealtimeMediaSourceSettings& settings() const final;
const String& deviceId() const { return m_deviceId; }
void setDeviceId(const String& deviceId) { m_deviceId = deviceId; }
void setAudioFormat(size_t numberOfChannels, float sampleRate);
void consumeAudio(AudioBus*, size_t numberOfFrames);
- void addAudioConsumer(PassRefPtr<AudioDestinationConsumer>);
+ void addAudioConsumer(AudioDestinationConsumer*);
bool removeAudioConsumer(AudioDestinationConsumer*);
const Vector<RefPtr<AudioDestinationConsumer>>& audioConsumers() const { return m_audioConsumers; }
private:
MediaStreamAudioSource();
+ AudioSourceProvider* audioSourceProvider() override;
+
String m_deviceId;
- Mutex m_audioConsumersLock;
+ Lock m_audioConsumersLock;
Vector<RefPtr<AudioDestinationConsumer>> m_audioConsumers;
- MediaStreamSourceStates m_currentStates;
+ RealtimeMediaSourceSettings m_currentSettings;
};
} // namespace WebCore
#endif // ENABLE(MEDIA_STREAM)
-
-#endif // MediaStreamAudioSource_h
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp b/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
index 867212e51..f640a6826 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.cpp
@@ -35,18 +35,21 @@
namespace WebCore {
-PassRefPtr<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, AudioSourceProvider* audioSourceProvider)
+Ref<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::create(AudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
{
- return adoptRef(new MediaStreamAudioSourceNode(context, mediaStream, audioTrack, audioSourceProvider));
+ return adoptRef(*new MediaStreamAudioSourceNode(context, mediaStream, audioTrack));
}
-MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* context, MediaStream* mediaStream, MediaStreamTrack* audioTrack, AudioSourceProvider* audioSourceProvider)
- : AudioNode(context, context->sampleRate())
+MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext& context, MediaStream& mediaStream, MediaStreamTrack& audioTrack)
+ : AudioNode(context, context.sampleRate())
, m_mediaStream(mediaStream)
, m_audioTrack(audioTrack)
- , m_audioSourceProvider(audioSourceProvider)
- , m_sourceNumberOfChannels(0)
{
+ AudioSourceProvider* audioSourceProvider = m_audioTrack->audioSourceProvider();
+ ASSERT(audioSourceProvider);
+
+ audioSourceProvider->setClient(this);
+
// Default to stereo. This could change depending on the format of the MediaStream's audio track.
addOutput(std::make_unique<AudioNodeOutput>(this, 2));
@@ -57,45 +60,56 @@ MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* context, Me
MediaStreamAudioSourceNode::~MediaStreamAudioSourceNode()
{
+ AudioSourceProvider* audioSourceProvider = m_audioTrack->audioSourceProvider();
+ ASSERT(audioSourceProvider);
+ audioSourceProvider->setClient(nullptr);
uninitialize();
}
void MediaStreamAudioSourceNode::setFormat(size_t numberOfChannels, float sourceSampleRate)
{
- if (numberOfChannels != m_sourceNumberOfChannels || sourceSampleRate != sampleRate()) {
- // The sample-rate must be equal to the context's sample-rate.
- if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || sourceSampleRate != sampleRate()) {
- // process() will generate silence for these uninitialized values.
- LOG(Media, "MediaStreamAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
- m_sourceNumberOfChannels = 0;
- return;
- }
-
- // Synchronize with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
-
- m_sourceNumberOfChannels = numberOfChannels;
-
- {
- // The context must be locked when changing the number of output channels.
- AudioContext::AutoLocker contextLocker(*context());
-
- // Do any necesssary re-configuration to the output's number of channels.
- output(0)->setNumberOfChannels(numberOfChannels);
- }
+ float sampleRate = this->sampleRate();
+ if (numberOfChannels == m_sourceNumberOfChannels && sourceSampleRate == sampleRate)
+ return;
+
+ // The sample-rate must be equal to the context's sample-rate.
+ if (!numberOfChannels || numberOfChannels > AudioContext::maxNumberOfChannels() || sourceSampleRate != sampleRate) {
+ // process() will generate silence for these uninitialized values.
+ LOG(Media, "MediaStreamAudioSourceNode::setFormat(%u, %f) - unhandled format change", static_cast<unsigned>(numberOfChannels), sourceSampleRate);
+ m_sourceNumberOfChannels = 0;
+ return;
+ }
+
+ // Synchronize with process().
+ std::lock_guard<Lock> lock(m_processMutex);
+
+ m_sourceNumberOfChannels = numberOfChannels;
+ m_sourceSampleRate = sourceSampleRate;
+
+ if (sourceSampleRate == sampleRate)
+ m_multiChannelResampler = nullptr;
+ else {
+ double scaleFactor = sourceSampleRate / sampleRate;
+ m_multiChannelResampler = std::make_unique<MultiChannelResampler>(scaleFactor, numberOfChannels);
+ }
+
+ {
+ // The context must be locked when changing the number of output channels.
+ AudioContext::AutoLocker contextLocker(context());
+
+ // Do any necessary re-configuration to the output's number of channels.
+ output(0)->setNumberOfChannels(numberOfChannels);
}
}
void MediaStreamAudioSourceNode::process(size_t numberOfFrames)
{
AudioBus* outputBus = output(0)->bus();
+ AudioSourceProvider* provider = m_audioTrack->audioSourceProvider();
- if (!audioSourceProvider()) {
- outputBus->zero();
- return;
- }
-
- if (!mediaStream() || m_sourceNumberOfChannels != outputBus->numberOfChannels()) {
+ if (!mediaStream() || !m_sourceNumberOfChannels || !m_sourceSampleRate || !provider) {
outputBus->zero();
return;
}
@@ -103,18 +117,21 @@ void MediaStreamAudioSourceNode::process(size_t numberOfFrames)
// Use std::try_to_lock to avoid contention in the real-time audio thread.
// If we fail to acquire the lock then the MediaStream must be in the middle of
// a format change, so we output silence in this case.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// We failed to acquire the lock.
outputBus->zero();
return;
}
- audioSourceProvider()->provideInput(outputBus, numberOfFrames);
-}
-
-void MediaStreamAudioSourceNode::reset()
-{
+ if (m_multiChannelResampler.get()) {
+ ASSERT(m_sourceSampleRate != sampleRate());
+ m_multiChannelResampler->process(provider, outputBus, numberOfFrames);
+ } else {
+ // Bypass the resampler completely if the source is at the context's sample-rate.
+ ASSERT(m_sourceSampleRate == sampleRate());
+ provider->provideInput(outputBus, numberOfFrames);
+ }
}
} // namespace WebCore
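The rewritten setFormat() now owns the resampler decision: when the track's rate matches the context's, the resampler is dropped and process() pulls straight from the provider; otherwise a resampler is built with scaleFactor = sourceSampleRate / sampleRate, as in the diff. A minimal sketch of that decision, with ResamplerLike standing in for MultiChannelResampler:

```cpp
#include <memory>

// Stand-in for WebCore::MultiChannelResampler.
struct ResamplerLike {
    ResamplerLike(double /* scaleFactor */, unsigned /* numberOfChannels */) { }
};

void updateResampler(std::unique_ptr<ResamplerLike>& resampler,
    double sourceSampleRate, double contextSampleRate, unsigned numberOfChannels)
{
    if (sourceSampleRate == contextSampleRate) {
        // Bypass the resampler completely when the rates match.
        resampler = nullptr;
        return;
    }
    double scaleFactor = sourceSampleRate / contextSampleRate;
    resampler = std::make_unique<ResamplerLike>(scaleFactor, numberOfChannels);
}
```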
diff --git a/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h b/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h
index 7ff95adfe..bcdcfc66f 100644
--- a/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h
+++ b/Source/WebCore/Modules/webaudio/MediaStreamAudioSourceNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MediaStreamAudioSourceNode_h
-#define MediaStreamAudioSourceNode_h
+#pragma once
#if ENABLE(WEB_AUDIO) && ENABLE(MEDIA_STREAM)
@@ -31,50 +30,49 @@
#include "AudioSourceProvider.h"
#include "AudioSourceProviderClient.h"
#include "MediaStream.h"
-#include <mutex>
-#include <wtf/PassRefPtr.h>
+#include "MultiChannelResampler.h"
+#include <wtf/Lock.h>
+#include <wtf/RefPtr.h>
namespace WebCore {
class AudioContext;
+class MultiChannelResampler;
class MediaStreamAudioSourceNode : public AudioNode, public AudioSourceProviderClient {
public:
- static PassRefPtr<MediaStreamAudioSourceNode> create(AudioContext*, MediaStream*, MediaStreamTrack*, AudioSourceProvider*);
+ static Ref<MediaStreamAudioSourceNode> create(AudioContext&, MediaStream&, MediaStreamTrack&);
virtual ~MediaStreamAudioSourceNode();
- MediaStream* mediaStream() { return m_mediaStream.get(); }
+ MediaStream* mediaStream() { return &m_mediaStream.get(); }
// AudioNode
- virtual void process(size_t framesToProcess);
- virtual void reset();
+ void process(size_t framesToProcess) override;
+ void reset() override { }
// AudioSourceProviderClient
- virtual void setFormat(size_t numberOfChannels, float sampleRate);
-
- AudioSourceProvider* audioSourceProvider() const { return m_audioSourceProvider; }
+ void setFormat(size_t numberOfChannels, float sampleRate) override;
private:
- MediaStreamAudioSourceNode(AudioContext*, MediaStream*, MediaStreamTrack*, AudioSourceProvider*);
+ MediaStreamAudioSourceNode(AudioContext&, MediaStream&, MediaStreamTrack&);
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ double tailTime() const override { return 0; }
+ double latencyTime() const override { return 0; }
// As an audio source, we will never propagate silence.
- virtual bool propagatesSilence() const override { return false; }
+ bool propagatesSilence() const override { return false; }
- RefPtr<MediaStream> m_mediaStream;
- RefPtr<MediaStreamTrack> m_audioTrack;
- AudioSourceProvider* m_audioSourceProvider;
+ Ref<MediaStream> m_mediaStream;
+ Ref<MediaStreamTrack> m_audioTrack;
+ std::unique_ptr<MultiChannelResampler> m_multiChannelResampler;
- std::mutex m_processMutex;
+ Lock m_processMutex;
- unsigned m_sourceNumberOfChannels;
+ unsigned m_sourceNumberOfChannels { 0 };
+ double m_sourceSampleRate { 0 };
};
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO) && ENABLE(MEDIA_STREAM)
-
-#endif // MediaStreamAudioSourceNode_h
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.cpp b/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.cpp
index 7db5d78b7..f56ae51aa 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.cpp
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.cpp
@@ -33,24 +33,14 @@
namespace WebCore {
-PassRefPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create()
+Ref<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create(RefPtr<AudioBuffer>&& renderedBuffer)
{
- return adoptRef(new OfflineAudioCompletionEvent);
+ return adoptRef(*new OfflineAudioCompletionEvent(WTFMove(renderedBuffer)));
}
-PassRefPtr<OfflineAudioCompletionEvent> OfflineAudioCompletionEvent::create(PassRefPtr<AudioBuffer> renderedBuffer)
-{
- return adoptRef(new OfflineAudioCompletionEvent(renderedBuffer));
-}
-
-OfflineAudioCompletionEvent::OfflineAudioCompletionEvent()
-{
-}
-
-
-OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(PassRefPtr<AudioBuffer> renderedBuffer)
+OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(RefPtr<AudioBuffer>&& renderedBuffer)
: Event(eventNames().completeEvent, true, false)
- , m_renderedBuffer(renderedBuffer)
+ , m_renderedBuffer(WTFMove(renderedBuffer))
{
}
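The RefPtr&&/WTFMove change here is the standard sink-parameter pattern: take the smart pointer by rvalue reference and move it into the member, avoiding refcount churn during construction. A standalone analog, with std::shared_ptr and std::move standing in for RefPtr and WTFMove:

```cpp
#include <memory>
#include <utility>

struct AudioBufferLike { };

class CompletionEventLike {
public:
    explicit CompletionEventLike(std::shared_ptr<AudioBufferLike>&& renderedBuffer)
        : m_renderedBuffer(std::move(renderedBuffer)) // no refcount bump
    {
    }

    AudioBufferLike* renderedBuffer() { return m_renderedBuffer.get(); }

private:
    std::shared_ptr<AudioBufferLike> m_renderedBuffer;
};
```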
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.h b/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.h
index 72a7b7921..7ac3b0e2c 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.h
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioCompletionEvent.h
@@ -22,36 +22,28 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef OfflineAudioCompletionEvent_h
-#define OfflineAudioCompletionEvent_h
+#pragma once
#include "AudioBuffer.h"
#include "Event.h"
-#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
namespace WebCore {
-class AudioBuffer;
-
class OfflineAudioCompletionEvent : public Event {
public:
- static PassRefPtr<OfflineAudioCompletionEvent> create();
- static PassRefPtr<OfflineAudioCompletionEvent> create(PassRefPtr<AudioBuffer> renderedBuffer);
+ static Ref<OfflineAudioCompletionEvent> create(RefPtr<AudioBuffer>&& renderedBuffer);
virtual ~OfflineAudioCompletionEvent();
AudioBuffer* renderedBuffer() { return m_renderedBuffer.get(); }
- virtual EventInterface eventInterface() const override;
+ EventInterface eventInterface() const override;
private:
- OfflineAudioCompletionEvent();
- explicit OfflineAudioCompletionEvent(PassRefPtr<AudioBuffer> renderedBuffer);
+ explicit OfflineAudioCompletionEvent(RefPtr<AudioBuffer>&& renderedBuffer);
RefPtr<AudioBuffer> m_renderedBuffer;
};
} // namespace WebCore
-
-#endif // OfflineAudioCompletionEvent_h
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp b/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp
index 3bece3019..ba5e49fae 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioContext.cpp
@@ -29,38 +29,24 @@
#include "OfflineAudioContext.h"
#include "Document.h"
-#include "ExceptionCode.h"
-#include "ScriptExecutionContext.h"
namespace WebCore {
-PassRefPtr<OfflineAudioContext> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
-{
- // FIXME: add support for workers.
- if (!context.isDocument()) {
- ec = NOT_SUPPORTED_ERR;
- return nullptr;
- }
-
- Document& document = toDocument(context);
-
- if (numberOfChannels > 10 || !isSampleRateRangeGood(sampleRate)) {
- ec = SYNTAX_ERR;
- return nullptr;
- }
-
- RefPtr<OfflineAudioContext> audioContext(adoptRef(new OfflineAudioContext(document, numberOfChannels, numberOfFrames, sampleRate)));
- audioContext->suspendIfNeeded();
- return audioContext.release();
-}
-
-OfflineAudioContext::OfflineAudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+inline OfflineAudioContext::OfflineAudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
: AudioContext(document, numberOfChannels, numberOfFrames, sampleRate)
{
}
-OfflineAudioContext::~OfflineAudioContext()
+ExceptionOr<Ref<OfflineAudioContext>> OfflineAudioContext::create(ScriptExecutionContext& context, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
{
+ // FIXME: Add support for workers.
+ if (!is<Document>(context))
+ return Exception { NOT_SUPPORTED_ERR };
+ if (!numberOfChannels || numberOfChannels > 10 || !numberOfFrames || !isSampleRateRangeGood(sampleRate))
+ return Exception { SYNTAX_ERR };
+ auto audioContext = adoptRef(*new OfflineAudioContext(downcast<Document>(context), numberOfChannels, numberOfFrames, sampleRate));
+ audioContext->suspendIfNeeded();
+ return WTFMove(audioContext);
}
} // namespace WebCore
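Besides the ExceptionOr conversion, create() adopts the is<Document>()/downcast<Document>() idiom in place of the older isDocument()/toDocument() pair: a type test guards a static downcast that asserts in debug builds. A minimal analog of that idiom with hypothetical types:

```cpp
#include <cassert>

struct ContextLike {
    virtual ~ContextLike() = default;
    virtual bool isDocument() const { return false; }
};

struct DocumentLike : ContextLike {
    bool isDocument() const override { return true; }
};

// Minimal analogs of WTF's is<>() and downcast<>().
template<typename T> bool is(const ContextLike&);

template<> bool is<DocumentLike>(const ContextLike& context)
{
    return context.isDocument();
}

template<typename T> T& downcast(ContextLike& context)
{
    assert(is<T>(context)); // downcast<> asserts the type in debug builds
    return static_cast<T&>(context);
}
```

Usage follows the diff: test with is<DocumentLike>(context), return an error if it fails, then downcast<DocumentLike>(context) unconditionally.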
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioContext.h b/Source/WebCore/Modules/webaudio/OfflineAudioContext.h
index 35bbc9663..5d2c1d4f9 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioContext.h
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioContext.h
@@ -22,23 +22,18 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef OfflineAudioContext_h
-#define OfflineAudioContext_h
+#pragma once
#include "AudioContext.h"
namespace WebCore {
-class OfflineAudioContext : public AudioContext {
+class OfflineAudioContext final : public AudioContext {
public:
- static PassRefPtr<OfflineAudioContext> create(ScriptExecutionContext&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
-
- virtual ~OfflineAudioContext();
+ static ExceptionOr<Ref<OfflineAudioContext>> create(ScriptExecutionContext&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
private:
OfflineAudioContext(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
};
} // namespace WebCore
-
-#endif // OfflineAudioContext_h
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl b/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl
index e4ecd5d8d..c6e0d74f0 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioContext.idl
@@ -24,12 +24,10 @@
[
Conditional=WEB_AUDIO,
- EventTarget,
JSGenerateToJSObject,
- Constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, float sampleRate),
- ConstructorRaisesException,
+ Constructor(unsigned long numberOfChannels, unsigned long numberOfFrames, unrestricted float sampleRate),
+ ConstructorMayThrowException,
ConstructorCallWith=ScriptExecutionContext,
InterfaceName=webkitOfflineAudioContext
] interface OfflineAudioContext : AudioContext {
-
};
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
index fd3123a91..505230419 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.cpp
@@ -38,7 +38,7 @@ namespace WebCore {
const size_t renderQuantumSize = 128;
-OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext* context, AudioBuffer* renderTarget)
+OfflineAudioDestinationNode::OfflineAudioDestinationNode(AudioContext& context, AudioBuffer* renderTarget)
: AudioDestinationNode(context, renderTarget->sampleRate())
, m_renderTarget(renderTarget)
, m_renderThread(0)
@@ -101,7 +101,12 @@ void OfflineAudioDestinationNode::offlineRender()
ASSERT(m_renderBus.get());
if (!m_renderBus.get())
return;
-
+
+ bool isAudioContextInitialized = context().isInitialized();
+ ASSERT(isAudioContextInitialized);
+ if (!isAudioContextInitialized)
+ return;
+
bool channelsMatch = m_renderBus->numberOfChannels() == m_renderTarget->numberOfChannels();
ASSERT(channelsMatch);
if (!channelsMatch)
@@ -112,15 +117,6 @@ void OfflineAudioDestinationNode::offlineRender()
if (!isRenderBusAllocated)
return;
- // Synchronize with HRTFDatabaseLoader.
- // The database must be loaded before we can proceed.
- HRTFDatabaseLoader* loader = context()->hrtfDatabaseLoader();
- ASSERT(loader);
- if (!loader)
- return;
-
- loader->waitForLoaderThreadCompletion();
-
// Break up the render target into smaller "render quantize" sized pieces.
// Render until we're finished.
size_t framesToProcess = m_renderTarget->length();
@@ -135,7 +131,7 @@ void OfflineAudioDestinationNode::offlineRender()
for (unsigned channelIndex = 0; channelIndex < numberOfChannels; ++channelIndex) {
const float* source = m_renderBus->channel(channelIndex)->data();
- float* destination = m_renderTarget->getChannelData(channelIndex)->data();
+ float* destination = m_renderTarget->channelData(channelIndex)->data();
memcpy(destination + n, source, sizeof(float) * framesAvailableToCopy);
}
@@ -144,23 +140,15 @@ void OfflineAudioDestinationNode::offlineRender()
}
// Our work is done. Let the AudioContext know.
- callOnMainThread(notifyCompleteDispatch, this);
-}
-
-void OfflineAudioDestinationNode::notifyCompleteDispatch(void* userData)
-{
- OfflineAudioDestinationNode* destinationNode = static_cast<OfflineAudioDestinationNode*>(userData);
- ASSERT(destinationNode);
- if (!destinationNode)
- return;
-
- destinationNode->notifyComplete();
- destinationNode->deref();
+ callOnMainThread([this] {
+ notifyComplete();
+ deref();
+ });
}
void OfflineAudioDestinationNode::notifyComplete()
{
- context()->fireCompletionEvent();
+ context().fireCompletionEvent();
}
} // namespace WebCore
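The lambda-based completion dispatch replaces the static trampoline: the render thread finishes, bounces notifyComplete() to the main thread, and the explicit deref() balances a ref() taken when rendering started. In the standalone sketch below, a shared_ptr capture plays the role of that ref()/deref() pair, and the dispatcher is a synchronous stand-in for WTF's callOnMainThread:

```cpp
#include <functional>
#include <memory>

// Synchronous stand-in for WTF's callOnMainThread dispatcher.
inline void callOnMainThreadLike(std::function<void()>&& task) { task(); }

struct OfflineNodeLike : std::enable_shared_from_this<OfflineNodeLike> {
    void notifyComplete() { /* fire the "complete" event */ }

    void finishRendering()
    {
        // Capturing shared_from_this() keeps the node alive until the task
        // runs, mirroring the manual ref()/deref() pair in the diff.
        callOnMainThreadLike([self = shared_from_this()] {
            self->notifyComplete();
        });
    }
};
```

As with any enable_shared_from_this type, an OfflineNodeLike must already be owned by a shared_ptr before finishRendering() runs.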
diff --git a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
index eb0ca426c..755604647 100644
--- a/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
+++ b/Source/WebCore/Modules/webaudio/OfflineAudioDestinationNode.h
@@ -22,12 +22,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef OfflineAudioDestinationNode_h
-#define OfflineAudioDestinationNode_h
+#pragma once
#include "AudioBuffer.h"
#include "AudioDestinationNode.h"
-#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/Threading.h>
@@ -38,25 +36,25 @@ class AudioContext;
class OfflineAudioDestinationNode : public AudioDestinationNode {
public:
- static PassRefPtr<OfflineAudioDestinationNode> create(AudioContext* context, AudioBuffer* renderTarget)
+ static Ref<OfflineAudioDestinationNode> create(AudioContext& context, AudioBuffer* renderTarget)
{
- return adoptRef(new OfflineAudioDestinationNode(context, renderTarget));
+ return adoptRef(*new OfflineAudioDestinationNode(context, renderTarget));
}
virtual ~OfflineAudioDestinationNode();
// AudioNode
- virtual void initialize() override;
- virtual void uninitialize() override;
+ void initialize() override;
+ void uninitialize() override;
// AudioDestinationNode
- virtual void enableInput(const String&) override { }
- virtual void startRendering() override;
+ void enableInput(const String&) override { }
+ void startRendering() override;
- virtual float sampleRate() const override { return m_renderTarget->sampleRate(); }
+ float sampleRate() const override { return m_renderTarget->sampleRate(); }
private:
- OfflineAudioDestinationNode(AudioContext*, AudioBuffer* renderTarget);
+ OfflineAudioDestinationNode(AudioContext&, AudioBuffer* renderTarget);
// This AudioNode renders into this AudioBuffer.
RefPtr<AudioBuffer> m_renderTarget;
@@ -71,10 +69,7 @@ private:
void offlineRender();
// For completion callback on main thread.
- static void notifyCompleteDispatch(void* userData);
void notifyComplete();
};
} // namespace WebCore
-
-#endif // OfflineAudioDestinationNode_h
diff --git a/Source/WebCore/Modules/webaudio/OscillatorNode.cpp b/Source/WebCore/Modules/webaudio/OscillatorNode.cpp
index 070dc9e1f..d38f9f3c6 100644
--- a/Source/WebCore/Modules/webaudio/OscillatorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/OscillatorNode.cpp
@@ -28,32 +28,27 @@
#include "OscillatorNode.h"
-#include "AudioContext.h"
#include "AudioNodeOutput.h"
-#include "AudioUtilities.h"
-#include "ExceptionCode.h"
+#include "AudioParam.h"
#include "PeriodicWave.h"
#include "VectorMath.h"
-#include <algorithm>
-#include <wtf/MathExtras.h>
namespace WebCore {
using namespace VectorMath;
-PeriodicWave* OscillatorNode::s_periodicWaveSine = 0;
-PeriodicWave* OscillatorNode::s_periodicWaveSquare = 0;
-PeriodicWave* OscillatorNode::s_periodicWaveSawtooth = 0;
-PeriodicWave* OscillatorNode::s_periodicWaveTriangle = 0;
+PeriodicWave* OscillatorNode::s_periodicWaveSine = nullptr;
+PeriodicWave* OscillatorNode::s_periodicWaveSquare = nullptr;
+PeriodicWave* OscillatorNode::s_periodicWaveSawtooth = nullptr;
+PeriodicWave* OscillatorNode::s_periodicWaveTriangle = nullptr;
-PassRefPtr<OscillatorNode> OscillatorNode::create(AudioContext* context, float sampleRate)
+Ref<OscillatorNode> OscillatorNode::create(AudioContext& context, float sampleRate)
{
- return adoptRef(new OscillatorNode(context, sampleRate));
+ return adoptRef(*new OscillatorNode(context, sampleRate));
}
-OscillatorNode::OscillatorNode(AudioContext* context, float sampleRate)
+OscillatorNode::OscillatorNode(AudioContext& context, float sampleRate)
: AudioScheduledSourceNode(context, sampleRate)
- , m_type(SINE)
, m_firstRender(true)
, m_virtualReadIndex(0)
, m_phaseIncrements(AudioNode::ProcessingSizeInFrames)
@@ -80,75 +75,41 @@ OscillatorNode::~OscillatorNode()
uninitialize();
}
-String OscillatorNode::type() const
+ExceptionOr<void> OscillatorNode::setType(Type type)
{
- switch (m_type) {
- case SINE:
- return "sine";
- case SQUARE:
- return "square";
- case SAWTOOTH:
- return "sawtooth";
- case TRIANGLE:
- return "triangle";
- case CUSTOM:
- return "custom";
- default:
- ASSERT_NOT_REACHED();
- return "custom";
- }
-}
-
-void OscillatorNode::setType(const String& type)
-{
- if (type == "sine")
- setType(SINE);
- else if (type == "square")
- setType(SQUARE);
- else if (type == "sawtooth")
- setType(SAWTOOTH);
- else if (type == "triangle")
- setType(TRIANGLE);
- else
- ASSERT_NOT_REACHED();
-}
-
-bool OscillatorNode::setType(unsigned type)
-{
- PeriodicWave* periodicWave = 0;
- float sampleRate = this->sampleRate();
+ PeriodicWave* periodicWave = nullptr;
switch (type) {
- case SINE:
+ case Type::Sine:
if (!s_periodicWaveSine)
- s_periodicWaveSine = PeriodicWave::createSine(sampleRate).leakRef();
+ s_periodicWaveSine = &PeriodicWave::createSine(sampleRate()).leakRef();
periodicWave = s_periodicWaveSine;
break;
- case SQUARE:
+ case Type::Square:
if (!s_periodicWaveSquare)
- s_periodicWaveSquare = PeriodicWave::createSquare(sampleRate).leakRef();
+ s_periodicWaveSquare = &PeriodicWave::createSquare(sampleRate()).leakRef();
periodicWave = s_periodicWaveSquare;
break;
- case SAWTOOTH:
+ case Type::Sawtooth:
if (!s_periodicWaveSawtooth)
- s_periodicWaveSawtooth = PeriodicWave::createSawtooth(sampleRate).leakRef();
+ s_periodicWaveSawtooth = &PeriodicWave::createSawtooth(sampleRate()).leakRef();
periodicWave = s_periodicWaveSawtooth;
break;
- case TRIANGLE:
+ case Type::Triangle:
if (!s_periodicWaveTriangle)
- s_periodicWaveTriangle = PeriodicWave::createTriangle(sampleRate).leakRef();
+ s_periodicWaveTriangle = &PeriodicWave::createTriangle(sampleRate()).leakRef();
periodicWave = s_periodicWaveTriangle;
break;
- case CUSTOM:
- default:
- // Return error for invalid types, including CUSTOM since setPeriodicWave() method must be
- // called explicitly.
- return false;
+ case Type::Custom:
+ if (m_type != Type::Custom)
+ return Exception { INVALID_STATE_ERR };
+ return { };
}
setPeriodicWave(periodicWave);
m_type = type;
- return true;
+
+ return { };
}
bool OscillatorNode::calculateSampleAccuratePhaseIncrements(size_t framesToProcess)
@@ -219,10 +180,10 @@ bool OscillatorNode::calculateSampleAccuratePhaseIncrements(size_t framesToProce
void OscillatorNode::process(size_t framesToProcess)
{
- AudioBus* outputBus = output(0)->bus();
+ auto& outputBus = *output(0)->bus();
- if (!isInitialized() || !outputBus->numberOfChannels()) {
- outputBus->zero();
+ if (!isInitialized() || !outputBus.numberOfChannels()) {
+ outputBus.zero();
return;
}
@@ -231,33 +192,32 @@ void OscillatorNode::process(size_t framesToProcess)
return;
// The audio thread can't block on this lock, so we use std::try_to_lock instead.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of changing wave-tables.
- outputBus->zero();
+ outputBus.zero();
return;
}
// We must access m_periodicWave only inside the lock.
if (!m_periodicWave.get()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
size_t quantumFrameOffset;
size_t nonSilentFramesToProcess;
-
updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, nonSilentFramesToProcess);
if (!nonSilentFramesToProcess) {
- outputBus->zero();
+ outputBus.zero();
return;
}
unsigned periodicWaveSize = m_periodicWave->periodicWaveSize();
double invPeriodicWaveSize = 1.0 / periodicWaveSize;
- float* destP = outputBus->channel(0)->mutableData();
+ float* destP = outputBus.channel(0)->mutableData();
ASSERT(quantumFrameOffset <= framesToProcess);
@@ -269,8 +229,8 @@ void OscillatorNode::process(size_t framesToProcess)
bool hasSampleAccurateValues = calculateSampleAccuratePhaseIncrements(framesToProcess);
float frequency = 0;
- float* higherWaveData = 0;
- float* lowerWaveData = 0;
+ float* higherWaveData = nullptr;
+ float* lowerWaveData = nullptr;
float tableInterpolationFactor;
if (!hasSampleAccurateValues) {
@@ -327,7 +287,7 @@ void OscillatorNode::process(size_t framesToProcess)
m_virtualReadIndex = virtualReadIndex;
- outputBus->clearSilentFlag();
+ outputBus.clearSilentFlag();
}
void OscillatorNode::reset()
@@ -340,9 +300,9 @@ void OscillatorNode::setPeriodicWave(PeriodicWave* periodicWave)
ASSERT(isMainThread());
// This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
m_periodicWave = periodicWave;
- m_type = CUSTOM;
+ m_type = Type::Custom;
}
bool OscillatorNode::propagatesSilence() const
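
The setType() rework above replaces the old string/unsigned setter pair with a single enum-based setter, and caches one PeriodicWave per basic waveform for the lifetime of the process: the table is built at the sample rate of the first oscillator that asks for it and is intentionally never destroyed (the leakRef() calls). A standalone sketch of that caching pattern, with illustrative names rather than WebKit code:

    #include <cstdio>

    struct WaveTable {
        float sampleRate;
        explicit WaveTable(float rate)
            : sampleRate(rate)
        {
            // Real code would build the band-limited tables here.
        }
    };

    static WaveTable* sharedSineTable(float sampleRate)
    {
        static WaveTable* table = nullptr; // process-lifetime, like s_periodicWaveSine
        if (!table)
            table = new WaveTable(sampleRate); // intentionally leaked, as with leakRef()
        return table;
    }

    int main()
    {
        WaveTable* a = sharedSineTable(44100);
        WaveTable* b = sharedSineTable(48000); // reuses the table built for 44100 Hz
        std::printf("same table: %d, rate: %g\n", a == b, b->sampleRate);
    }

Note the caveat this makes visible: the shared table keeps the sample rate of whichever context created it first.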
diff --git a/Source/WebCore/Modules/webaudio/OscillatorNode.h b/Source/WebCore/Modules/webaudio/OscillatorNode.h
index 05969266f..790f01507 100644
--- a/Source/WebCore/Modules/webaudio/OscillatorNode.h
+++ b/Source/WebCore/Modules/webaudio/OscillatorNode.h
@@ -22,47 +22,34 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef OscillatorNode_h
-#define OscillatorNode_h
+#pragma once
-#include "AudioBus.h"
-#include "AudioParam.h"
#include "AudioScheduledSourceNode.h"
-#include <mutex>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefPtr.h>
+#include <wtf/Lock.h>
namespace WebCore {
-class AudioContext;
class PeriodicWave;
// OscillatorNode is an audio generator of periodic waveforms.
-class OscillatorNode : public AudioScheduledSourceNode {
+class OscillatorNode final : public AudioScheduledSourceNode {
public:
// The waveform type.
- // These must be defined as in the .idl file.
- enum {
- SINE = 0,
- SQUARE = 1,
- SAWTOOTH = 2,
- TRIANGLE = 3,
- CUSTOM = 4
+ enum class Type {
+ Sine,
+ Square,
+ Sawtooth,
+ Triangle,
+ Custom
};
- static PassRefPtr<OscillatorNode> create(AudioContext*, float sampleRate);
+ static Ref<OscillatorNode> create(AudioContext&, float sampleRate);
virtual ~OscillatorNode();
-
- // AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
-
- String type() const;
- bool setType(unsigned); // Returns true on success.
- void setType(const String&);
+ Type type() const { return m_type; }
+ ExceptionOr<void> setType(Type);
AudioParam* frequency() { return m_frequency.get(); }
AudioParam* detune() { return m_detune.get(); }
@@ -70,18 +57,21 @@ public:
void setPeriodicWave(PeriodicWave*);
private:
- OscillatorNode(AudioContext*, float sampleRate);
+ OscillatorNode(AudioContext&, float sampleRate);
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override { return 0; }
+ void process(size_t framesToProcess) final;
+ void reset() final;
+
+ double tailTime() const final { return 0; }
+ double latencyTime() const final { return 0; }
// Returns true if there are sample-accurate timeline parameter changes.
bool calculateSampleAccuratePhaseIncrements(size_t framesToProcess);
- virtual bool propagatesSilence() const override;
+ bool propagatesSilence() const final;
// One of the waveform types defined in the enum.
- unsigned short m_type;
+ Type m_type { Type::Sine };
// Frequency value in Hertz.
RefPtr<AudioParam> m_frequency;
@@ -96,7 +86,7 @@ private:
double m_virtualReadIndex;
// This synchronizes process().
- mutable std::mutex m_processMutex;
+ mutable Lock m_processMutex;
// Stores sample-accurate values calculated according to frequency and detune.
AudioFloatArray m_phaseIncrements;
@@ -112,5 +102,3 @@ private:
};
} // namespace WebCore
-
-#endif // OscillatorNode_h
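
In the header above, setType() now returns ExceptionOr<void> instead of a bool success flag: success is an empty value, and failure carries an Exception that the bindings turn into a JavaScript DOMException. A rough standalone analogue of that shape, using std::optional purely for illustration (WebKit's ExceptionOr is a dedicated class, not an optional):

    #include <cstdio>
    #include <optional>

    struct Exception { int code; };
    using ExceptionOrVoid = std::optional<Exception>; // empty == success

    static ExceptionOrVoid setTypeToCustom(bool alreadyCustom)
    {
        if (!alreadyCustom)
            return Exception { 11 }; // 11 == legacy INVALID_STATE_ERR; custom requires setPeriodicWave()
        return std::nullopt;
    }

    int main()
    {
        if (auto exception = setTypeToCustom(false))
            std::printf("would throw DOMException with legacy code %d\n", exception->code);
    }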
diff --git a/Source/WebCore/Modules/webaudio/OscillatorNode.idl b/Source/WebCore/Modules/webaudio/OscillatorNode.idl
index 8bf310d48..b5edc10a4 100644
--- a/Source/WebCore/Modules/webaudio/OscillatorNode.idl
+++ b/Source/WebCore/Modules/webaudio/OscillatorNode.idl
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2012, Google Inc. All rights reserved.
+ * Copyright (C) 2016, Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -22,20 +23,19 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-// OscillatorNode is an audio generator of periodic waveforms.
+enum OscillatorType {
+ "sine",
+ "square",
+ "sawtooth",
+ "triangle",
+ "custom"
+};
+
[
Conditional=WEB_AUDIO,
JSGenerateToJSObject,
] interface OscillatorNode : AudioNode {
-
- // Type constants.
- const unsigned short SINE = 0;
- const unsigned short SQUARE = 1;
- const unsigned short SAWTOOTH = 2;
- const unsigned short TRIANGLE = 3;
- const unsigned short CUSTOM = 4;
-
- [CustomSetter] attribute DOMString type;
+ [SetterMayThrowException] attribute OscillatorType type;
// Playback state constants.
const unsigned short UNSCHEDULED_STATE = 0;
@@ -48,13 +48,10 @@
readonly attribute AudioParam frequency; // in Hertz
readonly attribute AudioParam detune; // in Cents
- [RaisesException] void start(double when);
- [RaisesException] void stop(double when);
-
- [Conditional=LEGACY_WEB_AUDIO, RaisesException] void noteOn(double when);
- [Conditional=LEGACY_WEB_AUDIO, RaisesException] void noteOff(double when);
+ [MayThrowException] void start(optional unrestricted double when = 0);
+ [MayThrowException] void stop(optional unrestricted double when = 0);
- void setPeriodicWave(PeriodicWave wave);
+ void setPeriodicWave(PeriodicWave? wave); // FIXME: The parameter should not be nullable.
- attribute EventListener onended;
+ attribute EventHandler onended;
};
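
With the attribute typed as the IDL enum above, invalid strings are filtered out by the generated bindings instead of a CustomSetter, and only the "custom" value can still fail on the C++ side (hence SetterMayThrowException). A rough illustration of the string-to-enum step the bindings perform; this is illustrative code, not the actual generator output:

    #include <optional>
    #include <string>

    enum class OscillatorType { Sine, Square, Sawtooth, Triangle, Custom };

    // Strings outside the enumeration never reach OscillatorNode::setType();
    // the generated binding rejects them before calling into WebCore.
    static std::optional<OscillatorType> parseOscillatorType(const std::string& value)
    {
        if (value == "sine") return OscillatorType::Sine;
        if (value == "square") return OscillatorType::Square;
        if (value == "sawtooth") return OscillatorType::Sawtooth;
        if (value == "triangle") return OscillatorType::Triangle;
        if (value == "custom") return OscillatorType::Custom;
        return std::nullopt;
    }

    int main()
    {
        return parseOscillatorType("sawtooth") ? 0 : 1;
    }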
diff --git a/Source/WebCore/Modules/webaudio/PannerNode.cpp b/Source/WebCore/Modules/webaudio/PannerNode.cpp
index 17bf13042..d7f92cabc 100644
--- a/Source/WebCore/Modules/webaudio/PannerNode.cpp
+++ b/Source/WebCore/Modules/webaudio/PannerNode.cpp
@@ -23,17 +23,15 @@
*/
#include "config.h"
+#include "PannerNode.h"
#if ENABLE(WEB_AUDIO)
-#include "PannerNode.h"
-
#include "AudioBufferSourceNode.h"
#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
-#include "ExceptionCode.h"
#include "HRTFPanner.h"
#include "ScriptExecutionContext.h"
#include <wtf/MathExtras.h>
@@ -46,12 +44,15 @@ static void fixNANs(double &x)
x = 0.0;
}
-PannerNode::PannerNode(AudioContext* context, float sampleRate)
+PannerNode::PannerNode(AudioContext& context, float sampleRate)
: AudioNode(context, sampleRate)
- , m_panningModel(Panner::PanningModelHRTF)
+ , m_panningModel(PanningModelType::HRTF)
, m_lastGain(-1.0)
, m_connectionCount(0)
{
+ // Load the HRTF database asynchronously so that building it doesn't block the JavaScript thread.
+ m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());
+
addInput(std::make_unique<AudioNodeInput>(this));
addOutput(std::make_unique<AudioNodeOutput>(this, 2));
@@ -81,11 +82,12 @@ void PannerNode::pullInputs(size_t framesToProcess)
{
// We override pullInputs(), so we can detect new AudioSourceNodes which have connected to us when new connections are made.
// These AudioSourceNodes need to be made aware of our existence in order to handle doppler shift pitch changes.
- if (m_connectionCount != context()->connectionCount()) {
- m_connectionCount = context()->connectionCount();
+ if (m_connectionCount != context().connectionCount()) {
+ m_connectionCount = context().connectionCount();
// Recursively go through all nodes connected to us.
- notifyAudioSourcesConnectedToNode(this);
+ HashSet<AudioNode*> visitedNodes;
+ notifyAudioSourcesConnectedToNode(this, visitedNodes);
}
AudioNode::pullInputs(framesToProcess);
@@ -101,14 +103,23 @@ void PannerNode::process(size_t framesToProcess)
}
AudioBus* source = input(0)->bus();
-
if (!source) {
destination->zero();
return;
}
+ // HRTFDatabase should be loaded before proceeding for offline audio context when panningModel() is "HRTF".
+ if (panningModel() == PanningModelType::HRTF && !m_hrtfDatabaseLoader->isLoaded()) {
+ if (context().isOfflineContext())
+ m_hrtfDatabaseLoader->waitForLoaderThreadCompletion();
+ else {
+ destination->zero();
+ return;
+ }
+ }
+
// The audio thread can't block on this lock, so we use std::try_to_lock instead.
- std::unique_lock<std::mutex> lock(m_pannerMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_pannerMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of changing the panner.
destination->zero();
@@ -144,7 +155,7 @@ void PannerNode::initialize()
if (isInitialized())
return;
- m_panner = Panner::create(m_panningModel, sampleRate(), context()->hrtfDatabaseLoader());
+ m_panner = Panner::create(m_panningModel, sampleRate(), m_hrtfDatabaseLoader.get());
AudioNode::initialize();
}
@@ -160,100 +171,28 @@ void PannerNode::uninitialize()
AudioListener* PannerNode::listener()
{
- return context()->listener();
+ return context().listener();
}
-String PannerNode::panningModel() const
+void PannerNode::setPanningModel(PanningModelType model)
{
- switch (m_panningModel) {
- case EQUALPOWER:
- return "equalpower";
- case HRTF:
- return "HRTF";
- case SOUNDFIELD:
- return "soundfield";
- default:
- ASSERT_NOT_REACHED();
- return "HRTF";
- }
-}
+ if (!m_panner.get() || model != m_panningModel) {
+ // This synchronizes with process().
+ std::lock_guard<Lock> lock(m_pannerMutex);
-void PannerNode::setPanningModel(const String& model)
-{
- if (model == "equalpower")
- setPanningModel(EQUALPOWER);
- else if (model == "HRTF")
- setPanningModel(HRTF);
- else if (model == "soundfield")
- setPanningModel(SOUNDFIELD);
- else
- ASSERT_NOT_REACHED();
-}
-
-bool PannerNode::setPanningModel(unsigned model)
-{
- switch (model) {
- case EQUALPOWER:
- case HRTF:
- if (!m_panner.get() || model != m_panningModel) {
- // This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_pannerMutex);
-
- m_panner = Panner::create(model, sampleRate(), context()->hrtfDatabaseLoader());
- m_panningModel = model;
- }
- break;
- case SOUNDFIELD:
- // FIXME: Implement sound field model. See // https://bugs.webkit.org/show_bug.cgi?id=77367.
- context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "'soundfield' panning model not implemented.");
- break;
- default:
- return false;
- }
-
- return true;
-}
-
-String PannerNode::distanceModel() const
-{
- switch (const_cast<PannerNode*>(this)->m_distanceEffect.model()) {
- case DistanceEffect::ModelLinear:
- return "linear";
- case DistanceEffect::ModelInverse:
- return "inverse";
- case DistanceEffect::ModelExponential:
- return "exponential";
- default:
- ASSERT_NOT_REACHED();
- return "inverse";
+ m_panner = Panner::create(model, sampleRate(), m_hrtfDatabaseLoader.get());
+ m_panningModel = model;
}
}
-void PannerNode::setDistanceModel(const String& model)
+DistanceModelType PannerNode::distanceModel() const
{
- if (model == "linear")
- setDistanceModel(DistanceEffect::ModelLinear);
- else if (model == "inverse")
- setDistanceModel(DistanceEffect::ModelInverse);
- else if (model == "exponential")
- setDistanceModel(DistanceEffect::ModelExponential);
- else
- ASSERT_NOT_REACHED();
+ return const_cast<PannerNode*>(this)->m_distanceEffect.model();
}
-bool PannerNode::setDistanceModel(unsigned model)
+void PannerNode::setDistanceModel(DistanceModelType model)
{
- switch (model) {
- case DistanceEffect::ModelLinear:
- case DistanceEffect::ModelInverse:
- case DistanceEffect::ModelExponential:
- m_distanceEffect.setModel(static_cast<DistanceEffect::ModelType>(model), true);
- break;
- default:
- return false;
- }
-
- return true;
+ m_distanceEffect.setModel(model, true);
}
void PannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
@@ -385,7 +324,7 @@ float PannerNode::distanceConeGain()
return float(distanceGain * coneGain);
}
-void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
+void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node, HashSet<AudioNode*>& visitedNodes)
{
ASSERT(node);
if (!node)
@@ -404,7 +343,11 @@ void PannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
AudioNodeOutput* connectedOutput = input->renderingOutput(j);
AudioNode* connectedNode = connectedOutput->node();
- notifyAudioSourcesConnectedToNode(connectedNode); // recurse
+ if (visitedNodes.contains(connectedNode))
+ continue;
+
+ visitedNodes.add(connectedNode);
+ notifyAudioSourcesConnectedToNode(connectedNode, visitedNodes);
}
}
}
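
The visitedNodes set threaded through notifyAudioSourcesConnectedToNode() above bounds the traversal: in a rendering graph with a feedback loop, the old version could recurse through the same nodes without limit. A standalone sketch of the guard, with illustrative types rather than WebKit code:

    #include <unordered_set>
    #include <vector>

    struct Node { std::vector<Node*> upstream; };

    static void visitSources(Node* node, std::unordered_set<Node*>& visited)
    {
        for (Node* source : node->upstream) {
            if (!visited.insert(source).second)
                continue; // already visited; skipping here is what breaks the cycle
            visitSources(source, visited);
        }
    }

    int main()
    {
        Node a, b;
        a.upstream.push_back(&b);
        b.upstream.push_back(&a); // feedback loop: a <-> b
        std::unordered_set<Node*> visited;
        visitSources(&a, visited); // terminates: each node is entered at most once
    }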
diff --git a/Source/WebCore/Modules/webaudio/PannerNode.h b/Source/WebCore/Modules/webaudio/PannerNode.h
index 190eb79d5..d5f6d2862 100644
--- a/Source/WebCore/Modules/webaudio/PannerNode.h
+++ b/Source/WebCore/Modules/webaudio/PannerNode.h
@@ -22,8 +22,9 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PannerNode_h
-#define PannerNode_h
+#pragma once
+
+#if ENABLE(WEB_AUDIO)
#include "AudioBus.h"
#include "AudioListener.h"
@@ -32,9 +33,11 @@
#include "Cone.h"
#include "Distance.h"
#include "FloatPoint3D.h"
+#include "HRTFDatabaseLoader.h"
#include "Panner.h"
#include <memory>
-#include <mutex>
+#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
namespace WebCore {
@@ -47,42 +50,26 @@ namespace WebCore {
class PannerNode : public AudioNode {
public:
- // These must be defined as in the .idl file and must match those in the Panner class.
- enum {
- EQUALPOWER = 0,
- HRTF = 1,
- SOUNDFIELD = 2,
- };
-
- // These must be defined as in the .idl file and must match those
- // in the DistanceEffect class.
- enum {
- LINEAR_DISTANCE = 0,
- INVERSE_DISTANCE = 1,
- EXPONENTIAL_DISTANCE = 2,
- };
-
- static PassRefPtr<PannerNode> create(AudioContext* context, float sampleRate)
+ static Ref<PannerNode> create(AudioContext& context, float sampleRate)
{
- return adoptRef(new PannerNode(context, sampleRate));
+ return adoptRef(*new PannerNode(context, sampleRate));
}
virtual ~PannerNode();
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void pullInputs(size_t framesToProcess) override;
- virtual void reset() override;
- virtual void initialize() override;
- virtual void uninitialize() override;
+ void process(size_t framesToProcess) override;
+ void pullInputs(size_t framesToProcess) override;
+ void reset() override;
+ void initialize() override;
+ void uninitialize() override;
// Listener
AudioListener* listener();
// Panning model
- String panningModel() const;
- bool setPanningModel(unsigned); // Returns true on success.
- void setPanningModel(const String&);
+ PanningModelType panningModel() const { return m_panningModel; }
+ void setPanningModel(PanningModelType);
// Position
FloatPoint3D position() const { return m_position; }
@@ -97,9 +84,8 @@ public:
void setVelocity(float x, float y, float z) { m_velocity = FloatPoint3D(x, y, z); }
// Distance parameters
- String distanceModel() const;
- bool setDistanceModel(unsigned); // Returns true on success.
- void setDistanceModel(const String&);
+ DistanceModelType distanceModel() const;
+ void setDistanceModel(DistanceModelType);
double refDistance() { return m_distanceEffect.refDistance(); }
void setRefDistance(double refDistance) { m_distanceEffect.setRefDistance(refDistance); }
@@ -127,21 +113,21 @@ public:
AudioParam* distanceGain() { return m_distanceGain.get(); }
AudioParam* coneGain() { return m_coneGain.get(); }
- virtual double tailTime() const override { return m_panner ? m_panner->tailTime() : 0; }
- virtual double latencyTime() const override { return m_panner ? m_panner->latencyTime() : 0; }
+ double tailTime() const override { return m_panner ? m_panner->tailTime() : 0; }
+ double latencyTime() const override { return m_panner ? m_panner->latencyTime() : 0; }
private:
- PannerNode(AudioContext*, float sampleRate);
+ PannerNode(AudioContext&, float sampleRate);
// Returns the combined distance and cone gain attenuation.
float distanceConeGain();
// Notifies any AudioBufferSourceNodes connected to us either directly or indirectly about our existence.
// This is in order to handle the pitch change necessary for the doppler shift.
- void notifyAudioSourcesConnectedToNode(AudioNode*);
+ void notifyAudioSourcesConnectedToNode(AudioNode*, HashSet<AudioNode*>& visitedNodes);
std::unique_ptr<Panner> m_panner;
- unsigned m_panningModel;
+ PanningModelType m_panningModel;
FloatPoint3D m_position;
FloatPoint3D m_orientation;
@@ -154,12 +140,15 @@ private:
ConeEffect m_coneEffect;
float m_lastGain;
+ // HRTF Database loader
+ RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
+
unsigned m_connectionCount;
// Synchronize process() and setPanningModel() which can change the panner.
- mutable std::mutex m_pannerMutex;
+ mutable Lock m_pannerMutex;
};
} // namespace WebCore
-#endif // PannerNode_h
+#endif
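
PannerNode now owns its HRTFDatabaseLoader, and the process() change earlier treats readiness differently per context kind: an offline render may block until the database has loaded, while a realtime render must keep producing audio and substitutes silence. A minimal sketch of that policy (illustrative; the stand-in loader just flips a flag where the real one joins its loader thread):

    struct Loader {
        bool loaded { false };
        bool isLoaded() const { return loaded; }
        void waitForLoaderThreadCompletion() { loaded = true; } // stand-in for the real join
    };

    static bool ensureHRTFReady(Loader& loader, bool isOfflineContext)
    {
        if (loader.isLoaded())
            return true;
        if (isOfflineContext) {
            loader.waitForLoaderThreadCompletion(); // offline rendering can afford to block
            return true;
        }
        return false; // realtime rendering cannot block; the caller zeroes this quantum
    }

    int main()
    {
        Loader loader;
        return ensureHRTFReady(loader, true) ? 0 : 1;
    }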
diff --git a/Source/WebCore/Modules/webaudio/PannerNode.idl b/Source/WebCore/Modules/webaudio/PannerNode.idl
index ee0bb287f..fc991bf94 100644
--- a/Source/WebCore/Modules/webaudio/PannerNode.idl
+++ b/Source/WebCore/Modules/webaudio/PannerNode.idl
@@ -24,36 +24,43 @@
[
Conditional=WEB_AUDIO,
+ ImplementedAs=PanningModelType
+] enum PanningModelType {
+ "equalpower",
+ "HRTF"
+};
+
+[
+ Conditional=WEB_AUDIO,
+ ImplementedAs=DistanceModelType
+] enum DistanceModelType {
+ "linear",
+ "inverse",
+ "exponential"
+};
+
+[
+ Conditional=WEB_AUDIO,
JSGenerateToJSObject,
InterfaceName=webkitAudioPannerNode,
] interface PannerNode : AudioNode {
- // Panning model
- const unsigned short EQUALPOWER = 0;
- const unsigned short HRTF = 1;
- const unsigned short SOUNDFIELD = 2;
-
- // Distance model
- const unsigned short LINEAR_DISTANCE = 0;
- const unsigned short INVERSE_DISTANCE = 1;
- const unsigned short EXPONENTIAL_DISTANCE = 2;
-
// Default model for stereo is HRTF
- [CustomSetter] attribute DOMString panningModel;
+ attribute PanningModelType panningModel;
// Uses a 3D cartesian coordinate system
- void setPosition(float x, float y, float z);
- void setOrientation(float x, float y, float z);
- void setVelocity(float x, float y, float z);
+ void setPosition(unrestricted float x, unrestricted float y, unrestricted float z);
+ void setOrientation(unrestricted float x, unrestricted float y, unrestricted float z);
+ void setVelocity(unrestricted float x, unrestricted float y, unrestricted float z);
// Distance model
- [CustomSetter] attribute DOMString distanceModel;
+ attribute DistanceModelType distanceModel;
- attribute double refDistance;
- attribute double maxDistance;
- attribute double rolloffFactor;
+ attribute unrestricted double refDistance;
+ attribute unrestricted double maxDistance;
+ attribute unrestricted double rolloffFactor;
// Directional sound cone
- attribute double coneInnerAngle;
- attribute double coneOuterAngle;
- attribute double coneOuterGain;
+ attribute unrestricted double coneInnerAngle;
+ attribute unrestricted double coneOuterAngle;
+ attribute unrestricted double coneOuterGain;
};
diff --git a/Source/WebCore/Modules/webaudio/PeriodicWave.cpp b/Source/WebCore/Modules/webaudio/PeriodicWave.cpp
index 3044c21c1..bb2a07d23 100644
--- a/Source/WebCore/Modules/webaudio/PeriodicWave.cpp
+++ b/Source/WebCore/Modules/webaudio/PeriodicWave.cpp
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -33,7 +33,6 @@
#include "PeriodicWave.h"
#include "FFTFrame.h"
-#include "OscillatorNode.h"
#include "VectorMath.h"
#include <algorithm>
@@ -45,44 +44,40 @@ namespace WebCore {
using namespace VectorMath;
-PassRefPtr<PeriodicWave> PeriodicWave::create(float sampleRate, Float32Array* real, Float32Array* imag)
+Ref<PeriodicWave> PeriodicWave::create(float sampleRate, Float32Array& real, Float32Array& imaginary)
{
- bool isGood = real && imag && real->length() == imag->length();
- ASSERT(isGood);
- if (isGood) {
- RefPtr<PeriodicWave> waveTable = adoptRef(new PeriodicWave(sampleRate));
- size_t numberOfComponents = real->length();
- waveTable->createBandLimitedTables(real->data(), imag->data(), numberOfComponents);
- return waveTable;
- }
- return nullptr;
+ ASSERT(real.length() == imaginary.length());
+
+ auto waveTable = adoptRef(*new PeriodicWave(sampleRate));
+ waveTable->createBandLimitedTables(real.data(), imaginary.data(), real.length());
+ return waveTable;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSine(float sampleRate)
+Ref<PeriodicWave> PeriodicWave::createSine(float sampleRate)
{
- RefPtr<PeriodicWave> waveTable = adoptRef(new PeriodicWave(sampleRate));
- waveTable->generateBasicWaveform(OscillatorNode::SINE);
+ Ref<PeriodicWave> waveTable = adoptRef(*new PeriodicWave(sampleRate));
+ waveTable->generateBasicWaveform(Type::Sine);
return waveTable;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSquare(float sampleRate)
+Ref<PeriodicWave> PeriodicWave::createSquare(float sampleRate)
{
- RefPtr<PeriodicWave> waveTable = adoptRef(new PeriodicWave(sampleRate));
- waveTable->generateBasicWaveform(OscillatorNode::SQUARE);
+ Ref<PeriodicWave> waveTable = adoptRef(*new PeriodicWave(sampleRate));
+ waveTable->generateBasicWaveform(Type::Square);
return waveTable;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createSawtooth(float sampleRate)
+Ref<PeriodicWave> PeriodicWave::createSawtooth(float sampleRate)
{
- RefPtr<PeriodicWave> waveTable = adoptRef(new PeriodicWave(sampleRate));
- waveTable->generateBasicWaveform(OscillatorNode::SAWTOOTH);
+ Ref<PeriodicWave> waveTable = adoptRef(*new PeriodicWave(sampleRate));
+ waveTable->generateBasicWaveform(Type::Sawtooth);
return waveTable;
}
-PassRefPtr<PeriodicWave> PeriodicWave::createTriangle(float sampleRate)
+Ref<PeriodicWave> PeriodicWave::createTriangle(float sampleRate)
{
- RefPtr<PeriodicWave> waveTable = adoptRef(new PeriodicWave(sampleRate));
- waveTable->generateBasicWaveform(OscillatorNode::TRIANGLE);
+ Ref<PeriodicWave> waveTable = adoptRef(*new PeriodicWave(sampleRate));
+ waveTable->generateBasicWaveform(Type::Triangle);
return waveTable;
}
@@ -217,7 +212,7 @@ void PeriodicWave::createBandLimitedTables(const float* realData, const float* i
}
}
-void PeriodicWave::generateBasicWaveform(int shape)
+void PeriodicWave::generateBasicWaveform(Type shape)
{
unsigned fftSize = periodicWaveSize();
unsigned halfSize = fftSize / 2;
@@ -242,31 +237,26 @@ void PeriodicWave::generateBasicWaveform(int shape)
// Calculate Fourier coefficients depending on the shape.
// Note that the overall scaling (magnitude) of the waveforms is normalized in createBandLimitedTables().
switch (shape) {
- case OscillatorNode::SINE:
+ case Type::Sine:
// Standard sine wave function.
a = 0;
b = (n == 1) ? 1 : 0;
break;
- case OscillatorNode::SQUARE:
+ case Type::Square:
// Square-shaped waveform with the first half its maximum value and the second half its minimum value.
a = 0;
b = invOmega * ((n & 1) ? 2 : 0);
break;
- case OscillatorNode::SAWTOOTH:
+ case Type::Sawtooth:
// Sawtooth-shaped waveform with the first half ramping from zero to maximum and the second half from minimum to zero.
a = 0;
b = -invOmega * cos(0.5 * omega);
break;
- case OscillatorNode::TRIANGLE:
+ case Type::Triangle:
// Triangle-shaped waveform going from its maximum value to its minimum value then back to the maximum value.
a = (4 - 4 * cos(0.5 * omega)) / (n * n * piFloat * piFloat);
b = 0;
break;
- default:
- ASSERT_NOT_REACHED();
- a = 0;
- b = 0;
- break;
}
realP[n] = a;
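
In the square-wave branch above, b = invOmega * ((n & 1) ? 2 : 0) yields coefficients proportional to 1/n on odd harmonics and zero on even ones -- the classic square-wave Fourier series. The absolute scale is irrelevant because createBandLimitedTables() normalizes overall magnitude. A quick standalone check, assuming omega grows as 2 * pi * n (the statement computing omega lies outside this hunk):

    #include <cstdio>

    int main()
    {
        const double pi = 3.14159265358979323846;
        for (int n = 1; n <= 8; ++n) {
            double invOmega = 1.0 / (2 * pi * n); // assumed omega = 2 * pi * n
            double b = invOmega * ((n & 1) ? 2 : 0);
            std::printf("b[%d] = %f\n", n, b); // nonzero only for odd harmonics, falling off as 1/n
        }
    }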
diff --git a/Source/WebCore/Modules/webaudio/PeriodicWave.h b/Source/WebCore/Modules/webaudio/PeriodicWave.h
index 79a098c55..0a1253c89 100644
--- a/Source/WebCore/Modules/webaudio/PeriodicWave.h
+++ b/Source/WebCore/Modules/webaudio/PeriodicWave.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,13 +26,11 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef PeriodicWave_h
-#define PeriodicWave_h
+#pragma once
#include "AudioArray.h"
#include <memory>
#include <runtime/Float32Array.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/Vector.h>
@@ -41,13 +39,13 @@ namespace WebCore {
class PeriodicWave : public RefCounted<PeriodicWave> {
public:
- static PassRefPtr<PeriodicWave> createSine(float sampleRate);
- static PassRefPtr<PeriodicWave> createSquare(float sampleRate);
- static PassRefPtr<PeriodicWave> createSawtooth(float sampleRate);
- static PassRefPtr<PeriodicWave> createTriangle(float sampleRate);
+ static Ref<PeriodicWave> createSine(float sampleRate);
+ static Ref<PeriodicWave> createSquare(float sampleRate);
+ static Ref<PeriodicWave> createSawtooth(float sampleRate);
+ static Ref<PeriodicWave> createTriangle(float sampleRate);
// Creates an arbitrary wave given the frequency components (Fourier coefficients).
- static PassRefPtr<PeriodicWave> create(float sampleRate, Float32Array* real, Float32Array* imag);
+ static Ref<PeriodicWave> create(float sampleRate, Float32Array& real, Float32Array& imag);
// Returns pointers to the lower and higher wave data for the pitch range containing
// the given fundamental frequency. These two tables are in adjacent "pitch" ranges
@@ -64,9 +62,16 @@ public:
float sampleRate() const { return m_sampleRate; }
private:
+ enum class Type {
+ Sine,
+ Square,
+ Sawtooth,
+ Triangle,
+ };
+
explicit PeriodicWave(float sampleRate);
- void generateBasicWaveform(int);
+ void generateBasicWaveform(Type);
float m_sampleRate;
unsigned m_periodicWaveSize;
@@ -93,5 +98,3 @@ private:
};
} // namespace WebCore
-
-#endif // PeriodicWave_h
diff --git a/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp b/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp
index f4ff708a7..035dc2b20 100644
--- a/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp
+++ b/Source/WebCore/Modules/webaudio/RealtimeAnalyser.cpp
@@ -30,7 +30,6 @@
#include "AudioBus.h"
#include "AudioUtilities.h"
-#include "FFTFrame.h"
#include "VectorMath.h"
#include <algorithm>
#include <complex>
@@ -185,7 +184,7 @@ void RealtimeAnalyser::doFFTAnalysis()
imagP[0] = 0;
// Normalize so that an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor).
- const double magnitudeScale = 1.0 / DefaultFFTSize;
+ const double magnitudeScale = 1.0 / fftSize;
// A value of 0 does no averaging with the previous result. Larger values produce slower, but smoother changes.
double k = m_smoothingTimeConstant;
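
The one-line normalization fix above matters whenever the analyser's fftSize differs from DefaultFFTSize: the raw bin magnitude of a full-scale sine grows linearly with the transform size N, so only a 1/N scale (not 1/DefaultFFTSize) keeps a 0dBfs input reading as 0dBfs at every size. A small standalone DFT check (illustrative; the exact factor of 2 depends on the FFT packing convention):

    #include <cmath>
    #include <complex>
    #include <cstdio>
    #include <initializer_list>

    int main()
    {
        const double pi = 3.14159265358979323846;
        for (unsigned n : { 1024u, 2048u }) {
            std::complex<double> bin = 0;
            for (unsigned i = 0; i < n; ++i) {
                double sample = std::sin(2 * pi * 8 * i / n); // full-scale sine, 8 cycles per window
                bin += sample * std::exp(std::complex<double>(0, -2 * pi * 8 * i / n));
            }
            // Raw magnitude is n / 2, so dividing by a fixed DefaultFFTSize mis-scales other sizes.
            std::printf("N=%u raw=%.1f normalized=%.3f\n", n, std::abs(bin), 2 * std::abs(bin) / n);
        }
    }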
diff --git a/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h b/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h
index aa0f877ac..13f94739d 100644
--- a/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h
+++ b/Source/WebCore/Modules/webaudio/RealtimeAnalyser.h
@@ -22,10 +22,10 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RealtimeAnalyser_h
-#define RealtimeAnalyser_h
+#pragma once
#include "AudioArray.h"
+#include "FFTFrame.h"
#include <memory>
#include <runtime/Float32Array.h>
#include <runtime/Uint8Array.h>
@@ -35,7 +35,6 @@
namespace WebCore {
class AudioBus;
-class FFTFrame;
class RealtimeAnalyser {
WTF_MAKE_NONCOPYABLE(RealtimeAnalyser);
@@ -97,5 +96,3 @@ private:
};
} // namespace WebCore
-
-#endif // RealtimeAnalyser_h
diff --git a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
index cb3297bd4..b95e093d0 100644
--- a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
@@ -35,12 +35,13 @@
#include "AudioNodeOutput.h"
#include "AudioProcessingEvent.h"
#include "Document.h"
+#include "EventNames.h"
#include <runtime/Float32Array.h>
#include <wtf/MainThread.h>
namespace WebCore {
-PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
// Check for valid buffer size.
switch (bufferSize) {
@@ -65,10 +66,10 @@ PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* contex
if (numberOfOutputChannels > AudioContext::maxNumberOfChannels())
return nullptr;
- return adoptRef(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+ return adoptRef(*new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
}
-ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ScriptProcessorNode::ScriptProcessorNode(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
: AudioNode(context, sampleRate)
, m_doubleBufferIndex(0)
, m_doubleBufferIndexForEvent(0)
@@ -104,7 +105,7 @@ void ScriptProcessorNode::initialize()
if (isInitialized())
return;
- float sampleRate = context()->sampleRate();
+ float sampleRate = context().sampleRate();
// Create double buffers on both the input and output sides.
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
@@ -182,14 +183,14 @@ void ScriptProcessorNode::process(size_t framesToProcess)
return;
for (unsigned i = 0; i < numberOfInputChannels; i++)
- m_internalInputBus->setChannelMemory(i, inputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);
+ m_internalInputBus->setChannelMemory(i, inputBuffer->channelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);
if (numberOfInputChannels)
m_internalInputBus->copyFrom(*inputBus);
// Copy from the output buffer to the output.
for (unsigned i = 0; i < numberOfOutputChannels; ++i)
- memcpy(outputBus->channel(i)->mutableData(), outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);
+ memcpy(outputBus->channel(i)->mutableData(), outputBuffer->channelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);
// Update the buffering index.
m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();
@@ -210,30 +211,20 @@ void ScriptProcessorNode::process(size_t framesToProcess)
// Fire the event on the main thread, not this one (which is the realtime audio thread).
m_doubleBufferIndexForEvent = m_doubleBufferIndex;
m_isRequestOutstanding = true;
- callOnMainThread(fireProcessEventDispatch, this);
- }
-
- swapBuffers();
- }
-}
-void ScriptProcessorNode::setOnaudioprocess(PassRefPtr<EventListener> listener)
-{
- m_hasAudioProcessListener = listener;
- setAttributeEventListener(eventNames().audioprocessEvent, listener);
-}
+ callOnMainThread([this] {
+ if (!m_hasAudioProcessListener)
+ return;
-void ScriptProcessorNode::fireProcessEventDispatch(void* userData)
-{
- ScriptProcessorNode* jsAudioNode = static_cast<ScriptProcessorNode*>(userData);
- ASSERT(jsAudioNode);
- if (!jsAudioNode)
- return;
+ fireProcessEvent();
- jsAudioNode->fireProcessEvent();
+ // De-reference to match the ref() call in process().
+ deref();
+ });
+ }
- // De-reference to match the ref() call in process().
- jsAudioNode->deref();
+ swapBuffers();
+ }
}
void ScriptProcessorNode::fireProcessEvent()
@@ -252,12 +243,16 @@ void ScriptProcessorNode::fireProcessEvent()
return;
// Avoid firing the event if the document has already gone away.
- if (context()->scriptExecutionContext()) {
+ if (context().scriptExecutionContext()) {
// Let the audio thread know we've gotten to the point where it's OK for it to make another request.
m_isRequestOutstanding = false;
-
+
+ // Calculate playbackTime from the buffer size processed on each onaudioprocess call.
+ // Because of double-buffering, the outputBuffer passed to JS starts playing only after the previous outputBuffer has been exhausted.
+ double playbackTime = (context().currentSampleFrame() + m_bufferSize) / static_cast<double>(context().sampleRate());
+
// Call the JavaScript event handler which will do the audio processing.
- dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
+ dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
}
}
@@ -282,6 +277,28 @@ double ScriptProcessorNode::latencyTime() const
return std::numeric_limits<double>::infinity();
}
+bool ScriptProcessorNode::addEventListener(const AtomicString& eventType, Ref<EventListener>&& listener, const AddEventListenerOptions& options)
+{
+ bool success = AudioNode::addEventListener(eventType, WTFMove(listener), options);
+ if (success && eventType == eventNames().audioprocessEvent)
+ m_hasAudioProcessListener = hasEventListeners(eventNames().audioprocessEvent);
+ return success;
+}
+
+bool ScriptProcessorNode::removeEventListener(const AtomicString& eventType, EventListener& listener, const ListenerOptions& options)
+{
+ bool success = AudioNode::removeEventListener(eventType, listener, options);
+ if (success && eventType == eventNames().audioprocessEvent)
+ m_hasAudioProcessListener = hasEventListeners(eventNames().audioprocessEvent);
+ return success;
+}
+
+void ScriptProcessorNode::removeAllEventListeners()
+{
+ m_hasAudioProcessListener = false;
+ AudioNode::removeAllEventListeners();
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)
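
The lambda handed to callOnMainThread() above preserves the pairing that the deleted fireProcessEventDispatch() implemented by hand: process() ref()s the node before posting, and the main-thread task deref()s after firing, so the node cannot be destroyed while an event is in flight. A minimal sketch of that keep-alive pattern (illustrative; the stand-in dispatcher runs the task inline rather than hopping threads):

    #include <atomic>
    #include <cstdio>
    #include <functional>

    struct Node {
        std::atomic<int> refCount { 1 };
        void ref() { ++refCount; }
        void deref()
        {
            if (--refCount == 0) {
                std::puts("node destroyed");
                delete this;
            }
        }
        void fireProcessEvent() { std::puts("audioprocess fired"); }
    };

    static void postToMainThread(std::function<void()> task) { task(); } // stand-in for callOnMainThread

    static void requestProcessEvent(Node* node)
    {
        node->ref(); // audio thread: pin the node before it crosses threads
        postToMainThread([node] {
            node->fireProcessEvent();
            node->deref(); // main thread: matches the ref() taken above
        });
    }

    int main()
    {
        Node* node = new Node;
        requestProcessEvent(node);
        node->deref(); // drop the creating reference
    }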
diff --git a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h
index 15bb27b29..4c6bafe94 100644
--- a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h
+++ b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ScriptProcessorNode_h
-#define ScriptProcessorNode_h
+#pragma once
#include "ActiveDOMObject.h"
#include "AudioBus.h"
@@ -31,7 +30,6 @@
#include "EventListener.h"
#include "EventTarget.h"
#include <wtf/Forward.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/Vector.h>
@@ -53,30 +51,30 @@ public:
// This value controls how frequently the onaudioprocess event handler is called and how many sample-frames need to be processed each call.
// Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches.
// The value chosen must carefully balance between latency and audio quality.
- static PassRefPtr<ScriptProcessorNode> create(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+ static RefPtr<ScriptProcessorNode> create(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
virtual ~ScriptProcessorNode();
// AudioNode
- virtual void process(size_t framesToProcess) override;
- virtual void reset() override;
- virtual void initialize() override;
- virtual void uninitialize() override;
+ void process(size_t framesToProcess) override;
+ void reset() override;
+ void initialize() override;
+ void uninitialize() override;
size_t bufferSize() const { return m_bufferSize; }
- EventListener* onaudioprocess() { return getAttributeEventListener(eventNames().audioprocessEvent); }
- void setOnaudioprocess(PassRefPtr<EventListener>);
-
private:
- virtual double tailTime() const override;
- virtual double latencyTime() const override;
+ double tailTime() const override;
+ double latencyTime() const override;
- ScriptProcessorNode(AudioContext*, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
+ ScriptProcessorNode(AudioContext&, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels);
- static void fireProcessEventDispatch(void* userData);
void fireProcessEvent();
+ bool addEventListener(const AtomicString& eventType, Ref<EventListener>&&, const AddEventListenerOptions&) override;
+ bool removeEventListener(const AtomicString& eventType, EventListener&, const ListenerOptions&) override;
+ void removeAllEventListeners() override;
+
// Double buffering
unsigned doubleBufferIndex() const { return m_doubleBufferIndex; }
void swapBuffers() { m_doubleBufferIndex = 1 - m_doubleBufferIndex; }
@@ -97,5 +95,3 @@ private:
};
} // namespace WebCore
-
-#endif // ScriptProcessorNode_h
diff --git a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.idl b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.idl
index d5dcd5033..45cc63083 100644
--- a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.idl
+++ b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.idl
@@ -29,7 +29,7 @@
JSGenerateToNativeObject
] interface ScriptProcessorNode : AudioNode {
// Rendering callback
- attribute EventListener onaudioprocess;
+ attribute EventHandler onaudioprocess;
readonly attribute long bufferSize;
};
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h b/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h
index 7f5335a49..43a686eb9 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h
+++ b/Source/WebCore/Modules/webaudio/WaveShaperDSPKernel.h
@@ -22,8 +22,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WaveShaperDSPKernel_h
-#define WaveShaperDSPKernel_h
+#pragma once
#include "AudioArray.h"
#include "AudioDSPKernel.h"
@@ -34,8 +33,6 @@
namespace WebCore {
-class WaveShaperProcessor;
-
// WaveShaperDSPKernel is an AudioDSPKernel and is responsible for non-linear distortion on one channel.
class WaveShaperDSPKernel : public AudioDSPKernel {
@@ -43,10 +40,10 @@ public:
explicit WaveShaperDSPKernel(WaveShaperProcessor*);
// AudioDSPKernel
- virtual void process(const float* source, float* dest, size_t framesToProcess);
- virtual void reset();
- virtual double tailTime() const override { return 0; }
- virtual double latencyTime() const override;
+ void process(const float* source, float* dest, size_t framesToProcess) override;
+ void reset() override;
+ double tailTime() const override { return 0; }
+ double latencyTime() const override;
// Oversampling requires more resources, so let's only allocate them if needed.
void lazyInitializeOversampling();
@@ -71,5 +68,3 @@ protected:
};
} // namespace WebCore
-
-#endif // WaveShaperDSPKernel_h
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp b/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
index 69ec27a2c..50cf0630c 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
+++ b/Source/WebCore/Modules/webaudio/WaveShaperNode.cpp
@@ -23,29 +23,28 @@
*/
#include "config.h"
+#include "WaveShaperNode.h"
#if ENABLE(WEB_AUDIO)
-#include "WaveShaperNode.h"
-
-#include "ExceptionCode.h"
+#include "AudioContext.h"
#include <wtf/MainThread.h>
namespace WebCore {
-WaveShaperNode::WaveShaperNode(AudioContext* context)
- : AudioBasicProcessorNode(context, context->sampleRate())
+WaveShaperNode::WaveShaperNode(AudioContext& context)
+ : AudioBasicProcessorNode(context, context.sampleRate())
{
- m_processor = std::make_unique<WaveShaperProcessor>(context->sampleRate(), 1);
+ m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
setNodeType(NodeTypeWaveShaper);
initialize();
}
-void WaveShaperNode::setCurve(Float32Array* curve)
+void WaveShaperNode::setCurve(Float32Array& curve)
{
ASSERT(isMainThread());
- waveShaperProcessor()->setCurve(curve);
+ waveShaperProcessor()->setCurve(&curve);
}
Float32Array* WaveShaperNode::curve()
@@ -53,36 +52,41 @@ Float32Array* WaveShaperNode::curve()
return waveShaperProcessor()->curve();
}
-void WaveShaperNode::setOversample(const String& type, ExceptionCode& ec)
+static inline WaveShaperProcessor::OverSampleType processorType(WaveShaperNode::OverSampleType type)
+{
+ switch (type) {
+ case WaveShaperNode::OverSampleType::None:
+ return WaveShaperProcessor::OverSampleNone;
+ case WaveShaperNode::OverSampleType::_2x:
+ return WaveShaperProcessor::OverSample2x;
+ case WaveShaperNode::OverSampleType::_4x:
+ return WaveShaperProcessor::OverSample4x;
+ }
+ ASSERT_NOT_REACHED();
+ return WaveShaperProcessor::OverSampleNone;
+}
+
+void WaveShaperNode::setOversample(OverSampleType type)
{
ASSERT(isMainThread());
// Synchronize with any graph changes or changes to channel configuration.
- AudioContext::AutoLocker contextLocker(*context());
-
- if (type == "none")
- waveShaperProcessor()->setOversample(WaveShaperProcessor::OverSampleNone);
- else if (type == "2x")
- waveShaperProcessor()->setOversample(WaveShaperProcessor::OverSample2x);
- else if (type == "4x")
- waveShaperProcessor()->setOversample(WaveShaperProcessor::OverSample4x);
- else
- ec = INVALID_STATE_ERR;
+ AudioContext::AutoLocker contextLocker(context());
+ waveShaperProcessor()->setOversample(processorType(type));
}
-String WaveShaperNode::oversample() const
+auto WaveShaperNode::oversample() const -> OverSampleType
{
switch (const_cast<WaveShaperNode*>(this)->waveShaperProcessor()->oversample()) {
case WaveShaperProcessor::OverSampleNone:
- return "none";
+ return OverSampleType::None;
case WaveShaperProcessor::OverSample2x:
- return "2x";
+ return OverSampleType::_2x;
case WaveShaperProcessor::OverSample4x:
- return "4x";
- default:
- ASSERT_NOT_REACHED();
- return "none";
+ return OverSampleType::_4x;
}
+ ASSERT_NOT_REACHED();
+ return OverSampleType::None;
}
} // namespace WebCore
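
Both rewritten switches above deliberately omit a default: case: with a fully covered enum class, the compiler can warn (e.g. -Wswitch) when a new enumerator is added, and the trailing ASSERT_NOT_REACHED() plus return exist only to satisfy control-flow analysis. The shape of the pattern, illustratively:

    #include <cassert>

    enum class OverSampleType { None, _2x, _4x };

    static int oversampleFactor(OverSampleType type)
    {
        switch (type) { // no default: adding an enumerator triggers a -Wswitch warning here
        case OverSampleType::None:
            return 1;
        case OverSampleType::_2x:
            return 2;
        case OverSampleType::_4x:
            return 4;
        }
        assert(false); // unreachable; quiets compilers that cannot prove exhaustiveness
        return 1;
    }

    int main()
    {
        return oversampleFactor(OverSampleType::_4x) == 4 ? 0 : 1;
    }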
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperNode.h b/Source/WebCore/Modules/webaudio/WaveShaperNode.h
index cb65372a1..773c04fc0 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperNode.h
+++ b/Source/WebCore/Modules/webaudio/WaveShaperNode.h
@@ -22,38 +22,35 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WaveShaperNode_h
-#define WaveShaperNode_h
+#pragma once
#include "AudioBasicProcessorNode.h"
-#include "BiquadProcessor.h"
#include "WaveShaperProcessor.h"
#include <wtf/Forward.h>
namespace WebCore {
-
-class WaveShaperNode : public AudioBasicProcessorNode {
+
+class WaveShaperNode final : public AudioBasicProcessorNode {
public:
- static PassRefPtr<WaveShaperNode> create(AudioContext* context)
+ static Ref<WaveShaperNode> create(AudioContext& context)
{
- return adoptRef(new WaveShaperNode(context));
+ return adoptRef(*new WaveShaperNode(context));
}
// setCurve() is called on the main thread.
- void setCurve(Float32Array*);
+ void setCurve(Float32Array&);
Float32Array* curve();
- void setOversample(const String& , ExceptionCode&);
- String oversample() const;
+ enum class OverSampleType { None, _2x, _4x };
+ void setOversample(OverSampleType);
+ OverSampleType oversample() const;
double latency() const { return latencyTime(); }
private:
- explicit WaveShaperNode(AudioContext*);
+ explicit WaveShaperNode(AudioContext&);
WaveShaperProcessor* waveShaperProcessor() { return static_cast<WaveShaperProcessor*>(processor()); }
};
} // namespace WebCore
-
-#endif // WaveShaperNode_h
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperNode.idl b/Source/WebCore/Modules/webaudio/WaveShaperNode.idl
index 8a377536c..965656a73 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperNode.idl
+++ b/Source/WebCore/Modules/webaudio/WaveShaperNode.idl
@@ -33,5 +33,5 @@ enum OverSampleType {
JSGenerateToJSObject
] interface WaveShaperNode : AudioNode {
attribute Float32Array curve;
- [SetterRaisesException] attribute OverSampleType oversample;
+ attribute OverSampleType oversample;
};
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp b/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp
index c2d6de0af..289c39f5f 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp
+++ b/Source/WebCore/Modules/webaudio/WaveShaperProcessor.cpp
@@ -52,7 +52,7 @@ std::unique_ptr<AudioDSPKernel> WaveShaperProcessor::createKernel()
void WaveShaperProcessor::setCurve(Float32Array* curve)
{
// This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
m_curve = curve;
}
@@ -60,14 +60,14 @@ void WaveShaperProcessor::setCurve(Float32Array* curve)
void WaveShaperProcessor::setOversample(OverSampleType oversample)
{
// This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
m_oversample = oversample;
if (oversample != OverSampleNone) {
- for (unsigned i = 0; i < m_kernels.size(); ++i) {
- WaveShaperDSPKernel* kernel = static_cast<WaveShaperDSPKernel*>(m_kernels[i].get());
- kernel->lazyInitializeOversampling();
+ for (auto& audioDSPKernel : m_kernels) {
+ WaveShaperDSPKernel& kernel = static_cast<WaveShaperDSPKernel&>(*audioDSPKernel);
+ kernel.lazyInitializeOversampling();
}
}
}
@@ -85,7 +85,7 @@ void WaveShaperProcessor::process(const AudioBus* source, AudioBus* destination,
return;
// The audio thread can't block on this lock, so we use std::try_to_lock instead.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of a setCurve() call.
destination->zero();
diff --git a/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h b/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h
index 7bda31da7..51e39394c 100644
--- a/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h
+++ b/Source/WebCore/Modules/webaudio/WaveShaperProcessor.h
@@ -22,15 +22,14 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef WaveShaperProcessor_h
-#define WaveShaperProcessor_h
+#pragma once
#include "AudioDSPKernel.h"
#include "AudioDSPKernelProcessor.h"
#include "AudioNode.h"
#include <memory>
-#include <mutex>
#include <runtime/Float32Array.h>
+#include <wtf/Lock.h>
#include <wtf/RefPtr.h>
namespace WebCore {
@@ -49,9 +48,9 @@ public:
virtual ~WaveShaperProcessor();
- virtual std::unique_ptr<AudioDSPKernel> createKernel() override;
+ std::unique_ptr<AudioDSPKernel> createKernel() override;
- virtual void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
+ void process(const AudioBus* source, AudioBus* destination, size_t framesToProcess) override;
void setCurve(Float32Array*);
Float32Array* curve() { return m_curve.get(); }
@@ -66,9 +65,7 @@ private:
OverSampleType m_oversample;
// This synchronizes process() with setCurve().
- mutable std::mutex m_processMutex;
+ mutable Lock m_processMutex;
};
} // namespace WebCore
-
-#endif // WaveShaperProcessor_h
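
The WTF::Lock members introduced throughout these files all follow the same realtime discipline: the main thread takes the lock to mutate shared state (setCurve(), setOversample(), setPeriodicWave()), while the audio thread only try-locks in process() and emits silence on contention, because blocking there would glitch the entire render quantum. A minimal sketch using std::mutex in place of WTF::Lock:

    #include <cstddef>
    #include <mutex>

    static std::mutex processMutex;

    static void renderQuantum(float* out, std::size_t frames)
    {
        // Audio thread: never block. If the main thread is mid-update, emit silence.
        std::unique_lock<std::mutex> lock(processMutex, std::try_to_lock);
        if (!lock.owns_lock()) {
            for (std::size_t i = 0; i < frames; ++i)
                out[i] = 0;
            return;
        }
        // Lock held: safe to read the curve / oversampling state for this quantum.
    }

    int main()
    {
        float buffer[128] = {};
        renderQuantum(buffer, 128);
    }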