From 1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c Mon Sep 17 00:00:00 2001
From: Lorry Tar Creator
Date: Tue, 27 Jun 2017 06:07:23 +0000
Subject: webkitgtk-2.16.5

---
 Source/WebCore/Modules/webaudio/AnalyserNode.cpp   |  35 +-
 Source/WebCore/Modules/webaudio/AnalyserNode.h     |  40 +-
 Source/WebCore/Modules/webaudio/AnalyserNode.idl   |  14 +-
 .../WebCore/Modules/webaudio/AsyncAudioDecoder.cpp |  44 +-
 .../WebCore/Modules/webaudio/AsyncAudioDecoder.h   |  20 +-
 .../Modules/webaudio/AudioBasicInspectorNode.cpp   |  28 +-
 .../Modules/webaudio/AudioBasicInspectorNode.h     |  21 +-
 .../Modules/webaudio/AudioBasicProcessorNode.cpp   |   4 +-
 .../Modules/webaudio/AudioBasicProcessorNode.h     |  24 +-
 Source/WebCore/Modules/webaudio/AudioBuffer.cpp    |  93 +--
 Source/WebCore/Modules/webaudio/AudioBuffer.h      |  34 +-
 Source/WebCore/Modules/webaudio/AudioBuffer.idl    |  11 +-
 .../WebCore/Modules/webaudio/AudioBufferCallback.h |   7 +-
 .../Modules/webaudio/AudioBufferCallback.idl       |   4 +-
 .../Modules/webaudio/AudioBufferSourceNode.cpp     | 277 +++++---
 .../Modules/webaudio/AudioBufferSourceNode.h       |  54 +-
 .../Modules/webaudio/AudioBufferSourceNode.idl     |  22 +-
 Source/WebCore/Modules/webaudio/AudioContext.cpp   | 712 ++++++++++++---------
 Source/WebCore/Modules/webaudio/AudioContext.h     | 251 +++++---
 Source/WebCore/Modules/webaudio/AudioContext.idl   |  66 +-
 .../Modules/webaudio/AudioDestinationNode.cpp      |  62 +-
 .../Modules/webaudio/AudioDestinationNode.h        |  66 +-
 Source/WebCore/Modules/webaudio/AudioListener.cpp  |   2 +-
 Source/WebCore/Modules/webaudio/AudioListener.h    |  15 +-
 Source/WebCore/Modules/webaudio/AudioListener.idl  |  12 +-
 Source/WebCore/Modules/webaudio/AudioNode.cpp      | 205 +++---
 Source/WebCore/Modules/webaudio/AudioNode.h        |  41 +-
 Source/WebCore/Modules/webaudio/AudioNode.idl      |  25 +-
 Source/WebCore/Modules/webaudio/AudioNodeInput.cpp |  24 +-
 Source/WebCore/Modules/webaudio/AudioNodeInput.h   |  10 +-
 .../WebCore/Modules/webaudio/AudioNodeOutput.cpp   |  45 +-
 Source/WebCore/Modules/webaudio/AudioNodeOutput.h  |   8 +-
 Source/WebCore/Modules/webaudio/AudioParam.cpp     |  22 +-
 Source/WebCore/Modules/webaudio/AudioParam.h       |  20 +-
 Source/WebCore/Modules/webaudio/AudioParam.idl     |  25 +-
 .../Modules/webaudio/AudioParamTimeline.cpp        |  28 +-
 .../WebCore/Modules/webaudio/AudioParamTimeline.h  |  18 +-
 .../Modules/webaudio/AudioProcessingEvent.cpp      |  17 +-
 .../Modules/webaudio/AudioProcessingEvent.h        |  23 +-
 .../Modules/webaudio/AudioProcessingEvent.idl      |   1 +
 .../Modules/webaudio/AudioScheduledSourceNode.cpp  | 113 ++--
 .../Modules/webaudio/AudioScheduledSourceNode.h    |  51 +-
 .../Modules/webaudio/AudioSummingJunction.cpp      |  19 +-
 .../Modules/webaudio/AudioSummingJunction.h        |  11 +-
 .../WebCore/Modules/webaudio/BiquadDSPKernel.cpp   |  16 +-
 Source/WebCore/Modules/webaudio/BiquadDSPKernel.h  |  13 +-
 .../WebCore/Modules/webaudio/BiquadFilterNode.cpp  |  73 +--
 Source/WebCore/Modules/webaudio/BiquadFilterNode.h |  34 +-
 .../WebCore/Modules/webaudio/BiquadFilterNode.idl  |  32 +-
 .../WebCore/Modules/webaudio/BiquadProcessor.cpp   |  11 +-
 Source/WebCore/Modules/webaudio/BiquadProcessor.h  |  39 +-
 .../WebCore/Modules/webaudio/ChannelMergerNode.cpp |  10 +-
 .../WebCore/Modules/webaudio/ChannelMergerNode.h   |  22 +-
 .../WebCore/Modules/webaudio/ChannelMergerNode.idl |   2 +-
 .../Modules/webaudio/ChannelSplitterNode.cpp       |   6 +-
 .../WebCore/Modules/webaudio/ChannelSplitterNode.h |  18 +-
 Source/WebCore/Modules/webaudio/ConvolverNode.cpp  |  43 +-
 Source/WebCore/Modules/webaudio/ConvolverNode.h    |  41 +-
 Source/WebCore/Modules/webaudio/ConvolverNode.idl  |   2 +-
 .../webaudio/DefaultAudioDestinationNode.cpp       |  58 +-
 .../Modules/webaudio/DefaultAudioDestinationNode.h |  41 +-
 Source/WebCore/Modules/webaudio/DelayDSPKernel.h   |  13 +-
 Source/WebCore/Modules/webaudio/DelayNode.cpp      |  17 +-
 Source/WebCore/Modules/webaudio/DelayNode.h        |  19 +-
 Source/WebCore/Modules/webaudio/DelayProcessor.cpp |   2 +-
 Source/WebCore/Modules/webaudio/DelayProcessor.h   |   9 +-
 .../Modules/webaudio/DynamicsCompressorNode.cpp    |   2 +-
 .../Modules/webaudio/DynamicsCompressorNode.h      |  23 +-
 Source/WebCore/Modules/webaudio/GainNode.cpp       |   4 +-
 Source/WebCore/Modules/webaudio/GainNode.h         |  22 +-
 .../webaudio/MediaElementAudioSourceNode.cpp       |  18 +-
 .../Modules/webaudio/MediaElementAudioSourceNode.h |  30 +-
 .../webaudio/MediaStreamAudioDestinationNode.cpp   |  22 +-
 .../webaudio/MediaStreamAudioDestinationNode.h     |  22 +-
 .../Modules/webaudio/MediaStreamAudioSource.cpp    |  46 +-
 .../Modules/webaudio/MediaStreamAudioSource.h      |  32 +-
 .../webaudio/MediaStreamAudioSourceNode.cpp        |  95 +--
 .../Modules/webaudio/MediaStreamAudioSourceNode.h  |  42 +-
 .../webaudio/OfflineAudioCompletionEvent.cpp       |  18 +-
 .../Modules/webaudio/OfflineAudioCompletionEvent.h |  16 +-
 .../Modules/webaudio/OfflineAudioContext.cpp       |  34 +-
 .../WebCore/Modules/webaudio/OfflineAudioContext.h |  11 +-
 .../Modules/webaudio/OfflineAudioContext.idl       |   6 +-
 .../webaudio/OfflineAudioDestinationNode.cpp       |  38 +-
 .../Modules/webaudio/OfflineAudioDestinationNode.h |  23 +-
 Source/WebCore/Modules/webaudio/OscillatorNode.cpp | 114 ++--
 Source/WebCore/Modules/webaudio/OscillatorNode.h   |  54 +-
 Source/WebCore/Modules/webaudio/OscillatorNode.idl |  31 +-
 Source/WebCore/Modules/webaudio/PannerNode.cpp     | 135 ++--
 Source/WebCore/Modules/webaudio/PannerNode.h       |  65 +-
 Source/WebCore/Modules/webaudio/PannerNode.idl     |  49 +-
 Source/WebCore/Modules/webaudio/PeriodicWave.cpp   |  58 +-
 Source/WebCore/Modules/webaudio/PeriodicWave.h     |  27 +-
 .../WebCore/Modules/webaudio/RealtimeAnalyser.cpp  |   3 +-
 Source/WebCore/Modules/webaudio/RealtimeAnalyser.h |   7 +-
 .../Modules/webaudio/ScriptProcessorNode.cpp       |  75 ++-
 .../WebCore/Modules/webaudio/ScriptProcessorNode.h |  30 +-
 .../Modules/webaudio/ScriptProcessorNode.idl       |   2 +-
 .../WebCore/Modules/webaudio/WaveShaperDSPKernel.h |  15 +-
 Source/WebCore/Modules/webaudio/WaveShaperNode.cpp |  56 +-
 Source/WebCore/Modules/webaudio/WaveShaperNode.h   |  23 +-
 Source/WebCore/Modules/webaudio/WaveShaperNode.idl |   2 +-
 .../Modules/webaudio/WaveShaperProcessor.cpp       |  12 +-
 .../WebCore/Modules/webaudio/WaveShaperProcessor.h |  13 +-
 104 files changed, 2131 insertions(+), 2289 deletions(-)

(limited to 'Source/WebCore/Modules/webaudio')

diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.cpp b/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
index b54d3ae28..6aff9a63d 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.cpp
@@ -30,11 +30,10 @@
 
 #include "AudioNodeInput.h"
 #include "AudioNodeOutput.h"
-#include "ExceptionCode.h"
 
 namespace WebCore {
 
-AnalyserNode::AnalyserNode(AudioContext* context, float sampleRate)
+AnalyserNode::AnalyserNode(AudioContext& context, float sampleRate)
     : AudioBasicInspectorNode(context, sampleRate, 2)
 {
     setNodeType(NodeTypeAnalyser);
@@ -72,40 +71,38 @@ void AnalyserNode::reset()
     m_analyser.reset();
 }
 
-void AnalyserNode::setFftSize(unsigned size, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setFftSize(unsigned size)
 {
     if (!m_analyser.setFftSize(size))
-        ec = INDEX_SIZE_ERR;
+        return Exception { INDEX_SIZE_ERR };
+    return { };
 }
 
-void AnalyserNode::setMinDecibels(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setMinDecibels(double k)
 {
-    if (k > maxDecibels()) {
-        ec = INDEX_SIZE_ERR;
-        return;
-    }
+    if (k > maxDecibels())
+        return Exception { INDEX_SIZE_ERR };
 
     m_analyser.setMinDecibels(k);
+    return { };
 }
 
-void AnalyserNode::setMaxDecibels(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setMaxDecibels(double k)
 {
-    if (k < minDecibels()) {
-        ec = INDEX_SIZE_ERR;
-        return;
-    }
+    if (k < minDecibels())
+        return Exception { INDEX_SIZE_ERR };
 
     m_analyser.setMaxDecibels(k);
+    return { };
 }
 
-void AnalyserNode::setSmoothingTimeConstant(double k, ExceptionCode& ec)
+ExceptionOr<void> AnalyserNode::setSmoothingTimeConstant(double k)
 {
-    if (k < 0 || k > 1) {
-        ec = INDEX_SIZE_ERR;
-        return;
-    }
+    if (k < 0 || k > 1)
+        return Exception { INDEX_SIZE_ERR };
 
     m_analyser.setSmoothingTimeConstant(k);
+    return { };
 }
 
 } // namespace WebCore
diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.h b/Source/WebCore/Modules/webaudio/AnalyserNode.h
index 290c67aa9..8bc63b442 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.h
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.h
@@ -22,56 +22,50 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AnalyserNode_h
-#define AnalyserNode_h
+#pragma once
 
 #include "AudioBasicInspectorNode.h"
 #include "RealtimeAnalyser.h"
-#include
 
 namespace WebCore {
 
-class AnalyserNode : public AudioBasicInspectorNode {
+class AnalyserNode final : public AudioBasicInspectorNode {
 public:
-    static PassRefPtr<AnalyserNode> create(AudioContext* context, float sampleRate)
+    static Ref<AnalyserNode> create(AudioContext& context, float sampleRate)
     {
-        return adoptRef(new AnalyserNode(context, sampleRate));
+        return adoptRef(*new AnalyserNode(context, sampleRate));
     }
 
     virtual ~AnalyserNode();
-
-    // AudioNode
-    virtual void process(size_t framesToProcess) override;
-    virtual void reset() override;
-
     // Javascript bindings
     unsigned fftSize() const { return m_analyser.fftSize(); }
-    void setFftSize(unsigned size, ExceptionCode&);
+    ExceptionOr<void> setFftSize(unsigned);
 
     unsigned frequencyBinCount() const { return m_analyser.frequencyBinCount(); }
 
-    void setMinDecibels(double k, ExceptionCode&);
+    ExceptionOr<void> setMinDecibels(double);
     double minDecibels() const { return m_analyser.minDecibels(); }
 
-    void setMaxDecibels(double k, ExceptionCode&);
+    ExceptionOr<void> setMaxDecibels(double);
     double maxDecibels() const { return m_analyser.maxDecibels(); }
 
-    void setSmoothingTimeConstant(double k, ExceptionCode&);
+    ExceptionOr<void> setSmoothingTimeConstant(double);
    double smoothingTimeConstant() const { return m_analyser.smoothingTimeConstant(); }
 
-    void getFloatFrequencyData(JSC::Float32Array* array) { m_analyser.getFloatFrequencyData(array); }
-    void getByteFrequencyData(JSC::Uint8Array* array) { m_analyser.getByteFrequencyData(array); }
-    void getByteTimeDomainData(JSC::Uint8Array* array) { m_analyser.getByteTimeDomainData(array); }
+    void getFloatFrequencyData(const RefPtr<JSC::Float32Array>& array) { m_analyser.getFloatFrequencyData(array.get()); }
+    void getByteFrequencyData(const RefPtr<JSC::Uint8Array>& array) { m_analyser.getByteFrequencyData(array.get()); }
+    void getByteTimeDomainData(const RefPtr<JSC::Uint8Array>& array) { m_analyser.getByteTimeDomainData(array.get()); }
 
 private:
-    virtual double tailTime() const override { return 0; }
-    virtual double latencyTime() const override { return 0; }
+    AnalyserNode(AudioContext&, float sampleRate);
 
-    AnalyserNode(AudioContext*, float sampleRate);
+    void process(size_t framesToProcess) final;
+    void reset() final;
+
+    double tailTime() const final { return 0; }
+    double latencyTime() const final { return 0; }
 
     RealtimeAnalyser m_analyser;
 };
 
 } // namespace WebCore
-
-#endif // AnalyserNode_h
diff --git a/Source/WebCore/Modules/webaudio/AnalyserNode.idl b/Source/WebCore/Modules/webaudio/AnalyserNode.idl
index 458df84a6..d43f95910 100644
--- a/Source/WebCore/Modules/webaudio/AnalyserNode.idl
+++ b/Source/WebCore/Modules/webaudio/AnalyserNode.idl
@@ -26,21 +26,21 @@
     Conditional=WEB_AUDIO,
     JSGenerateToJSObject,
 ] interface AnalyserNode : AudioNode {
-    [SetterRaisesException] attribute unsigned long fftSize;
+    [SetterMayThrowException] attribute unsigned long fftSize;
     readonly attribute unsigned long frequencyBinCount;
 
     // minDecibels / maxDecibels represent the range to scale the FFT analysis data for conversion to unsigned byte values.
-    [SetterRaisesException] attribute double minDecibels;
-    [SetterRaisesException] attribute double maxDecibels;
+    [SetterMayThrowException] attribute unrestricted double minDecibels;
+    [SetterMayThrowException] attribute unrestricted double maxDecibels;
 
     // A value from 0.0 -> 1.0 where 0.0 represents no time averaging with the last analysis frame.
-    [SetterRaisesException] attribute double smoothingTimeConstant;
+    [SetterMayThrowException] attribute unrestricted double smoothingTimeConstant;
 
     // Copies the current frequency data into the passed array.
     // If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped.
-    void getFloatFrequencyData(Float32Array array);
-    void getByteFrequencyData(Uint8Array array);
+    void getFloatFrequencyData(Float32Array? array); // FIXME: The parameter should not be nullable.
+    void getByteFrequencyData(Uint8Array? array); // FIXME: The parameter should not be nullable.
 
     // Real-time waveform data
-    void getByteTimeDomainData(Uint8Array array);
+    void getByteTimeDomainData(Uint8Array? array); // FIXME: The parameter should not be nullable.
 };
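Two conventions in the AnalyserNode hunks above recur through the rest of this patch: the context is passed as `AudioContext&` rather than a pointer, and setters that used to write an `ExceptionCode&` out-parameter now return `ExceptionOr<void>`. The following is a minimal standalone sketch of that error-return style; `Exception`, `ExceptionOrVoid`, and `Analyser` are toy stand-ins here, not WebKit's real `ExceptionOr` machinery from ExceptionOr.h:

    // Sketch of the ExceptionOr-style setter pattern (hypothetical types).
    #include <iostream>
    #include <optional>

    struct Exception { int code; };

    // Simplified stand-in for WebCore::ExceptionOr<void>: either empty
    // (success) or carrying an Exception.
    struct ExceptionOrVoid {
        std::optional<Exception> exception;
        bool hasException() const { return exception.has_value(); }
    };

    struct Analyser {
        double minDecibels { -100 };
        double maxDecibels { -30 };

        // New style: report failure through the return value instead of an
        // ExceptionCode& out-parameter.
        ExceptionOrVoid setMinDecibels(double k)
        {
            if (k > maxDecibels)
                return { Exception { 1 /* INDEX_SIZE_ERR */ } };
            minDecibels = k;
            return { };
        }
    };

    int main()
    {
        Analyser analyser;
        auto result = analyser.setMinDecibels(0); // 0 > maxDecibels, so this fails
        std::cout << (result.hasException() ? "threw" : "ok") << '\n';
    }

The payoff, visible throughout the diff, is that success and failure share one return path, so the `return { };` at the end of each setter replaces the old silent fall-through.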
diff --git a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
index 3698bf5ef..292f05b8c 100644
--- a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
+++ b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.cpp
@@ -38,7 +38,7 @@ namespace WebCore {
 AsyncAudioDecoder::AsyncAudioDecoder()
 {
     // Start worker thread.
-    MutexLocker lock(m_threadCreationMutex);
+    LockHolder lock(m_threadCreationMutex);
     m_threadID = createThread(AsyncAudioDecoder::threadEntry, this, "Audio Decoder");
 }
 
@@ -51,15 +51,12 @@ AsyncAudioDecoder::~AsyncAudioDecoder()
     m_threadID = 0;
 }
 
-void AsyncAudioDecoder::decodeAsync(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
+void AsyncAudioDecoder::decodeAsync(Ref<ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
 {
     ASSERT(isMainThread());
-    ASSERT(audioData);
-    if (!audioData)
-        return;
 
-    auto decodingTask = DecodingTask::create(audioData, sampleRate, successCallback, errorCallback);
-    m_queue.append(std::move(decodingTask)); // note that ownership of the task is effectively taken by the queue.
+    auto decodingTask = std::make_unique<DecodingTask>(WTFMove(audioData), sampleRate, WTFMove(successCallback), WTFMove(errorCallback));
+    m_queue.append(WTFMove(decodingTask)); // note that ownership of the task is effectively taken by the queue.
 }
 
 // Asynchronously decode in this thread.
@@ -76,7 +73,7 @@ void AsyncAudioDecoder::runLoop()
 {
     // Wait for until we have m_threadID established before starting the run loop.
     {
-        MutexLocker lock(m_threadCreationMutex);
+        LockHolder lock(m_threadCreationMutex);
     }
 
     // Keep running decoding tasks until we're killed.
@@ -87,40 +84,23 @@ void AsyncAudioDecoder::runLoop()
     }
 }
 
-std::unique_ptr<AsyncAudioDecoder::DecodingTask> AsyncAudioDecoder::DecodingTask::create(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
-{
-    return std::unique_ptr<DecodingTask>(new DecodingTask(audioData, sampleRate, successCallback, errorCallback));
-}
-
-AsyncAudioDecoder::DecodingTask::DecodingTask(ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback)
-    : m_audioData(audioData)
+AsyncAudioDecoder::DecodingTask::DecodingTask(Ref<ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
+    : m_audioData(WTFMove(audioData))
     , m_sampleRate(sampleRate)
-    , m_successCallback(successCallback)
-    , m_errorCallback(errorCallback)
+    , m_successCallback(WTFMove(successCallback))
+    , m_errorCallback(WTFMove(errorCallback))
 {
 }
 
 void AsyncAudioDecoder::DecodingTask::decode()
 {
-    ASSERT(m_audioData.get());
-    if (!m_audioData.get())
-        return;
-
     // Do the actual decoding and invoke the callback.
     m_audioBuffer = AudioBuffer::createFromAudioFileData(m_audioData->data(), m_audioData->byteLength(), false, sampleRate());
 
     // Decoding is finished, but we need to do the callbacks on the main thread.
-    callOnMainThread(notifyCompleteDispatch, this);
-}
-
-void AsyncAudioDecoder::DecodingTask::notifyCompleteDispatch(void* userData)
-{
-    DecodingTask* task = reinterpret_cast<DecodingTask*>(userData);
-    ASSERT(task);
-    if (!task)
-        return;
-
-    task->notifyComplete();
+    callOnMainThread([this] {
+        notifyComplete();
+    });
 }
 
 void AsyncAudioDecoder::DecodingTask::notifyComplete()
diff --git a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
index ff096ecaf..bb5c5e337 100644
--- a/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
+++ b/Source/WebCore/Modules/webaudio/AsyncAudioDecoder.h
@@ -22,13 +22,11 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AsyncAudioDecoder_h
-#define AsyncAudioDecoder_h
+#pragma once
 
 #include
 #include
 #include
-#include
 #include
 #include
 
@@ -51,29 +49,25 @@ public:
     ~AsyncAudioDecoder();
 
     // Must be called on the main thread.
-    void decodeAsync(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
+    void decodeAsync(Ref<JSC::ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback);
 
 private:
     class DecodingTask {
         WTF_MAKE_NONCOPYABLE(DecodingTask);
     public:
-        static std::unique_ptr<DecodingTask> create(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
-
+        DecodingTask(Ref<JSC::ArrayBuffer>&& audioData, float sampleRate, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback);
         void decode();
 
     private:
-        DecodingTask(JSC::ArrayBuffer* audioData, float sampleRate, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback);
-
-        JSC::ArrayBuffer* audioData() { return m_audioData.get(); }
+        JSC::ArrayBuffer& audioData() { return m_audioData; }
         float sampleRate() const { return m_sampleRate; }
         AudioBufferCallback* successCallback() { return m_successCallback.get(); }
         AudioBufferCallback* errorCallback() { return m_errorCallback.get(); }
         AudioBuffer* audioBuffer() { return m_audioBuffer.get(); }
 
-        static void notifyCompleteDispatch(void* userData);
         void notifyComplete();
 
-        RefPtr<JSC::ArrayBuffer> m_audioData;
+        Ref<JSC::ArrayBuffer> m_audioData;
         float m_sampleRate;
         RefPtr<AudioBufferCallback> m_successCallback;
         RefPtr<AudioBufferCallback> m_errorCallback;
@@ -84,10 +78,8 @@ private:
     void runLoop();
 
     WTF::ThreadIdentifier m_threadID;
-    Mutex m_threadCreationMutex;
+    Lock m_threadCreationMutex;
    MessageQueue<DecodingTask> m_queue;
 };
 
 } // namespace WebCore
-
-#endif // AsyncAudioDecoder_h
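The AsyncAudioDecoder hunks drop the C-style `notifyCompleteDispatch(void*)` trampoline in favor of a capturing lambda handed to `callOnMainThread`. A sketch of the before/after, with a toy queue standing in for WTF's main-thread dispatcher (nothing here is WebKit API):

    // Toy dispatcher standing in for WTF's callOnMainThread.
    #include <functional>
    #include <iostream>
    #include <vector>

    std::vector<std::function<void()>> mainThreadQueue;

    // Modern form: accepts any callable, no void* casting required.
    void callOnMainThread(std::function<void()>&& task)
    {
        mainThreadQueue.push_back(std::move(task));
    }

    struct DecodingTask {
        void notifyComplete() { std::cout << "decoded\n"; }

        void decode()
        {
            // Old style needed a static trampoline taking void*:
            //   callOnMainThread(notifyCompleteDispatch, this);
            //   static void notifyCompleteDispatch(void* userData) {
            //       static_cast<DecodingTask*>(userData)->notifyComplete();
            //   }
            // New style captures `this` directly:
            callOnMainThread([this] {
                notifyComplete();
            });
        }
    };

    int main()
    {
        DecodingTask task;
        task.decode();
        for (auto& fn : mainThreadQueue) // drain the fake main-thread queue
            fn();
    }

Capturing `this` is safe here only because, as the original comment notes, ownership of the task is effectively taken by the queue, so the task outlives the posted callback.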
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
index 58fd9b2ab..27e56c4c6 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.cpp
@@ -28,15 +28,13 @@
 
 #include "AudioBasicInspectorNode.h"
 
-#include "AudioContext.h"
 #include "AudioNodeInput.h"
 #include "AudioNodeOutput.h"
 
 namespace WebCore {
 
-AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext* context, float sampleRate, unsigned outputChannelCount)
+AudioBasicInspectorNode::AudioBasicInspectorNode(AudioContext& context, float sampleRate, unsigned outputChannelCount)
     : AudioNode(context, sampleRate)
-    , m_needAutomaticPull(false)
 {
     addInput(std::make_unique<AudioNodeInput>(this));
     addOutput(std::make_unique<AudioNodeOutput>(this, outputChannelCount));
@@ -51,29 +49,31 @@ void AudioBasicInspectorNode::pullInputs(size_t framesToProcess)
     input(0)->pull(output(0)->bus(), framesToProcess);
 }
 
-void AudioBasicInspectorNode::connect(AudioNode* destination, unsigned outputIndex, unsigned inputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioBasicInspectorNode::connect(AudioNode& destination, unsigned outputIndex, unsigned inputIndex)
 {
     ASSERT(isMainThread());
 
-    AudioContext::AutoLocker locker(*context());
+    AudioContext::AutoLocker locker(context());
 
-    AudioNode::connect(destination, outputIndex, inputIndex, ec);
+    auto result = AudioNode::connect(destination, outputIndex, inputIndex);
     updatePullStatus();
+    return result;
 }
 
-void AudioBasicInspectorNode::disconnect(unsigned outputIndex, ExceptionCode& ec)
+ExceptionOr<void> AudioBasicInspectorNode::disconnect(unsigned outputIndex)
 {
     ASSERT(isMainThread());
 
-    AudioContext::AutoLocker locker(*context());
+    AudioContext::AutoLocker locker(context());
 
-    AudioNode::disconnect(outputIndex, ec);
+    auto result = AudioNode::disconnect(outputIndex);
     updatePullStatus();
+    return result;
 }
 
 void AudioBasicInspectorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
 {
-    ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+    ASSERT(context().isAudioThread() && context().isGraphOwner());
 
     ASSERT(input == this->input(0));
     if (input != this->input(0))
@@ -93,13 +93,13 @@ void AudioBasicInspectorNode::checkNumberOfChannelsForInpu
 
 void AudioBasicInspectorNode::updatePullStatus()
 {
-    ASSERT(context()->isGraphOwner());
+    ASSERT(context().isGraphOwner());
 
     if (output(0)->isConnected()) {
         // When an AudioBasicInspectorNode is connected to a downstream node, it will get pulled by the
         // downstream node, thus remove it from the context's automatic pull list.
         if (m_needAutomaticPull) {
-            context()->removeAutomaticPullNode(this);
+            context().removeAutomaticPullNode(this);
             m_needAutomaticPull = false;
         }
     } else {
@@ -107,11 +107,11 @@ void AudioBasicInspectorNode::updatePullStatus()
         if (numberOfInputConnections && !m_needAutomaticPull) {
             // When an AudioBasicInspectorNode is not connected to any downstream node while still connected from
             // upstream node(s), add it to the context's automatic pull list.
-            context()->addAutomaticPullNode(this);
+            context().addAutomaticPullNode(this);
             m_needAutomaticPull = true;
         } else if (!numberOfInputConnections && m_needAutomaticPull) {
             // The AudioBasicInspectorNode is connected to nothing, remove it from the context's automatic pull list.
-            context()->removeAutomaticPullNode(this);
+            context().removeAutomaticPullNode(this);
             m_needAutomaticPull = false;
         }
     }
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
index 2f4258ea3..b86f1a387 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBasicInspectorNode.h
@@ -22,8 +22,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AudioBasicInspectorNode_h
-#define AudioBasicInspectorNode_h
+#pragma once
 
 #include "AudioNode.h"
 
@@ -34,19 +33,17 @@ namespace WebCore {
 // AudioContext before the end of each render quantum so that it can inspect the audio stream.
 class AudioBasicInspectorNode : public AudioNode {
 public:
-    AudioBasicInspectorNode(AudioContext*, float sampleRate, unsigned outputChannelCount);
-
-    // AudioNode
-    virtual void pullInputs(size_t framesToProcess) override;
-    virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionCode&) override;
-    virtual void disconnect(unsigned outputIndex, ExceptionCode&) override;
-    virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+    AudioBasicInspectorNode(AudioContext&, float sampleRate, unsigned outputChannelCount);
 
 private:
+    void pullInputs(size_t framesToProcess) override;
+    ExceptionOr<void> connect(AudioNode&, unsigned outputIndex, unsigned inputIndex) override;
+    ExceptionOr<void> disconnect(unsigned outputIndex) override;
+    void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+
     void updatePullStatus();
-    bool m_needAutomaticPull; // When setting to true, AudioBasicInspectorNode will be pulled automaticlly by AudioContext before the end of each render quantum.
+
+    bool m_needAutomaticPull { false }; // When setting to true, AudioBasicInspectorNode will be pulled automatically by AudioContext before the end of each render quantum.
 };
 
 } // namespace WebCore
-
-#endif // AudioBasicInspectorNode_h
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
index afa67a9ad..4279a0916 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.cpp
@@ -36,7 +36,7 @@
 
 namespace WebCore {
 
-AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext* context, float sampleRate)
+AudioBasicProcessorNode::AudioBasicProcessorNode(AudioContext& context, float sampleRate)
     : AudioNode(context, sampleRate)
 {
     addInput(std::make_unique<AudioNodeInput>(this));
@@ -102,7 +102,7 @@ void AudioBasicProcessorNode::reset()
 // uninitialize and then re-initialize with the new channel count.
 void AudioBasicProcessorNode::checkNumberOfChannelsForInput(AudioNodeInput* input)
 {
-    ASSERT(context()->isAudioThread() && context()->isGraphOwner());
+    ASSERT(context().isAudioThread() && context().isGraphOwner());
 
     ASSERT(input == this->input(0));
     if (input != this->input(0))
diff --git a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
index d8946e1c9..e7e116b75 100644
--- a/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBasicProcessorNode.h
@@ -22,12 +22,10 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AudioBasicProcessorNode_h
-#define AudioBasicProcessorNode_h
+#pragma once
 
 #include "AudioNode.h"
 #include
-#include
 #include
 #include
 
@@ -40,29 +38,27 @@ class AudioProcessor;
 // AudioBasicProcessorNode is an AudioNode with one input and one output where the input and output have the same number of channels.
 class AudioBasicProcessorNode : public AudioNode {
 public:
-    AudioBasicProcessorNode(AudioContext*, float sampleRate);
+    AudioBasicProcessorNode(AudioContext&, float sampleRate);
 
     // AudioNode
-    virtual void process(size_t framesToProcess) override;
-    virtual void pullInputs(size_t framesToProcess) override;
-    virtual void reset() override;
-    virtual void initialize() override;
-    virtual void uninitialize() override;
+    void process(size_t framesToProcess) override;
+    void pullInputs(size_t framesToProcess) override;
+    void reset() override;
+    void initialize() override;
+    void uninitialize() override;
 
     // Called in the main thread when the number of channels for the input may have changed.
-    virtual void checkNumberOfChannelsForInput(AudioNodeInput*) override;
+    void checkNumberOfChannelsForInput(AudioNodeInput*) override;
 
     // Returns the number of channels for both the input and the output.
     unsigned numberOfChannels();
 
protected:
-    virtual double tailTime() const override;
-    virtual double latencyTime() const override;
+    double tailTime() const override;
+    double latencyTime() const override;
 
     AudioProcessor* processor() { return m_processor.get(); }
 
    std::unique_ptr<AudioProcessor> m_processor;
 };
 
 } // namespace WebCore
-
-#endif // AudioBasicProcessorNode_h
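A detail worth noting in the AudioBasicInspectorNode hunks: the `connect()`/`disconnect()` overrides now capture the base-class `ExceptionOr<void>` result, run their pull-list bookkeeping, and only then propagate the result. A compilable toy model of that shape, using `std::optional<Exception>` as a stand-in for `ExceptionOr<void>` (none of these types are WebKit's):

    #include <iostream>
    #include <optional>

    struct Exception { int code; };
    using ExceptionOrVoid = std::optional<Exception>; // nullopt == success

    struct Node {
        virtual ~Node() = default;
        virtual ExceptionOrVoid connect(Node& destination, unsigned outputIndex)
        {
            if (outputIndex > 0)
                return Exception { 1 /* INDEX_SIZE_ERR */ };
            return std::nullopt; // destination accepted on output 0
        }
    };

    struct InspectorNode final : Node {
        bool needAutomaticPull { false }; // in-class initializer, as in the patch

        ExceptionOrVoid connect(Node& destination, unsigned outputIndex) override
        {
            auto result = Node::connect(destination, outputIndex);
            updatePullStatus(); // bookkeeping runs whether or not connect() threw
            return result;
        }

        void updatePullStatus() { needAutomaticPull = true; }
    };

    int main()
    {
        InspectorNode inspector;
        Node other;
        auto result = inspector.connect(other, 0);
        std::cout << (result ? "threw" : "connected") << '\n';
    }

The same hunks also replace the constructor-initializer `m_needAutomaticPull(false)` with an in-class default, which is why the member initializer list shrinks.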
("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -32,101 +32,110 @@ #include "AudioBuffer.h" -#include "AudioBus.h" #include "AudioContext.h" #include "AudioFileReader.h" -#include "ExceptionCode.h" -#include "ExceptionCodePlaceholder.h" - -#include +#include #include namespace WebCore { -PassRefPtr AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) +RefPtr AudioBuffer::create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) { if (sampleRate < 22050 || sampleRate > 96000 || numberOfChannels > AudioContext::maxNumberOfChannels() || !numberOfFrames) return nullptr; - - return adoptRef(new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate)); + + auto buffer = adoptRef(*new AudioBuffer(numberOfChannels, numberOfFrames, sampleRate)); + if (!buffer->m_length) + return nullptr; + + return WTFMove(buffer); } -PassRefPtr AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate) +RefPtr AudioBuffer::createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate) { RefPtr bus = createBusFromInMemoryAudioFile(data, dataSize, mixToMono, sampleRate); - if (bus.get()) - return adoptRef(new AudioBuffer(bus.get())); - - return nullptr; + if (!bus) + return nullptr; + return adoptRef(*new AudioBuffer(*bus)); } AudioBuffer::AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate) - : m_gain(1.0) - , m_sampleRate(sampleRate) + : m_sampleRate(sampleRate) , m_length(numberOfFrames) { m_channels.reserveCapacity(numberOfChannels); for (unsigned i = 0; i < numberOfChannels; ++i) { - RefPtr channelDataArray = Float32Array::create(m_length); + auto channelDataArray = Float32Array::create(m_length); + if (!channelDataArray) { + invalidate(); + break; + } + channelDataArray->setNeuterable(false); - m_channels.append(channelDataArray); + m_channels.append(WTFMove(channelDataArray)); } } -AudioBuffer::AudioBuffer(AudioBus* bus) - : m_gain(1.0) - , m_sampleRate(bus->sampleRate()) - , m_length(bus->length()) +AudioBuffer::AudioBuffer(AudioBus& bus) + : m_sampleRate(bus.sampleRate()) + , m_length(bus.length()) { // Copy audio data from the bus to the Float32Arrays we manage. 
- unsigned numberOfChannels = bus->numberOfChannels(); + unsigned numberOfChannels = bus.numberOfChannels(); m_channels.reserveCapacity(numberOfChannels); for (unsigned i = 0; i < numberOfChannels; ++i) { - RefPtr channelDataArray = Float32Array::create(m_length); + auto channelDataArray = Float32Array::create(m_length); + if (!channelDataArray) { + invalidate(); + break; + } + channelDataArray->setNeuterable(false); - channelDataArray->setRange(bus->channel(i)->data(), m_length, 0); - m_channels.append(channelDataArray); + channelDataArray->setRange(bus.channel(i)->data(), m_length, 0); + m_channels.append(WTFMove(channelDataArray)); } } +void AudioBuffer::invalidate() +{ + releaseMemory(); + m_length = 0; +} + void AudioBuffer::releaseMemory() { m_channels.clear(); } -PassRefPtr AudioBuffer::getChannelData(unsigned channelIndex, ExceptionCode& ec) +ExceptionOr> AudioBuffer::getChannelData(unsigned channelIndex) { - if (channelIndex >= m_channels.size()) { - ec = SYNTAX_ERR; - return nullptr; - } - - Float32Array* channelData = m_channels[channelIndex].get(); - return Float32Array::create(channelData->buffer(), channelData->byteOffset(), channelData->length()); + if (channelIndex >= m_channels.size()) + return Exception { SYNTAX_ERR }; + auto& channelData = *m_channels[channelIndex]; + auto array = Float32Array::create(channelData.unsharedBuffer(), channelData.byteOffset(), channelData.length()); + RELEASE_ASSERT(array); + return array.releaseNonNull(); } -Float32Array* AudioBuffer::getChannelData(unsigned channelIndex) +Float32Array* AudioBuffer::channelData(unsigned channelIndex) { if (channelIndex >= m_channels.size()) return nullptr; - return m_channels[channelIndex].get(); } void AudioBuffer::zero() { - for (unsigned i = 0; i < m_channels.size(); ++i) { - if (getChannelData(i)) - getChannelData(i)->zeroRange(0, length()); - } + for (auto& channel : m_channels) + channel->zeroRange(0, length()); } size_t AudioBuffer::memoryCost() const { size_t cost = 0; - for (unsigned i = 0; i < m_channels.size() ; ++i) - cost += m_channels[i]->byteLength(); + for (auto& channel : m_channels) + cost += channel->byteLength(); return cost; } diff --git a/Source/WebCore/Modules/webaudio/AudioBuffer.h b/Source/WebCore/Modules/webaudio/AudioBuffer.h index d52c02805..9c9dd1dac 100644 --- a/Source/WebCore/Modules/webaudio/AudioBuffer.h +++ b/Source/WebCore/Modules/webaudio/AudioBuffer.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -26,27 +26,22 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef AudioBuffer_h -#define AudioBuffer_h +#pragma once +#include "ExceptionOr.h" #include -#include -#include -#include #include namespace WebCore { class AudioBus; -typedef int ExceptionCode; - class AudioBuffer : public RefCounted { public: - static PassRefPtr create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate); + static RefPtr create(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate); - // Returns 0 if data is not a valid audio file. 
- static PassRefPtr createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate); + // Returns nullptr if data is not a valid audio file. + static RefPtr createFromAudioFileData(const void* data, size_t dataSize, bool mixToMono, float sampleRate); // Format size_t length() const { return m_length; } @@ -55,32 +50,31 @@ public: // Channel data access unsigned numberOfChannels() const { return m_channels.size(); } - PassRefPtr getChannelData(unsigned channelIndex, ExceptionCode&); - Float32Array* getChannelData(unsigned channelIndex); + ExceptionOr> getChannelData(unsigned channelIndex); + Float32Array* channelData(unsigned channelIndex); void zero(); // Scalar gain double gain() const { return m_gain; } void setGain(double gain) { m_gain = gain; } - // Because an AudioBuffer has a JavaScript wrapper, which will be garbage collected, it may take awhile for this object to be deleted. + // Because an AudioBuffer has a JavaScript wrapper, which will be garbage collected, it may take a while for this object to be deleted. // releaseMemory() can be called when the AudioContext goes away, so we can release the memory earlier than when the garbage collection happens. // Careful! Only call this when the page unloads, after the AudioContext is no longer processing. void releaseMemory(); size_t memoryCost() const; -protected: +private: AudioBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate); - explicit AudioBuffer(AudioBus*); + explicit AudioBuffer(AudioBus&); + + void invalidate(); - double m_gain; // scalar gain + double m_gain { 1.0 }; // scalar gain float m_sampleRate; size_t m_length; - Vector> m_channels; }; } // namespace WebCore - -#endif // AudioBuffer_h diff --git a/Source/WebCore/Modules/webaudio/AudioBuffer.idl b/Source/WebCore/Modules/webaudio/AudioBuffer.idl index 37a570040..a1a902e06 100644 --- a/Source/WebCore/Modules/webaudio/AudioBuffer.idl +++ b/Source/WebCore/Modules/webaudio/AudioBuffer.idl @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -29,14 +29,15 @@ [ Conditional=WEB_AUDIO, ImplementationLacksVTable, + ReportExtraMemoryCost, ] interface AudioBuffer { readonly attribute long length; // in sample-frames - readonly attribute float duration; // in seconds - readonly attribute float sampleRate; // in sample-frames per second + readonly attribute unrestricted float duration; // in seconds + readonly attribute unrestricted float sampleRate; // in sample-frames per second - attribute float gain; // linear gain (default 1.0) + attribute unrestricted float gain; // linear gain (default 1.0) // Channel access readonly attribute unsigned long numberOfChannels; - [RaisesException] Float32Array getChannelData(unsigned long channelIndex); + [MayThrowException] Float32Array getChannelData(unsigned long channelIndex); }; diff --git a/Source/WebCore/Modules/webaudio/AudioBufferCallback.h b/Source/WebCore/Modules/webaudio/AudioBufferCallback.h index feebc469b..27cc6ac79 100644 --- a/Source/WebCore/Modules/webaudio/AudioBufferCallback.h +++ b/Source/WebCore/Modules/webaudio/AudioBufferCallback.h @@ -22,8 +22,7 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef AudioBufferCallback_h -#define AudioBufferCallback_h +#pragma once #if ENABLE(WEB_AUDIO) @@ -39,8 +38,6 @@ public: virtual bool handleEvent(AudioBuffer*) = 0; }; -} // namespace +} // namespace WebCore #endif // ENABLE(WEB_AUDIO) - -#endif // AudioBufferCallback_h diff --git a/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl b/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl index 913f57779..74577fd0e 100644 --- a/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl +++ b/Source/WebCore/Modules/webaudio/AudioBufferCallback.idl @@ -25,6 +25,4 @@ [ Conditional=WEB_AUDIO, JSGenerateToJSObject, -] callback interface AudioBufferCallback { - boolean handleEvent(AudioBuffer audioBuffer); -}; +] callback AudioBufferCallback = void (AudioBuffer audioBuffer); diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp index 8ee6d61b9..11319a85c 100644 --- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp +++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp @@ -28,17 +28,14 @@ #include "AudioBufferSourceNode.h" +#include "AudioBuffer.h" #include "AudioContext.h" #include "AudioNodeOutput.h" +#include "AudioParam.h" #include "AudioUtilities.h" #include "FloatConversion.h" -#include "ScriptCallStack.h" -#include "ScriptController.h" +#include "PannerNode.h" #include "ScriptExecutionContext.h" -#include -#include -#include -#include namespace WebCore { @@ -49,14 +46,14 @@ const double DefaultGrainDuration = 0.020; // 20ms // to minimize linear interpolation aliasing. 
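The AudioBuffer hunks change `create()` from `PassRefPtr` to a nullable `RefPtr` and add `invalidate()` so that a `Float32Array` allocation failure inside the constructor is detectable afterwards (`m_length == 0`). A compilable sketch of that fallible-factory shape, with toy types (`std::shared_ptr` standing in for WebKit's `Ref`/`RefPtr`, and the out-of-memory case simulated by a frame cap):

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <vector>

    class Buffer {
    public:
        static std::shared_ptr<Buffer> create(unsigned channels, size_t frames, float sampleRate)
        {
            if (sampleRate < 22050 || sampleRate > 96000 || !frames)
                return nullptr; // reject bad parameters up front
            auto buffer = std::shared_ptr<Buffer>(new Buffer(channels, frames));
            if (!buffer->m_length)
                return nullptr; // constructor hit an allocation failure
            return buffer;
        }

        size_t length() const { return m_length; }

    private:
        static constexpr size_t maxFrames = 1 << 20; // stand-in for an OOM limit

        Buffer(unsigned channels, size_t frames)
            : m_length(frames)
        {
            for (unsigned i = 0; i < channels; ++i) {
                if (frames > maxFrames) { // Float32Array::create() returning null in WebKit
                    invalidate();
                    return;
                }
                m_channels.emplace_back(frames, 0.0f);
            }
        }

        // Mirror of AudioBuffer::invalidate(): drop the channels and zero the
        // length so create() can tell that construction failed.
        void invalidate()
        {
            m_channels.clear();
            m_length = 0;
        }

        size_t m_length;
        std::vector<std::vector<float>> m_channels;
    };

    int main()
    {
        auto good = Buffer::create(2, 128, 44100);
        auto bad = Buffer::create(2, 128, 8000); // sample rate out of range
        std::cout << (good != nullptr) << ' ' << (bad != nullptr) << '\n';
    }

Since constructors cannot return failure directly, routing the failure through an invalidated length keeps the factory as the single place callers must check.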
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
index 8ee6d61b9..11319a85c 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
@@ -28,17 +28,14 @@
 
 #include "AudioBufferSourceNode.h"
 
+#include "AudioBuffer.h"
 #include "AudioContext.h"
 #include "AudioNodeOutput.h"
+#include "AudioParam.h"
 #include "AudioUtilities.h"
 #include "FloatConversion.h"
-#include "ScriptCallStack.h"
-#include "ScriptController.h"
+#include "PannerNode.h"
 #include "ScriptExecutionContext.h"
-#include
-#include
-#include
-#include
 
 namespace WebCore {
 
@@ -49,14 +46,14 @@ const double DefaultGrainDuration = 0.020; // 20ms
 // to minimize linear interpolation aliasing.
 const double MaxRate = 1024;
 
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
+Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext& context, float sampleRate)
 {
-    return adoptRef(new AudioBufferSourceNode(context, sampleRate));
+    return adoptRef(*new AudioBufferSourceNode(context, sampleRate));
 }
 
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext& context, float sampleRate)
     : AudioScheduledSourceNode(context, sampleRate)
-    , m_buffer(0)
+    , m_buffer(nullptr)
     , m_isLooping(false)
     , m_loopStart(0)
     , m_loopEnd(0)
@@ -65,12 +62,12 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sample
     , m_grainOffset(0.0)
     , m_grainDuration(DefaultGrainDuration)
     , m_lastGain(1.0)
-    , m_pannerNode(0)
+    , m_pannerNode(nullptr)
 {
     setNodeType(NodeTypeAudioBufferSource);
 
     m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
-    m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, 0.0, MaxRate);
+    m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, -MaxRate, MaxRate);
 
     // Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
     addOutput(std::make_unique<AudioNodeOutput>(this, 1));
@@ -86,23 +83,23 @@ AudioBufferSourceNode::~AudioBufferSourceNode()
 
 void AudioBufferSourceNode::process(size_t framesToProcess)
 {
-    AudioBus* outputBus = output(0)->bus();
+    auto& outputBus = *output(0)->bus();
 
     if (!isInitialized()) {
-        outputBus->zero();
+        outputBus.zero();
         return;
     }
 
     // The audio thread can't block on this lock, so we use std::try_to_lock instead.
     std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
     if (!lock.owns_lock()) {
         // Too bad - the try_lock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
-        outputBus->zero();
+        outputBus.zero();
         return;
     }
 
     if (!buffer()) {
-        outputBus->zero();
+        outputBus.zero();
         return;
     }
 
@@ -110,33 +107,32 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
     // before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
     // In this case, if the the buffer has just been changed and we're not quite ready yet, then just output silence.
     if (numberOfChannels() != buffer()->numberOfChannels()) {
-        outputBus->zero();
+        outputBus.zero();
         return;
     }
 
     size_t quantumFrameOffset;
     size_t bufferFramesToProcess;
-
     updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);
 
     if (!bufferFramesToProcess) {
-        outputBus->zero();
+        outputBus.zero();
         return;
     }
 
-    for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
-        m_destinationChannels[i] = outputBus->channel(i)->mutableData();
+    for (unsigned i = 0; i < outputBus.numberOfChannels(); ++i)
+        m_destinationChannels[i] = outputBus.channel(i)->mutableData();
 
     // Render by reading directly from the buffer.
-    if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) {
-        outputBus->zero();
+    if (!renderFromBuffer(&outputBus, quantumFrameOffset, bufferFramesToProcess)) {
+        outputBus.zero();
         return;
     }
 
     // Apply the gain (in-place) to the output bus.
     float totalGain = gain()->value() * m_buffer->gain();
-    outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
-    outputBus->clearSilentFlag();
+    outputBus.copyWithGainFrom(outputBus, &m_lastGain, totalGain);
+    outputBus.clearSilentFlag();
 }
 
 // Returns true if we're finished.
@@ -160,7 +156,7 @@ bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsign
 
 bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
 {
-    ASSERT(context()->isAudioThread());
+    ASSERT(context().isAudioThread());
 
     // Basic sanity checking
     ASSERT(bus);
@@ -200,47 +196,54 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
     size_t bufferLength = buffer()->length();
     double bufferSampleRate = buffer()->sampleRate();
+    double pitchRate = totalPitchRate();
+    bool reverse = pitchRate < 0;
 
     // Avoid converting from time to sample-frames twice by computing
     // the grain end time first before computing the sample frame.
-    unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength;
-
-    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
-    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
-    // https://bugs.webkit.org/show_bug.cgi?id=77224
+    unsigned maxFrame;
     if (m_isGrain)
-        endFrame += 512;
+        maxFrame = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate);
+    else
+        maxFrame = bufferLength;
 
     // Do some sanity checking.
-    if (endFrame > bufferLength)
-        endFrame = bufferLength;
-    if (m_virtualReadIndex >= endFrame)
+    if (maxFrame > bufferLength)
+        maxFrame = bufferLength;
+    if (reverse && m_virtualReadIndex <= 0)
+        m_virtualReadIndex = maxFrame - 1;
+    else if (!reverse && m_virtualReadIndex >= maxFrame)
         m_virtualReadIndex = 0; // reset to start
 
     // If the .loop attribute is true, then values of m_loopStart == 0 && m_loopEnd == 0 implies
     // that we should use the entire buffer as the loop, otherwise use the loop values in m_loopStart and m_loopEnd.
-    double virtualEndFrame = endFrame;
-    double virtualDeltaFrames = endFrame;
+    double virtualMaxFrame = maxFrame;
+    double virtualMinFrame = 0;
+    double virtualDeltaFrames = maxFrame;
 
     if (loop() && (m_loopStart || m_loopEnd) && m_loopStart >= 0 && m_loopEnd > 0 && m_loopStart < m_loopEnd) {
         // Convert from seconds to sample-frames.
-        double loopStartFrame = m_loopStart * buffer()->sampleRate();
-        double loopEndFrame = m_loopEnd * buffer()->sampleRate();
+        double loopMinFrame = m_loopStart * buffer()->sampleRate();
+        double loopMaxFrame = m_loopEnd * buffer()->sampleRate();
 
-        virtualEndFrame = std::min(loopEndFrame, virtualEndFrame);
-        virtualDeltaFrames = virtualEndFrame - loopStartFrame;
+        virtualMaxFrame = std::min(loopMaxFrame, virtualMaxFrame);
+        virtualMinFrame = std::max(loopMinFrame, virtualMinFrame);
+        virtualDeltaFrames = virtualMaxFrame - virtualMinFrame;
     }
 
-    double pitchRate = totalPitchRate();
-
     // Sanity check that our playback rate isn't larger than the loop size.
-    if (pitchRate >= virtualDeltaFrames)
+    if (fabs(pitchRate) >= virtualDeltaFrames)
         return false;
 
     // Get local copy.
     double virtualReadIndex = m_virtualReadIndex;
 
+    bool needsInterpolation = virtualReadIndex != floor(virtualReadIndex)
+        || virtualDeltaFrames != floor(virtualDeltaFrames)
+        || virtualMaxFrame != floor(virtualMaxFrame)
+        || virtualMinFrame != floor(virtualMinFrame);
+
     // Render loop - reading from the source buffer to the destination using linear interpolation.
     int framesToProcess = numberOfFrames;
@@ -249,14 +252,12 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
 
     // Optimize for the very common case of playing back with pitchRate == 1.
     // We can avoid the linear interpolation.
-    if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)
-        && virtualDeltaFrames == floor(virtualDeltaFrames)
-        && virtualEndFrame == floor(virtualEndFrame)) {
+    if (pitchRate == 1 && !needsInterpolation) {
         unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
         unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames);
-        endFrame = static_cast<unsigned>(virtualEndFrame);
+        maxFrame = static_cast<unsigned>(virtualMaxFrame);
         while (framesToProcess > 0) {
-            int framesToEnd = endFrame - readIndex;
+            int framesToEnd = maxFrame - readIndex;
             int framesThisTime = std::min(framesToProcess, framesToEnd);
             framesThisTime = std::max(0, framesThisTime);
 
@@ -268,13 +269,83 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
             framesToProcess -= framesThisTime;
 
             // Wrap-around.
-            if (readIndex >= endFrame) {
+            if (readIndex >= maxFrame) {
                 readIndex -= deltaFrames;
                 if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
                     break;
             }
         }
         virtualReadIndex = readIndex;
+    } else if (pitchRate == -1 && !needsInterpolation) {
+        int readIndex = static_cast<int>(virtualReadIndex);
+        int deltaFrames = static_cast<int>(virtualDeltaFrames);
+        int minFrame = static_cast<int>(virtualMinFrame) - 1;
+        while (framesToProcess > 0) {
+            int framesToEnd = readIndex - minFrame;
+            int framesThisTime = std::min(framesToProcess, framesToEnd);
+            framesThisTime = std::max(0, framesThisTime);
+
+            while (framesThisTime--) {
+                for (unsigned i = 0; i < numberOfChannels; ++i) {
+                    float* destination = destinationChannels[i];
+                    const float* source = sourceChannels[i];
+
+                    destination[writeIndex] = source[readIndex];
+                }
+
+                ++writeIndex;
+                --readIndex;
+                --framesToProcess;
+            }
+
+            // Wrap-around.
+            if (readIndex <= minFrame) {
+                readIndex += deltaFrames;
+                if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+                    break;
+            }
+        }
+        virtualReadIndex = readIndex;
+    } else if (!pitchRate) {
+        unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
+
+        for (unsigned i = 0; i < numberOfChannels; ++i)
+            std::fill_n(destinationChannels[i], framesToProcess, sourceChannels[i][readIndex]);
+    } else if (reverse) {
+        unsigned maxFrame = static_cast<unsigned>(virtualMaxFrame);
+        unsigned minFrame = static_cast<unsigned>(floorf(virtualMinFrame));
+
+        while (framesToProcess--) {
+            unsigned readIndex = static_cast<unsigned>(floorf(virtualReadIndex));
+            double interpolationFactor = virtualReadIndex - readIndex;
+
+            unsigned readIndex2 = readIndex + 1;
+            if (readIndex2 >= maxFrame)
+                readIndex2 = loop() ? minFrame : maxFrame - 1;
+
+            // Linear interpolation.
+            for (unsigned i = 0; i < numberOfChannels; ++i) {
+                float* destination = destinationChannels[i];
+                const float* source = sourceChannels[i];
+
+                double sample1 = source[readIndex];
+                double sample2 = source[readIndex2];
+                double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;
+
+                destination[writeIndex] = narrowPrecisionToFloat(sample);
+            }
+
+            writeIndex++;
+
+            virtualReadIndex += pitchRate;
+
+            // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
+            if (virtualReadIndex < virtualMinFrame) {
+                virtualReadIndex += virtualDeltaFrames;
+                if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+                    break;
+            }
+        }
     } else {
         while (framesToProcess--) {
             unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
@@ -311,7 +382,7 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
             virtualReadIndex += pitchRate;
 
             // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
-            if (virtualReadIndex >= virtualEndFrame) {
+            if (virtualReadIndex >= virtualMaxFrame) {
                 virtualReadIndex -= virtualDeltaFrames;
                 if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
                     break;
@@ -333,22 +404,20 @@ void AudioBufferSourceNode::reset()
     m_lastGain = gain()->value();
 }
 
-bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
+void AudioBufferSourceNode::setBuffer(RefPtr<AudioBuffer>&& buffer)
 {
     ASSERT(isMainThread());
 
     // The context must be locked since changing the buffer can re-configure the number of channels that are output.
-    AudioContext::AutoLocker contextLocker(*context());
+    AudioContext::AutoLocker contextLocker(context());
 
     // This synchronizes with process().
     std::lock_guard<std::mutex> lock(m_processMutex);
 
     if (buffer) {
         // Do any necesssary re-configuration to the buffer's number of channels.
         unsigned numberOfChannels = buffer->numberOfChannels();
-
-        if (numberOfChannels > AudioContext::maxNumberOfChannels())
-            return false;
+        ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
 
         output(0)->setNumberOfChannels(numberOfChannels);
 
@@ -356,13 +425,11 @@ bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
         m_destinationChannels = std::make_unique<float*[]>(numberOfChannels);
 
         for (unsigned i = 0; i < numberOfChannels; ++i)
-            m_sourceChannels[i] = buffer->getChannelData(i)->data();
+            m_sourceChannels[i] = buffer->channelData(i)->data();
     }
 
     m_virtualReadIndex = 0;
-    m_buffer = buffer;
-
-    return true;
+    m_buffer = WTFMove(buffer);
 }
 
 unsigned AudioBufferSourceNode::numberOfChannels()
@@ -370,61 +437,67 @@ unsigned AudioBufferSourceNode::numberOfChannels()
     return output(0)->numberOfChannels();
 }
 
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::start(double when, double grainOffset, std::optional<double> optionalGrainDuration)
 {
-    // Duration of 0 has special value, meaning calculate based on the entire buffer's duration.
-    startGrain(when, grainOffset, 0, ec);
+    double grainDuration = 0;
+    if (optionalGrainDuration)
+        grainDuration = optionalGrainDuration.value();
+    else if (buffer())
+        grainDuration = buffer()->duration() - grainOffset;
+
+    return startPlaying(Partial, when, grainOffset, grainDuration);
 }
 
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::startPlaying(BufferPlaybackMode playbackMode, double when, double grainOffset, double grainDuration)
 {
     ASSERT(isMainThread());
 
-    if (ScriptController::processingUserGesture())
-        context()->removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+    context().nodeWillBeginPlayback();
 
-    if (m_playbackState != UNSCHEDULED_STATE) {
-        ec = INVALID_STATE_ERR;
-        return;
-    }
+    if (m_playbackState != UNSCHEDULED_STATE)
+        return Exception { INVALID_STATE_ERR };
+
+    if (!std::isfinite(when) || (when < 0))
+        return Exception { INVALID_STATE_ERR };
+
+    if (!std::isfinite(grainOffset) || (grainOffset < 0))
+        return Exception { INVALID_STATE_ERR };
+
+    if (!std::isfinite(grainDuration) || (grainDuration < 0))
+        return Exception { INVALID_STATE_ERR };
 
     if (!buffer())
-        return;
-
-    // Do sanity checking of grain parameters versus buffer size.
-    double bufferDuration = buffer()->duration();
+        return { };
 
-    grainOffset = std::max(0.0, grainOffset);
-    grainOffset = std::min(bufferDuration, grainOffset);
-    m_grainOffset = grainOffset;
+    m_isGrain = playbackMode == Partial;
+    if (m_isGrain) {
+        // Do sanity checking of grain parameters versus buffer size.
+        double bufferDuration = buffer()->duration();
 
-    // Handle default/unspecified duration.
-    double maxDuration = bufferDuration - grainOffset;
-    if (!grainDuration)
-        grainDuration = maxDuration;
+        m_grainOffset = std::min(bufferDuration, grainOffset);
 
-    grainDuration = std::max(0.0, grainDuration);
-    grainDuration = std::min(maxDuration, grainDuration);
-    m_grainDuration = grainDuration;
+        double maxDuration = bufferDuration - m_grainOffset;
+        m_grainDuration = std::min(maxDuration, grainDuration);
+    } else {
+        m_grainOffset = 0.0;
+        m_grainDuration = buffer()->duration();
+    }
 
-    m_isGrain = true;
     m_startTime = when;
 
     // We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
     // at a sub-sample position since it will degrade the quality.
     // When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer.
     // Since playbackRate == 1 is very common, it's worth considering quality.
-    m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
+    if (totalPitchRate() < 0)
+        m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, buffer()->sampleRate()) - 1;
+    else
+        m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
 
     m_playbackState = SCHEDULED_STATE;
-}
 
-#if ENABLE(LEGACY_WEB_AUDIO)
-void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
-{
-    startGrain(when, grainOffset, grainDuration, ec);
+    return { };
 }
-#endif
 
 double AudioBufferSourceNode::totalPitchRate()
 {
@@ -442,11 +515,7 @@ double AudioBufferSourceNode::totalPitchRate()
 
     double totalRate = dopplerRate * sampleRateFactor * basePitchRate;
 
-    // Sanity check the total rate. It's very important that the resampler not get any bad rate values.
-    totalRate = std::max(0.0, totalRate);
-    if (!totalRate)
-        totalRate = 1; // zero rate is considered illegal
-    totalRate = std::min(MaxRate, totalRate);
+    totalRate = std::max(-MaxRate, std::min(MaxRate, totalRate));
 
     bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate);
     ASSERT(isTotalRateValid);
@@ -459,8 +528,8 @@ double AudioBufferSourceNode::totalPitchRate()
 bool AudioBufferSourceNode::looping()
 {
     static bool firstTime = true;
-    if (firstTime && context() && context()->scriptExecutionContext()) {
-        context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+    if (firstTime && context().scriptExecutionContext()) {
+        context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
         firstTime = false;
     }
 
@@ -470,8 +539,8 @@ bool AudioBufferSourceNode::looping()
 void AudioBufferSourceNode::setLooping(bool looping)
 {
     static bool firstTime = true;
-    if (firstTime && context() && context()->scriptExecutionContext()) {
-        context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+    if (firstTime && context().scriptExecutionContext()) {
+        context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
         firstTime = false;
     }
 
@@ -499,7 +568,7 @@ void AudioBufferSourceNode::clearPannerNode()
 {
     if (m_pannerNode) {
         m_pannerNode->deref(AudioNode::RefTypeConnection);
-        m_pannerNode = 0;
+        m_pannerNode = nullptr;
     }
 }
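The renderFromBuffer() hunks above are the substantive change in this file: `playbackRate` may now range down to `-MaxRate`, and reverse playback walks the virtual read index backwards, wrapping it upward by the loop length. A self-contained toy model of that reverse interpolating loop (plain arrays stand in for AudioBus channels; the loop window values are made up for illustration):

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<float> source { 0, 1, 2, 3, 4, 5, 6, 7 };
        std::vector<float> destination(12, 0);

        double minFrame = 2, maxFrame = 6;        // loop window [2, 6)
        double deltaFrames = maxFrame - minFrame; // loop length
        double pitchRate = -1.5;                  // reverse, resampled
        double virtualReadIndex = maxFrame - 1;   // reverse playback starts at the end

        for (auto& out : destination) {
            // Linear interpolation between the two frames around the read index.
            unsigned readIndex = static_cast<unsigned>(std::floor(virtualReadIndex));
            double t = virtualReadIndex - readIndex;
            unsigned readIndex2 = readIndex + 1 < maxFrame ? readIndex + 1 : minFrame;
            out = static_cast<float>((1 - t) * source[readIndex] + t * source[readIndex2]);

            virtualReadIndex += pitchRate;
            if (virtualReadIndex < minFrame)      // wrap-around, keeping sub-sample position
                virtualReadIndex += deltaFrames;
        }

        for (float sample : destination)
            std::cout << sample << ' ';
        std::cout << '\n';
    }

This also explains the new branch in startPlaying(): when the pitch rate is negative, the initial `m_virtualReadIndex` must point at the last frame of the grain rather than the first.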
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
index 15fc56a0b..f4e63a859 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.h
@@ -22,40 +22,32 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef AudioBufferSourceNode_h
-#define AudioBufferSourceNode_h
+#pragma once
 
-#include "AudioBuffer.h"
-#include "AudioBus.h"
-#include "AudioParam.h"
 #include "AudioScheduledSourceNode.h"
-#include "ExceptionCode.h"
-#include "PannerNode.h"
-#include
-#include
-#include
-#include
+#include
 
 namespace WebCore {
 
-class AudioContext;
+class AudioBuffer;
+class PannerNode;
 
 // AudioBufferSourceNode is an AudioNode representing an audio source from an in-memory audio asset represented by an AudioBuffer.
 // It generally will be used for short sounds which require a high degree of scheduling flexibility (can playback in rhythmically perfect ways).
-class AudioBufferSourceNode : public AudioScheduledSourceNode {
+class AudioBufferSourceNode final : public AudioScheduledSourceNode {
 public:
-    static PassRefPtr<AudioBufferSourceNode> create(AudioContext*, float sampleRate);
+    static Ref<AudioBufferSourceNode> create(AudioContext&, float sampleRate);
 
     virtual ~AudioBufferSourceNode();
 
     // AudioNode
-    virtual void process(size_t framesToProcess) override;
-    virtual void reset() override;
+    void process(size_t framesToProcess) final;
+    void reset() final;
 
     // setBuffer() is called on the main thread. This is the buffer we use for playback.
-    // returns true on success.
-    bool setBuffer(AudioBuffer*);
+    void setBuffer(RefPtr<AudioBuffer>&&);
     AudioBuffer* buffer() { return m_buffer.get(); }
 
     // numberOfChannels() returns the number of output channels. This value equals the number of channels from the buffer.
@@ -63,12 +55,7 @@ public:
     unsigned numberOfChannels();
 
     // Play-state
-    void startGrain(double when, double grainOffset, ExceptionCode&);
-    void startGrain(double when, double grainOffset, double grainDuration, ExceptionCode&);
-
-#if ENABLE(LEGACY_WEB_AUDIO)
-    void noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionCode&);
-#endif
+    ExceptionOr<void> start(double when, double grainOffset, std::optional<double> grainDuration);
 
     // Note: the attribute was originally exposed as .looping, but to be more consistent in naming with