summary refs log tree commit diff
path: root/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
diff options
context:
space:
mode:
author: Lorry Tar Creator <lorry-tar-importer@lorry> 2017-06-27 06:07:23 +0000
committer: Lorry Tar Creator <lorry-tar-importer@lorry> 2017-06-27 06:07:23 +0000
commit: 1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree: 46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
parent: 32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download: WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp')
-rw-r--r-- Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp | 75
1 file changed, 46 insertions(+), 29 deletions(-)
diff --git a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
index cb3297bd4..b95e093d0 100644
--- a/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
+++ b/Source/WebCore/Modules/webaudio/ScriptProcessorNode.cpp
@@ -35,12 +35,13 @@
#include "AudioNodeOutput.h"
#include "AudioProcessingEvent.h"
#include "Document.h"
+#include "EventNames.h"
#include <runtime/Float32Array.h>
#include <wtf/MainThread.h>
namespace WebCore {
-PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+RefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
// Check for valid buffer size.
switch (bufferSize) {
@@ -65,10 +66,10 @@ PassRefPtr<ScriptProcessorNode> ScriptProcessorNode::create(AudioContext* contex
if (numberOfOutputChannels > AudioContext::maxNumberOfChannels())
return nullptr;
- return adoptRef(new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
+ return adoptRef(*new ScriptProcessorNode(context, sampleRate, bufferSize, numberOfInputChannels, numberOfOutputChannels));
}
-ScriptProcessorNode::ScriptProcessorNode(AudioContext* context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
+ScriptProcessorNode::ScriptProcessorNode(AudioContext& context, float sampleRate, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
: AudioNode(context, sampleRate)
, m_doubleBufferIndex(0)
, m_doubleBufferIndexForEvent(0)
@@ -104,7 +105,7 @@ void ScriptProcessorNode::initialize()
if (isInitialized())
return;
- float sampleRate = context()->sampleRate();
+ float sampleRate = context().sampleRate();
// Create double buffers on both the input and output sides.
// These AudioBuffers will be directly accessed in the main thread by JavaScript.
@@ -182,14 +183,14 @@ void ScriptProcessorNode::process(size_t framesToProcess)
return;
for (unsigned i = 0; i < numberOfInputChannels; i++)
- m_internalInputBus->setChannelMemory(i, inputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);
+ m_internalInputBus->setChannelMemory(i, inputBuffer->channelData(i)->data() + m_bufferReadWriteIndex, framesToProcess);
if (numberOfInputChannels)
m_internalInputBus->copyFrom(*inputBus);
// Copy from the output buffer to the output.
for (unsigned i = 0; i < numberOfOutputChannels; ++i)
- memcpy(outputBus->channel(i)->mutableData(), outputBuffer->getChannelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);
+ memcpy(outputBus->channel(i)->mutableData(), outputBuffer->channelData(i)->data() + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);
// Update the buffering index.
m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();
@@ -210,30 +211,20 @@ void ScriptProcessorNode::process(size_t framesToProcess)
// Fire the event on the main thread, not this one (which is the realtime audio thread).
m_doubleBufferIndexForEvent = m_doubleBufferIndex;
m_isRequestOutstanding = true;
- callOnMainThread(fireProcessEventDispatch, this);
- }
-
- swapBuffers();
- }
-}
-void ScriptProcessorNode::setOnaudioprocess(PassRefPtr<EventListener> listener)
-{
- m_hasAudioProcessListener = listener;
- setAttributeEventListener(eventNames().audioprocessEvent, listener);
-}
+ callOnMainThread([this] {
+ if (!m_hasAudioProcessListener)
+ return;
-void ScriptProcessorNode::fireProcessEventDispatch(void* userData)
-{
- ScriptProcessorNode* jsAudioNode = static_cast<ScriptProcessorNode*>(userData);
- ASSERT(jsAudioNode);
- if (!jsAudioNode)
- return;
+ fireProcessEvent();
- jsAudioNode->fireProcessEvent();
+ // De-reference to match the ref() call in process().
+ deref();
+ });
+ }
- // De-reference to match the ref() call in process().
- jsAudioNode->deref();
+ swapBuffers();
+ }
}
void ScriptProcessorNode::fireProcessEvent()
@@ -252,12 +243,16 @@ void ScriptProcessorNode::fireProcessEvent()
return;
// Avoid firing the event if the document has already gone away.
- if (context()->scriptExecutionContext()) {
+ if (context().scriptExecutionContext()) {
// Let the audio thread know we've gotten to the point where it's OK for it to make another request.
m_isRequestOutstanding = false;
-
+
+ // Calculate playbackTime with the buffersize which needs to be processed each time when onaudioprocess is called.
+ // The outputBuffer being passed to JS will be played after exhausting previous outputBuffer by double-buffering.
+ double playbackTime = (context().currentSampleFrame() + m_bufferSize) / static_cast<double>(context().sampleRate());
+
// Call the JavaScript event handler which will do the audio processing.
- dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer));
+ dispatchEvent(AudioProcessingEvent::create(inputBuffer, outputBuffer, playbackTime));
}
}
@@ -282,6 +277,28 @@ double ScriptProcessorNode::latencyTime() const
return std::numeric_limits<double>::infinity();
}
+bool ScriptProcessorNode::addEventListener(const AtomicString& eventType, Ref<EventListener>&& listener, const AddEventListenerOptions& options)
+{
+ bool success = AudioNode::addEventListener(eventType, WTFMove(listener), options);
+ if (success && eventType == eventNames().audioprocessEvent)
+ m_hasAudioProcessListener = hasEventListeners(eventNames().audioprocessEvent);
+ return success;
+}
+
+bool ScriptProcessorNode::removeEventListener(const AtomicString& eventType, EventListener& listener, const ListenerOptions& options)
+{
+ bool success = AudioNode::removeEventListener(eventType, listener, options);
+ if (success && eventType == eventNames().audioprocessEvent)
+ m_hasAudioProcessListener = hasEventListeners(eventNames().audioprocessEvent);
+ return success;
+}
+
+void ScriptProcessorNode::removeAllEventListeners()
+{
+ m_hasAudioProcessListener = false;
+ AudioNode::removeAllEventListeners();
+}
+
} // namespace WebCore
#endif // ENABLE(WEB_AUDIO)