summaryrefslogtreecommitdiff
path: root/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2017-06-27 06:07:23 +0000
commit1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
parent32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
downloadWebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp')
-rw-r--r--Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp277
1 files changed, 173 insertions, 104 deletions
diff --git a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
index 8ee6d61b9..11319a85c 100644
--- a/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioBufferSourceNode.cpp
@@ -28,17 +28,14 @@
#include "AudioBufferSourceNode.h"
+#include "AudioBuffer.h"
#include "AudioContext.h"
#include "AudioNodeOutput.h"
+#include "AudioParam.h"
#include "AudioUtilities.h"
#include "FloatConversion.h"
-#include "ScriptCallStack.h"
-#include "ScriptController.h"
+#include "PannerNode.h"
#include "ScriptExecutionContext.h"
-#include <algorithm>
-#include <wtf/MainThread.h>
-#include <wtf/MathExtras.h>
-#include <wtf/StdLibExtras.h>
namespace WebCore {
@@ -49,14 +46,14 @@ const double DefaultGrainDuration = 0.020; // 20ms
// to minimize linear interpolation aliasing.
const double MaxRate = 1024;
-PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, float sampleRate)
+Ref<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext& context, float sampleRate)
{
- return adoptRef(new AudioBufferSourceNode(context, sampleRate));
+ return adoptRef(*new AudioBufferSourceNode(context, sampleRate));
}
-AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sampleRate)
+AudioBufferSourceNode::AudioBufferSourceNode(AudioContext& context, float sampleRate)
: AudioScheduledSourceNode(context, sampleRate)
- , m_buffer(0)
+ , m_buffer(nullptr)
, m_isLooping(false)
, m_loopStart(0)
, m_loopEnd(0)
@@ -65,12 +62,12 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, float sample
, m_grainOffset(0.0)
, m_grainDuration(DefaultGrainDuration)
, m_lastGain(1.0)
- , m_pannerNode(0)
+ , m_pannerNode(nullptr)
{
setNodeType(NodeTypeAudioBufferSource);
m_gain = AudioParam::create(context, "gain", 1.0, 0.0, 1.0);
- m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, 0.0, MaxRate);
+ m_playbackRate = AudioParam::create(context, "playbackRate", 1.0, -MaxRate, MaxRate);
// Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
addOutput(std::make_unique<AudioNodeOutput>(this, 1));
@@ -86,23 +83,23 @@ AudioBufferSourceNode::~AudioBufferSourceNode()
void AudioBufferSourceNode::process(size_t framesToProcess)
{
- AudioBus* outputBus = output(0)->bus();
+ auto& outputBus = *output(0)->bus();
if (!isInitialized()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
// The audio thread can't block on this lock, so we use std::try_to_lock instead.
- std::unique_lock<std::mutex> lock(m_processMutex, std::try_to_lock);
+ std::unique_lock<Lock> lock(m_processMutex, std::try_to_lock);
if (!lock.owns_lock()) {
// Too bad - the try_lock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
- outputBus->zero();
+ outputBus.zero();
return;
}
if (!buffer()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
@@ -110,33 +107,32 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
// before the output bus is updated to the new number of channels because of use of tryLocks() in the context's updating system.
// In this case, if the buffer has just been changed and we're not quite ready yet, then just output silence.
if (numberOfChannels() != buffer()->numberOfChannels()) {
- outputBus->zero();
+ outputBus.zero();
return;
}
size_t quantumFrameOffset;
size_t bufferFramesToProcess;
-
updateSchedulingInfo(framesToProcess, outputBus, quantumFrameOffset, bufferFramesToProcess);
if (!bufferFramesToProcess) {
- outputBus->zero();
+ outputBus.zero();
return;
}
- for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
- m_destinationChannels[i] = outputBus->channel(i)->mutableData();
+ for (unsigned i = 0; i < outputBus.numberOfChannels(); ++i)
+ m_destinationChannels[i] = outputBus.channel(i)->mutableData();
// Render by reading directly from the buffer.
- if (!renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess)) {
- outputBus->zero();
+ if (!renderFromBuffer(&outputBus, quantumFrameOffset, bufferFramesToProcess)) {
+ outputBus.zero();
return;
}
// Apply the gain (in-place) to the output bus.
float totalGain = gain()->value() * m_buffer->gain();
- outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);
- outputBus->clearSilentFlag();
+ outputBus.copyWithGainFrom(outputBus, &m_lastGain, totalGain);
+ outputBus.clearSilentFlag();
}
// Returns true if we're finished.
@@ -160,7 +156,7 @@ bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsign
bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destinationFrameOffset, size_t numberOfFrames)
{
- ASSERT(context()->isAudioThread());
+ ASSERT(context().isAudioThread());
// Basic sanity checking
ASSERT(bus);
@@ -200,47 +196,54 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
size_t bufferLength = buffer()->length();
double bufferSampleRate = buffer()->sampleRate();
+ double pitchRate = totalPitchRate();
+ bool reverse = pitchRate < 0;
// Avoid converting from time to sample-frames twice by computing
// the grain end time first before computing the sample frame.
- unsigned endFrame = m_isGrain ? AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate) : bufferLength;
-
- // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
- // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
- // https://bugs.webkit.org/show_bug.cgi?id=77224
+ unsigned maxFrame;
if (m_isGrain)
- endFrame += 512;
+ maxFrame = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, bufferSampleRate);
+ else
+ maxFrame = bufferLength;
// Do some sanity checking.
- if (endFrame > bufferLength)
- endFrame = bufferLength;
- if (m_virtualReadIndex >= endFrame)
+ if (maxFrame > bufferLength)
+ maxFrame = bufferLength;
+ if (reverse && m_virtualReadIndex <= 0)
+ m_virtualReadIndex = maxFrame - 1;
+ else if (!reverse && m_virtualReadIndex >= maxFrame)
m_virtualReadIndex = 0; // reset to start
// If the .loop attribute is true, then values of m_loopStart == 0 && m_loopEnd == 0 implies
// that we should use the entire buffer as the loop, otherwise use the loop values in m_loopStart and m_loopEnd.
- double virtualEndFrame = endFrame;
- double virtualDeltaFrames = endFrame;
+ double virtualMaxFrame = maxFrame;
+ double virtualMinFrame = 0;
+ double virtualDeltaFrames = maxFrame;
if (loop() && (m_loopStart || m_loopEnd) && m_loopStart >= 0 && m_loopEnd > 0 && m_loopStart < m_loopEnd) {
// Convert from seconds to sample-frames.
- double loopStartFrame = m_loopStart * buffer()->sampleRate();
- double loopEndFrame = m_loopEnd * buffer()->sampleRate();
+ double loopMinFrame = m_loopStart * buffer()->sampleRate();
+ double loopMaxFrame = m_loopEnd * buffer()->sampleRate();
- virtualEndFrame = std::min(loopEndFrame, virtualEndFrame);
- virtualDeltaFrames = virtualEndFrame - loopStartFrame;
+ virtualMaxFrame = std::min(loopMaxFrame, virtualMaxFrame);
+ virtualMinFrame = std::max(loopMinFrame, virtualMinFrame);
+ virtualDeltaFrames = virtualMaxFrame - virtualMinFrame;
}
- double pitchRate = totalPitchRate();
-
// Sanity check that our playback rate isn't larger than the loop size.
- if (pitchRate >= virtualDeltaFrames)
+ if (fabs(pitchRate) >= virtualDeltaFrames)
return false;
// Get local copy.
double virtualReadIndex = m_virtualReadIndex;
+ bool needsInterpolation = virtualReadIndex != floor(virtualReadIndex)
+ || virtualDeltaFrames != floor(virtualDeltaFrames)
+ || virtualMaxFrame != floor(virtualMaxFrame)
+ || virtualMinFrame != floor(virtualMinFrame);
+
// Render loop - reading from the source buffer to the destination using linear interpolation.
int framesToProcess = numberOfFrames;
@@ -249,14 +252,12 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
// Optimize for the very common case of playing back with pitchRate == 1.
// We can avoid the linear interpolation.
- if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)
- && virtualDeltaFrames == floor(virtualDeltaFrames)
- && virtualEndFrame == floor(virtualEndFrame)) {
+ if (pitchRate == 1 && !needsInterpolation) {
unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
unsigned deltaFrames = static_cast<unsigned>(virtualDeltaFrames);
- endFrame = static_cast<unsigned>(virtualEndFrame);
+ maxFrame = static_cast<unsigned>(virtualMaxFrame);
while (framesToProcess > 0) {
- int framesToEnd = endFrame - readIndex;
+ int framesToEnd = maxFrame - readIndex;
int framesThisTime = std::min(framesToProcess, framesToEnd);
framesThisTime = std::max(0, framesThisTime);
@@ -268,13 +269,83 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
framesToProcess -= framesThisTime;
// Wrap-around.
- if (readIndex >= endFrame) {
+ if (readIndex >= maxFrame) {
readIndex -= deltaFrames;
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
}
}
virtualReadIndex = readIndex;
+ } else if (pitchRate == -1 && !needsInterpolation) {
+ int readIndex = static_cast<int>(virtualReadIndex);
+ int deltaFrames = static_cast<int>(virtualDeltaFrames);
+ int minFrame = static_cast<int>(virtualMinFrame) - 1;
+ while (framesToProcess > 0) {
+ int framesToEnd = readIndex - minFrame;
+ int framesThisTime = std::min<int>(framesToProcess, framesToEnd);
+ framesThisTime = std::max<int>(0, framesThisTime);
+
+ while (framesThisTime--) {
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* destination = destinationChannels[i];
+ const float* source = sourceChannels[i];
+
+ destination[writeIndex] = source[readIndex];
+ }
+
+ ++writeIndex;
+ --readIndex;
+ --framesToProcess;
+ }
+
+ // Wrap-around.
+ if (readIndex <= minFrame) {
+ readIndex += deltaFrames;
+ if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+ break;
+ }
+ }
+ virtualReadIndex = readIndex;
+ } else if (!pitchRate) {
+ unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
+
+ for (unsigned i = 0; i < numberOfChannels; ++i)
+ std::fill_n(destinationChannels[i], framesToProcess, sourceChannels[i][readIndex]);
+ } else if (reverse) {
+ unsigned maxFrame = static_cast<unsigned>(virtualMaxFrame);
+ unsigned minFrame = static_cast<unsigned>(floorf(virtualMinFrame));
+
+ while (framesToProcess--) {
+ unsigned readIndex = static_cast<unsigned>(floorf(virtualReadIndex));
+ double interpolationFactor = virtualReadIndex - readIndex;
+
+ unsigned readIndex2 = readIndex + 1;
+ if (readIndex2 >= maxFrame)
+ readIndex2 = loop() ? minFrame : maxFrame - 1;
+
+ // Linear interpolation.
+ for (unsigned i = 0; i < numberOfChannels; ++i) {
+ float* destination = destinationChannels[i];
+ const float* source = sourceChannels[i];
+
+ double sample1 = source[readIndex];
+ double sample2 = source[readIndex2];
+ double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;
+
+ destination[writeIndex] = narrowPrecisionToFloat(sample);
+ }
+
+ writeIndex++;
+
+ virtualReadIndex += pitchRate;
+
+ // Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
+ if (virtualReadIndex < virtualMinFrame) {
+ virtualReadIndex += virtualDeltaFrames;
+ if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
+ break;
+ }
+ }
} else {
while (framesToProcess--) {
unsigned readIndex = static_cast<unsigned>(virtualReadIndex);
@@ -311,7 +382,7 @@ bool AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
virtualReadIndex += pitchRate;
// Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
- if (virtualReadIndex >= virtualEndFrame) {
+ if (virtualReadIndex >= virtualMaxFrame) {
virtualReadIndex -= virtualDeltaFrames;
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
@@ -333,22 +404,20 @@ void AudioBufferSourceNode::reset()
m_lastGain = gain()->value();
}
-bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
+void AudioBufferSourceNode::setBuffer(RefPtr<AudioBuffer>&& buffer)
{
ASSERT(isMainThread());
// The context must be locked since changing the buffer can re-configure the number of channels that are output.
- AudioContext::AutoLocker contextLocker(*context());
+ AudioContext::AutoLocker contextLocker(context());
// This synchronizes with process().
- std::lock_guard<std::mutex> lock(m_processMutex);
+ std::lock_guard<Lock> lock(m_processMutex);
if (buffer) {
// Do any necessary re-configuration to the buffer's number of channels.
unsigned numberOfChannels = buffer->numberOfChannels();
-
- if (numberOfChannels > AudioContext::maxNumberOfChannels())
- return false;
+ ASSERT(numberOfChannels <= AudioContext::maxNumberOfChannels());
output(0)->setNumberOfChannels(numberOfChannels);
@@ -356,13 +425,11 @@ bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
m_destinationChannels = std::make_unique<float*[]>(numberOfChannels);
for (unsigned i = 0; i < numberOfChannels; ++i)
- m_sourceChannels[i] = buffer->getChannelData(i)->data();
+ m_sourceChannels[i] = buffer->channelData(i)->data();
}
m_virtualReadIndex = 0;
- m_buffer = buffer;
-
- return true;
+ m_buffer = WTFMove(buffer);
}
unsigned AudioBufferSourceNode::numberOfChannels()
@@ -370,61 +437,67 @@ unsigned AudioBufferSourceNode::numberOfChannels()
return output(0)->numberOfChannels();
}
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::start(double when, double grainOffset, std::optional<double> optionalGrainDuration)
{
- // Duration of 0 has special value, meaning calculate based on the entire buffer's duration.
- startGrain(when, grainOffset, 0, ec);
+ double grainDuration = 0;
+ if (optionalGrainDuration)
+ grainDuration = optionalGrainDuration.value();
+ else if (buffer())
+ grainDuration = buffer()->duration() - grainOffset;
+
+ return startPlaying(Partial, when, grainOffset, grainDuration);
}
-void AudioBufferSourceNode::startGrain(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
+ExceptionOr<void> AudioBufferSourceNode::startPlaying(BufferPlaybackMode playbackMode, double when, double grainOffset, double grainDuration)
{
ASSERT(isMainThread());
- if (ScriptController::processingUserGesture())
- context()->removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+ context().nodeWillBeginPlayback();
- if (m_playbackState != UNSCHEDULED_STATE) {
- ec = INVALID_STATE_ERR;
- return;
- }
+ if (m_playbackState != UNSCHEDULED_STATE)
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(when) || (when < 0))
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(grainOffset) || (grainOffset < 0))
+ return Exception { INVALID_STATE_ERR };
+
+ if (!std::isfinite(grainDuration) || (grainDuration < 0))
+ return Exception { INVALID_STATE_ERR };
if (!buffer())
- return;
-
- // Do sanity checking of grain parameters versus buffer size.
- double bufferDuration = buffer()->duration();
+ return { };
- grainOffset = std::max(0.0, grainOffset);
- grainOffset = std::min(bufferDuration, grainOffset);
- m_grainOffset = grainOffset;
+ m_isGrain = playbackMode == Partial;
+ if (m_isGrain) {
+ // Do sanity checking of grain parameters versus buffer size.
+ double bufferDuration = buffer()->duration();
- // Handle default/unspecified duration.
- double maxDuration = bufferDuration - grainOffset;
- if (!grainDuration)
- grainDuration = maxDuration;
+ m_grainOffset = std::min(bufferDuration, grainOffset);
- grainDuration = std::max(0.0, grainDuration);
- grainDuration = std::min(maxDuration, grainDuration);
- m_grainDuration = grainDuration;
+ double maxDuration = bufferDuration - m_grainOffset;
+ m_grainDuration = std::min(maxDuration, grainDuration);
+ } else {
+ m_grainOffset = 0.0;
+ m_grainDuration = buffer()->duration();
+ }
- m_isGrain = true;
m_startTime = when;
// We call timeToSampleFrame here since at playbackRate == 1 we don't want to go through linear interpolation
// at a sub-sample position since it will degrade the quality.
// When aligned to the sample-frame the playback will be identical to the PCM data stored in the buffer.
// Since playbackRate == 1 is very common, it's worth considering quality.
- m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
+ if (totalPitchRate() < 0)
+ m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset + m_grainDuration, buffer()->sampleRate()) - 1;
+ else
+ m_virtualReadIndex = AudioUtilities::timeToSampleFrame(m_grainOffset, buffer()->sampleRate());
m_playbackState = SCHEDULED_STATE;
-}
-#if ENABLE(LEGACY_WEB_AUDIO)
-void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration, ExceptionCode& ec)
-{
- startGrain(when, grainOffset, grainDuration, ec);
+ return { };
}
-#endif
double AudioBufferSourceNode::totalPitchRate()
{
@@ -442,11 +515,7 @@ double AudioBufferSourceNode::totalPitchRate()
double totalRate = dopplerRate * sampleRateFactor * basePitchRate;
- // Sanity check the total rate. It's very important that the resampler not get any bad rate values.
- totalRate = std::max(0.0, totalRate);
- if (!totalRate)
- totalRate = 1; // zero rate is considered illegal
- totalRate = std::min(MaxRate, totalRate);
+ totalRate = std::max(-MaxRate, std::min(MaxRate, totalRate));
bool isTotalRateValid = !std::isnan(totalRate) && !std::isinf(totalRate);
ASSERT(isTotalRateValid);
@@ -459,8 +528,8 @@ double AudioBufferSourceNode::totalPitchRate()
bool AudioBufferSourceNode::looping()
{
static bool firstTime = true;
- if (firstTime && context() && context()->scriptExecutionContext()) {
- context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+ if (firstTime && context().scriptExecutionContext()) {
+ context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
firstTime = false;
}
@@ -470,8 +539,8 @@ bool AudioBufferSourceNode::looping()
void AudioBufferSourceNode::setLooping(bool looping)
{
static bool firstTime = true;
- if (firstTime && context() && context()->scriptExecutionContext()) {
- context()->scriptExecutionContext()->addConsoleMessage(JSMessageSource, WarningMessageLevel, "AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead.");
+ if (firstTime && context().scriptExecutionContext()) {
+ context().scriptExecutionContext()->addConsoleMessage(MessageSource::JS, MessageLevel::Warning, ASCIILiteral("AudioBufferSourceNode 'looping' attribute is deprecated. Use 'loop' instead."));
firstTime = false;
}
@@ -499,7 +568,7 @@ void AudioBufferSourceNode::clearPannerNode()
{
if (m_pannerNode) {
m_pannerNode->deref(AudioNode::RefTypeConnection);
- m_pannerNode = 0;
+ m_pannerNode = nullptr;
}
}