author     Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/WebCore/Modules/webaudio/AudioContext.cpp
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/WebCore/Modules/webaudio/AudioContext.cpp')

-rw-r--r--   Source/WebCore/Modules/webaudio/AudioContext.cpp   712

1 file changed, 420 insertions(+), 292 deletions(-)
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.cpp b/Source/WebCore/Modules/webaudio/AudioContext.cpp
index 4854ff03b..0e1c78f1c 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.cpp
+++ b/Source/WebCore/Modules/webaudio/AudioContext.cpp
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -44,21 +45,26 @@
 #include "DelayNode.h"
 #include "Document.h"
 #include "DynamicsCompressorNode.h"
+#include "EventNames.h"
 #include "ExceptionCode.h"
 #include "FFTFrame.h"
+#include "Frame.h"
 #include "GainNode.h"
+#include "GenericEventQueue.h"
 #include "HRTFDatabaseLoader.h"
 #include "HRTFPanner.h"
+#include "JSDOMPromise.h"
+#include "NetworkingContext.h"
 #include "OfflineAudioCompletionEvent.h"
 #include "OfflineAudioDestinationNode.h"
 #include "OscillatorNode.h"
 #include "Page.h"
 #include "PannerNode.h"
 #include "PeriodicWave.h"
-#include "ScriptCallStack.h"
 #include "ScriptController.h"
 #include "ScriptProcessorNode.h"
 #include "WaveShaperNode.h"
+#include <inspector/ScriptCallStack.h>
 
 #if ENABLE(MEDIA_STREAM)
 #include "MediaStream.h"
@@ -110,69 +116,45 @@ bool AudioContext::isSampleRateRangeGood(float sampleRate)
 const unsigned MaxHardwareContexts = 4;
 unsigned AudioContext::s_hardwareContextCount = 0;
 
-PassRefPtr<AudioContext> AudioContext::create(Document& document, ExceptionCode& ec)
+RefPtr<AudioContext> AudioContext::create(Document& document)
 {
-    UNUSED_PARAM(ec);
-
     ASSERT(isMainThread());
     if (s_hardwareContextCount >= MaxHardwareContexts)
         return nullptr;
 
     RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
     audioContext->suspendIfNeeded();
-    return audioContext.release();
+    return audioContext;
 }
 
 // Constructor for rendering to the audio hardware.
 AudioContext::AudioContext(Document& document)
     : ActiveDOMObject(&document)
-    , m_isStopScheduled(false)
-    , m_isInitialized(false)
-    , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
-    , m_isDeletionScheduled(false)
-    , m_automaticPullNodesNeedUpdating(false)
-    , m_connectionCount(0)
-    , m_audioThread(0)
+    , m_mediaSession(PlatformMediaSession::create(*this))
+    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
     , m_graphOwnerThread(UndefinedThreadIdentifier)
-    , m_isOfflineContext(false)
-    , m_activeSourceCount(0)
-    , m_restrictions(NoRestrictions)
 {
     constructCommon();
 
-    m_destinationNode = DefaultAudioDestinationNode::create(this);
+    m_destinationNode = DefaultAudioDestinationNode::create(*this);
 
-    // This sets in motion an asynchronous loading mechanism on another thread.
-    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
-    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
-    // when this has finished (see AudioDestinationNode).
-    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
+    // Initialize the destination node's muted state to match the page's current muted state.
+    pageMutedStateDidChange();
}
 
 // Constructor for offline (non-realtime) rendering.
 AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
     : ActiveDOMObject(&document)
-    , m_isStopScheduled(false)
-    , m_isInitialized(false)
-    , m_isAudioThreadFinished(false)
-    , m_destinationNode(0)
-    , m_automaticPullNodesNeedUpdating(false)
-    , m_connectionCount(0)
-    , m_audioThread(0)
-    , m_graphOwnerThread(UndefinedThreadIdentifier)
     , m_isOfflineContext(true)
-    , m_activeSourceCount(0)
-    , m_restrictions(NoRestrictions)
+    , m_mediaSession(PlatformMediaSession::create(*this))
+    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
+    , m_graphOwnerThread(UndefinedThreadIdentifier)
 {
     constructCommon();
 
-    // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
-    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
-
     // Create a new destination for offline rendering.
     m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
-    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
+    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
 }
 
 void AudioContext::constructCommon()
@@ -190,13 +172,13 @@ void AudioContext::constructCommon()
     m_listener = AudioListener::create();
 
 #if PLATFORM(IOS)
-    if (!document()->settings() || document()->settings()->mediaPlaybackRequiresUserGesture())
+    if (document()->settings().audioPlaybackRequiresUserGesture())
         addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
     else
         m_restrictions = NoRestrictions;
 #endif
 
-#if PLATFORM(MAC)
+#if PLATFORM(COCOA)
     addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
 #endif
 }
@@ -206,47 +188,50 @@ AudioContext::~AudioContext()
 #if DEBUG_AUDIONODE_REFERENCES
     fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
 #endif
-
     // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
     ASSERT(!m_isInitialized);
     ASSERT(m_isStopScheduled);
-    ASSERT(!m_nodesToDelete.size());
-    ASSERT(!m_referencedNodes.size());
-    ASSERT(!m_finishedNodes.size());
-    ASSERT(!m_automaticPullNodes.size());
+    ASSERT(m_nodesToDelete.isEmpty());
+    ASSERT(m_referencedNodes.isEmpty());
+    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
+    ASSERT(m_automaticPullNodes.isEmpty());
     if (m_automaticPullNodesNeedUpdating)
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
-    ASSERT(!m_renderingAutomaticPullNodes.size());
+    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
+    // FIXME: Can we assert that m_deferredFinishDerefList is empty?
 }
 
 void AudioContext::lazyInitialize()
 {
-    if (!m_isInitialized) {
-        // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
-        ASSERT(!m_isAudioThreadFinished);
-        if (!m_isAudioThreadFinished) {
-            if (m_destinationNode.get()) {
-                m_destinationNode->initialize();
-
-                if (!isOfflineContext()) {
-                    // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
-                    // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
-                    // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
-                    // We may want to consider requiring it for symmetry with OfflineAudioContext.
-                    startRendering();
-                    ++s_hardwareContextCount;
-                }
-
-            }
-            m_isInitialized = true;
+    if (m_isInitialized)
+        return;
+
+    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
+    ASSERT(!m_isAudioThreadFinished);
+    if (m_isAudioThreadFinished)
+        return;
+
+    if (m_destinationNode) {
+        m_destinationNode->initialize();
+
+        if (!isOfflineContext()) {
+            document()->addAudioProducer(this);
+
+            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
+            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
+            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
+            // We may want to consider requiring it for symmetry with OfflineAudioContext.
+            startRendering();
+            ++s_hardwareContextCount;
         }
     }
+    m_isInitialized = true;
 }
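The "render quantum" comment kept in lazyInitialize() describes a pull model: the destination drives the graph by pulling one fixed-size block of samples per hardware callback. The following standalone sketch illustrates that model; the names and the 128-frame quantum size are assumptions for illustration, not WebKit's actual AudioDestinationNode interface.

    // Illustrative sketch of the pull-based "render quantum" model described in
    // lazyInitialize(). RenderableGraph, hardwareCallback, and the 128-frame
    // quantum are made up for this example; WebKit's real interface lives in
    // AudioDestinationNode.
    #include <array>
    #include <cstddef>

    constexpr std::size_t renderQuantumFrames = 128;

    struct RenderableGraph {
        // Fills one render quantum worth of samples (here: silence).
        void provideInput(std::array<float, renderQuantumFrames>& bus)
        {
            bus.fill(0.0f);
        }
    };

    // The destination drives rendering: each hardware callback pulls one quantum.
    void hardwareCallback(RenderableGraph& graph)
    {
        std::array<float, renderQuantumFrames> bus;
        graph.provideInput(bus); // one "render quantum" is produced per pull
        // ...hand `bus` to the audio hardware here...
    }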
 
 void AudioContext::clear()
 {
     // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
     if (m_destinationNode)
-        m_destinationNode.clear();
+        m_destinationNode = nullptr;
 
     // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
     do {
@@ -273,8 +258,13 @@ void AudioContext::uninitialize()
     m_isAudioThreadFinished = true;
 
     if (!isOfflineContext()) {
+        document()->removeAudioProducer(this);
+
         ASSERT(s_hardwareContextCount);
         --s_hardwareContextCount;
+
+        // Offline contexts move to 'Closed' state when dispatching the completion event.
+        setState(State::Closed);
     }
 
     // Get rid of the sources which may still be playing.
@@ -288,329 +278,293 @@ bool AudioContext::isInitialized() const
     return m_isInitialized;
 }
 
-bool AudioContext::isRunnable() const
+void AudioContext::addReaction(State state, DOMPromise<void>&& promise)
 {
-    if (!isInitialized())
-        return false;
-
-    // Check with the HRTF spatialization system to see if it's finished loading.
-    return m_hrtfDatabaseLoader->isLoaded();
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
+        m_stateReactions.resize(stateIndex + 1);
+
+    m_stateReactions[stateIndex].append(WTFMove(promise));
 }
 
-void AudioContext::stopDispatch(void* userData)
+void AudioContext::setState(State state)
 {
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
+    if (m_state == state)
+        return;
+
+    m_state = state;
+    m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, true, false));
+
+    size_t stateIndex = static_cast<size_t>(state);
+    if (stateIndex >= m_stateReactions.size())
        return;
 
-    context->uninitialize();
-    context->clear();
+    Vector<DOMPromise<void>> reactions;
+    m_stateReactions[stateIndex].swap(reactions);
+
+    for (auto& promise : reactions)
+        promise.resolve();
 }
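The addReaction()/setState() pair above is a small promise-reaction table: one queue of pending promises per target state, all resolved in bulk when that transition lands. A self-contained sketch of the same pattern follows; std::function<void()> stands in for DOMPromise<void> (an assumption for illustration), and "resolving" a promise means invoking it.

    // Standalone sketch of the per-state reaction table used by addReaction()/setState().
    // std::function<void()> is a stand-in for DOMPromise<void>.
    #include <cstddef>
    #include <functional>
    #include <utility>
    #include <vector>

    enum class State { Suspended, Running, Interrupted, Closed };

    class StateReactions {
    public:
        void addReaction(State state, std::function<void()>&& resolve)
        {
            std::size_t index = static_cast<std::size_t>(state);
            if (index >= m_reactions.size())
                m_reactions.resize(index + 1);
            m_reactions[index].push_back(std::move(resolve));
        }

        void setState(State state)
        {
            if (m_state == state)
                return;
            m_state = state;
            std::size_t index = static_cast<std::size_t>(state);
            if (index >= m_reactions.size())
                return;
            // Swap the queue out first so a reaction that re-enters addReaction() is safe.
            std::vector<std::function<void()>> reactions;
            reactions.swap(m_reactions[index]);
            for (auto& resolve : reactions)
                resolve();
        }

    private:
        State m_state { State::Suspended };
        std::vector<std::vector<std::function<void()>>> m_reactions;
    };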
 
 void AudioContext::stop()
 {
+    ASSERT(isMainThread());
+
     // Usually ScriptExecutionContext calls stop twice.
     if (m_isStopScheduled)
         return;
     m_isStopScheduled = true;
 
+    document()->updateIsPlayingMedia();
+
+    m_eventQueue->close();
+
     // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
     // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
     // ActiveDOMObjects so let's schedule uninitialize() to be called later.
     // FIXME: see if there's a more direct way to handle this issue.
-    callOnMainThread(stopDispatch, this);
+    // FIXME: This sounds very wrong. The whole idea of stop() is that it stops everything, and if we
+    // schedule some observable work for later, the work likely happens at an inappropriate time.
+    callOnMainThread([this] {
+        uninitialize();
+        clear();
+    });
 }
 
-Document* AudioContext::document() const
+bool AudioContext::canSuspendForDocumentSuspension() const
 {
-    ASSERT(m_scriptExecutionContext && m_scriptExecutionContext->isDocument());
-    return static_cast<Document*>(m_scriptExecutionContext);
+    // FIXME: We should be able to suspend while rendering as well with some more code.
+    return m_state == State::Suspended || m_state == State::Closed;
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
+const char* AudioContext::activeDOMObjectName() const
 {
-    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
-    if (!audioBuffer.get()) {
-        ec = NOT_SUPPORTED_ERR;
-        return nullptr;
-    }
+    return "AudioContext";
+}
 
-    return audioBuffer;
+Document* AudioContext::document() const
+{
+    ASSERT(m_scriptExecutionContext);
+    return downcast<Document>(m_scriptExecutionContext);
 }
 
-PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
+const Document* AudioContext::hostingDocument() const
 {
-    ASSERT(arrayBuffer);
-    if (!arrayBuffer) {
-        ec = SYNTAX_ERR;
-        return nullptr;
-    }
+    return downcast<Document>(m_scriptExecutionContext);
+}
 
-    RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
-    if (!audioBuffer.get()) {
-        ec = SYNTAX_ERR;
-        return nullptr;
+String AudioContext::sourceApplicationIdentifier() const
+{
+    Document* document = this->document();
+    if (Frame* frame = document ? document->frame() : nullptr) {
+        if (NetworkingContext* networkingContext = frame->loader().networkingContext())
+            return networkingContext->sourceApplicationIdentifier();
     }
+    return emptyString();
+}
 
-    return audioBuffer;
+ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
+{
+    auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
+    if (!audioBuffer)
+        return Exception { NOT_SUPPORTED_ERR };
+    return audioBuffer.releaseNonNull();
 }
 
-void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
+ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
 {
-    if (!audioData) {
-        ec = SYNTAX_ERR;
-        return;
-    }
-    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
+    auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
+    if (!audioBuffer)
+        return Exception { SYNTAX_ERR };
+    return audioBuffer.releaseNonNull();
+}
+
+void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
+{
+    m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
 }
 
-PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
+Ref<AudioBufferSourceNode> AudioContext::createBufferSource()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
+    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);
 
     return node;
 }
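The factory methods above trade ExceptionCode out-parameters for ExceptionOr<Ref<T>> return values, so failure travels in the return channel instead of through a side slot. A hedged sketch of the caller side follows; handleCreateBuffer() is a hypothetical helper that is not part of this patch, while the accessors follow WTF::ExceptionOr's hasException()/releaseException()/releaseReturnValue().

    // Sketch of consuming an ExceptionOr<Ref<AudioBuffer>> result, e.g. from
    // generated bindings. handleCreateBuffer() is hypothetical; the accessor
    // names mirror WTF::ExceptionOr.
    ExceptionOr<Ref<AudioBuffer>> handleCreateBuffer(AudioContext& context)
    {
        auto result = context.createBuffer(2, 44100, 44100.0f);
        if (result.hasException())
            return result.releaseException(); // propagate the Exception upward
        Ref<AudioBuffer> buffer = result.releaseReturnValue();
        // ...use buffer; here we simply hand it back...
        return WTFMove(buffer);
    }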
 
 #if ENABLE(VIDEO)
-PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
+
+ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
 {
-    ASSERT(mediaElement);
-    if (!mediaElement) {
-        ec = INVALID_STATE_ERR;
-        return nullptr;
-    }
-
     ASSERT(isMainThread());
     lazyInitialize();
 
-    // First check if this media element already has a source node.
-    if (mediaElement->audioSourceNode()) {
-        ec = INVALID_STATE_ERR;
-        return nullptr;
-    }
-
-    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
+    if (mediaElement.audioSourceNode())
+        return Exception { INVALID_STATE_ERR };
+
+    auto node = MediaElementAudioSourceNode::create(*this, mediaElement);
 
-    mediaElement->setAudioSourceNode(node.get());
+    mediaElement.setAudioSourceNode(node.ptr());
 
     refNode(node.get()); // context keeps reference until node is disconnected
-    return node;
+    return WTFMove(node);
 }
+
 #endif
 
 #if ENABLE(MEDIA_STREAM)
-PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
-{
-    ASSERT(mediaStream);
-    if (!mediaStream) {
-        ec = INVALID_STATE_ERR;
-        return nullptr;
-    }
 
+ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
+{
     ASSERT(isMainThread());
-    lazyInitialize();
-
-    AudioSourceProvider* provider = 0;
-    Vector<RefPtr<MediaStreamTrack>> audioTracks = mediaStream->getAudioTracks();
-    RefPtr<MediaStreamTrack> audioTrack;
+    auto audioTracks = mediaStream.getAudioTracks();
+    if (audioTracks.isEmpty())
+        return Exception { INVALID_STATE_ERR };
 
-    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
-    for (size_t i = 0; i < audioTracks.size(); ++i) {
-        audioTrack = audioTracks[i];
-        if (audioTrack->source()->isAudioStreamSource()) {
-            auto source = static_cast<MediaStreamAudioSource*>(audioTrack->source());
-            ASSERT(!source->deviceId().isEmpty());
-            destination()->enableInput(source->deviceId());
-            provider = destination()->localAudioInputProvider();
+    MediaStreamTrack* providerTrack = nullptr;
+    for (auto& track : audioTracks) {
+        if (track->audioSourceProvider()) {
+            providerTrack = track.get();
             break;
         }
     }
+    if (!providerTrack)
+        return Exception { INVALID_STATE_ERR };
 
-    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider);
+    lazyInitialize();
 
-    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
+    auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
     node->setFormat(2, sampleRate());
 
-    refNode(node.get()); // context keeps reference until node is disconnected
-    return node;
+    refNode(node); // context keeps reference until node is disconnected
+    return WTFMove(node);
 }
 
-PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
+Ref<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
 {
     // FIXME: Add support for an optional argument which specifies the number of channels.
     // FIXME: The default should probably be stereo instead of mono.
-    return MediaStreamAudioDestinationNode::create(this, 1);
+    return MediaStreamAudioDestinationNode::create(*this, 1);
 }
 
 #endif
 
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
-{
-    // Set number of input/output channels to stereo by default.
-    return createScriptProcessor(bufferSize, 2, 2, ec);
-}
-
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
-{
-    // Set number of output channels to stereo by default.
-    return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
-}
-
-PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
+ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
+    auto node = ScriptProcessorNode::create(*this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
 
-    if (!node.get()) {
-        ec = INDEX_SIZE_ERR;
-        return nullptr;
-    }
+    if (!node)
+        return Exception { INDEX_SIZE_ERR };
 
-    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
-    return node;
+    refNode(*node); // context keeps reference until we stop making javascript rendering callbacks
+    return node.releaseNonNull();
 }
 
-PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
+Ref<BiquadFilterNode> AudioContext::createBiquadFilter()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
+    return BiquadFilterNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
+Ref<WaveShaperNode> AudioContext::createWaveShaper()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return WaveShaperNode::create(this);
+    return WaveShaperNode::create(*this);
 }
 
-PassRefPtr<PannerNode> AudioContext::createPanner()
+Ref<PannerNode> AudioContext::createPanner()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return PannerNode::create(this, m_destinationNode->sampleRate());
+    return PannerNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<ConvolverNode> AudioContext::createConvolver()
+Ref<ConvolverNode> AudioContext::createConvolver()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return ConvolverNode::create(this, m_destinationNode->sampleRate());
+    return ConvolverNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
+Ref<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
+    return DynamicsCompressorNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
+Ref<AnalyserNode> AudioContext::createAnalyser()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return AnalyserNode::create(this, m_destinationNode->sampleRate());
+    return AnalyserNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<GainNode> AudioContext::createGain()
+Ref<GainNode> AudioContext::createGain()
 {
     ASSERT(isMainThread());
     lazyInitialize();
-    return GainNode::create(this, m_destinationNode->sampleRate());
+    return GainNode::create(*this, m_destinationNode->sampleRate());
 }
 
-PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
-{
-    const double defaultMaxDelayTime = 1;
-    return createDelay(defaultMaxDelayTime, ec);
-}
-
-PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
+ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
 {
     ASSERT(isMainThread());
     lazyInitialize();
-
-    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
-    if (ec)
-        return nullptr;
-    return node;
+    return DelayNode::create(*this, m_destinationNode->sampleRate(), maxDelayTime);
 }
 
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
-{
-    const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
-    return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
-}
-
-PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
+ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
 {
     ASSERT(isMainThread());
     lazyInitialize();
-
-    RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
-
-    if (!node.get()) {
-        ec = SYNTAX_ERR;
-        return nullptr;
-    }
-
-    return node;
+    auto node = ChannelSplitterNode::create(*this, m_destinationNode->sampleRate(), numberOfOutputs);
+    if (!node)
+        return Exception { INDEX_SIZE_ERR };
+    return node.releaseNonNull();
 }
 
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
-{
-    const unsigned ChannelMergerDefaultNumberOfInputs = 6;
-    return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
-}
-
-PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
+ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
 {
     ASSERT(isMainThread());
     lazyInitialize();
-
-    RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
-
-    if (!node.get()) {
-        ec = SYNTAX_ERR;
-        return nullptr;
-    }
-
-    return node;
+    auto node = ChannelMergerNode::create(*this, m_destinationNode->sampleRate(), numberOfInputs);
+    if (!node)
+        return Exception { INDEX_SIZE_ERR };
+    return node.releaseNonNull();
 }
 
-PassRefPtr<OscillatorNode> AudioContext::createOscillator()
+Ref<OscillatorNode> AudioContext::createOscillator()
 {
     ASSERT(isMainThread());
     lazyInitialize();
 
-    RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
+    Ref<OscillatorNode> node = OscillatorNode::create(*this, m_destinationNode->sampleRate());
 
     // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
     // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
-    refNode(node.get());
+    refNode(node);
 
     return node;
 }
 
-PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
+ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
 {
     ASSERT(isMainThread());
-
-    if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
-        ec = SYNTAX_ERR;
-        return nullptr;
-    }
-
+    if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
+        return Exception { INDEX_SIZE_ERR };
     lazyInitialize();
-    return PeriodicWave::create(sampleRate(), real, imag);
+    return PeriodicWave::create(sampleRate(), real, imaginary);
 }
 
 void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
@@ -623,40 +577,36 @@ void AudioContext::derefFinishedSourceNodes()
 {
     ASSERT(isGraphOwner());
     ASSERT(isAudioThread() || isAudioThreadFinished());
-    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
-        derefNode(m_finishedNodes[i]);
+    for (auto& node : m_finishedNodes)
+        derefNode(*node);
 
     m_finishedNodes.clear();
 }
 
-void AudioContext::refNode(AudioNode* node)
+void AudioContext::refNode(AudioNode& node)
 {
     ASSERT(isMainThread());
     AutoLocker locker(*this);
 
-    node->ref(AudioNode::RefTypeConnection);
-    m_referencedNodes.append(node);
+    node.ref(AudioNode::RefTypeConnection);
+    m_referencedNodes.append(&node);
 }
 
-void AudioContext::derefNode(AudioNode* node)
+void AudioContext::derefNode(AudioNode& node)
 {
     ASSERT(isGraphOwner());
 
-    node->deref(AudioNode::RefTypeConnection);
+    node.deref(AudioNode::RefTypeConnection);
 
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
-        if (node == m_referencedNodes[i]) {
-            m_referencedNodes.remove(i);
-            break;
-        }
-    }
+    ASSERT(m_referencedNodes.contains(&node));
+    m_referencedNodes.removeFirst(&node);
 }
 
 void AudioContext::derefUnfinishedSourceNodes()
 {
     ASSERT(isMainThread() && isAudioThreadFinished());
-    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
-        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
+    for (auto& node : m_referencedNodes)
+        node->deref(AudioNode::RefTypeConnection);
 
     m_referencedNodes.clear();
 }
@@ -788,10 +738,8 @@ void AudioContext::handlePostRenderTasks()
 void AudioContext::handleDeferredFinishDerefs()
 {
     ASSERT(isAudioThread() && isGraphOwner());
-    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
-        AudioNode* node = m_deferredFinishDerefList[i];
+    for (auto& node : m_deferredFinishDerefList)
         node->finishDeref(AudioNode::RefTypeConnection);
-    }
 
     m_deferredFinishDerefList.clear();
 }
@@ -826,36 +774,23 @@ void AudioContext::scheduleNodeDeletion()
 
         m_isDeletionScheduled = true;
 
-        // Don't let ourself get deleted before the callback.
-        // See matching deref() in deleteMarkedNodesDispatch().
-        ref();
-
-        callOnMainThread(deleteMarkedNodesDispatch, this);
+        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
+            protectedThis->deleteMarkedNodes();
+        });
     }
 }
 
-void AudioContext::deleteMarkedNodesDispatch(void* userData)
-{
-    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
-    ASSERT(context);
-    if (!context)
-        return;
-
-    context->deleteMarkedNodes();
-    context->deref();
-}
-
 void AudioContext::deleteMarkedNodes()
 {
     ASSERT(isMainThread());
 
     // Protect this object from being deleted before we release the mutex locked by AutoLocker.
-    Ref<AudioContext> protect(*this);
+    Ref<AudioContext> protectedThis(*this);
     {
         AutoLocker locker(*this);
 
-        while (size_t n = m_nodesToDelete.size()) {
-            AudioNode* node = m_nodesToDelete[n - 1];
-            m_nodesToDelete.removeLast();
+        while (m_nodesToDelete.size()) {
+            AudioNode* node = m_nodesToDelete.takeLast();
 
             // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
             unsigned numberOfInputs = node->numberOfInputs();
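scheduleNodeDeletion() above replaces the manual ref()-before / deref()-in-callback pairing (and the deleteMarkedNodesDispatch trampoline) with a lambda capturing protectedThis = makeRef(*this), which pins the context until the main-thread task has run. A generic sketch of the idiom follows; GraphOwner and cleanupOnMainThread() are illustrative names, while makeRef/callOnMainThread mirror WTF's helpers.

    // Illustrative sketch of the cross-thread self-protection idiom.
    // GraphOwner and cleanupOnMainThread() are made up for this example;
    // makeRef/callOnMainThread mirror WTF's helpers.
    class GraphOwner : public ThreadSafeRefCounted<GraphOwner> {
    public:
        void scheduleCleanup()
        {
            // The captured Ref<> keeps this object alive until the lambda runs,
            // replacing the old explicit ref()/deref() bookkeeping.
            callOnMainThread([protectedThis = makeRef(*this)]() mutable {
                protectedThis->cleanupOnMainThread();
            });
        }

    private:
        void cleanupOnMainThread();
    };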
@@ -897,8 +832,8 @@ void AudioContext::handleDirtyAudioSummingJunctions()
 {
     ASSERT(isGraphOwner());
 
-    for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& junction : m_dirtySummingJunctions)
+        junction->updateRenderingState();
 
     m_dirtySummingJunctions.clear();
 }
@@ -907,8 +842,8 @@ void AudioContext::handleDirtyAudioNodeOutputs()
 {
     ASSERT(isGraphOwner());
 
-    for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
-        (*i)->updateRenderingState();
+    for (auto& output : m_dirtyAudioNodeOutputs)
+        output->updateRenderingState();
 
     m_dirtyAudioNodeOutputs.clear();
 }
@@ -937,11 +872,9 @@ void AudioContext::updateAutomaticPullNodes()
         // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
         m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
 
-        unsigned j = 0;
-        for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
-            AudioNode* output = *i;
-            m_renderingAutomaticPullNodes[j] = output;
-        }
+        unsigned i = 0;
+        for (auto& output : m_automaticPullNodes)
+            m_renderingAutomaticPullNodes[i++] = output;
 
         m_automaticPullNodesNeedUpdating = false;
     }
@@ -951,8 +884,8 @@ void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
 {
     ASSERT(isAudioThread());
 
-    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
-        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
+    for (auto& node : m_renderingAutomaticPullNodes)
+        node->processIfNecessary(framesToProcess);
 }
 
 ScriptExecutionContext* AudioContext::scriptExecutionContext() const
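updateAutomaticPullNodes() keeps two containers on purpose: the HashSet is mutated by the graph owner, and the flat Vector snapshot (m_renderingAutomaticPullNodes) is what the rendering code iterates in processAutomaticPullNodes(). A minimal sketch of that snapshot pattern, with std containers standing in for WTF's HashSet and Vector:

    // Sketch of the "snapshot under the graph lock, iterate the snapshot while
    // rendering" pattern; std containers stand in for WTF's HashSet and Vector.
    #include <cstddef>
    #include <mutex>
    #include <unordered_set>
    #include <vector>

    struct PullNode {
        void processIfNecessary(std::size_t /*framesToProcess*/) { /* render */ }
    };

    class AutomaticPullNodes {
    public:
        void update() // graph-owner side, serialized by the lock
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (!m_needsUpdate)
                return;
            m_rendering.assign(m_nodes.begin(), m_nodes.end());
            m_needsUpdate = false;
        }

        void process(std::size_t framesToProcess) // render side reads only the snapshot
        {
            for (auto* node : m_rendering)
                node->processIfNecessary(framesToProcess);
        }

    private:
        std::mutex m_mutex;
        std::unordered_set<PullNode*> m_nodes;
        std::vector<PullNode*> m_rendering;
        bool m_needsUpdate { true };
    };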
@@ -960,24 +893,97 @@ ScriptExecutionContext* AudioContext::scriptExecutionContext() const
     return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
 }
 
-void AudioContext::startRendering()
+void AudioContext::nodeWillBeginPlayback()
+{
+    // Called by scheduled AudioNodes when clients schedule their start times.
+    // Prior to the introduction of suspend(), resume(), and stop(), starting
+    // a scheduled AudioNode would remove the user-gesture restriction, if present,
+    // and would thus unmute the context. Now that AudioContext stays in the
+    // "suspended" state if a user-gesture restriction is present, starting a
+    // schedule AudioNode should set the state to "running", but only if the
+    // user-gesture restriction is set.
+    if (userGestureRequiredForAudioStart())
+        startRendering();
+}
+
+bool AudioContext::willBeginPlayback()
 {
-    if (ScriptController::processingUserGesture())
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
         removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
+    }
 
     if (pageConsentRequiredForAudioStart()) {
         Page* page = document()->page();
-        if (page && !page->canStartMedia())
+        if (page && !page->canStartMedia()) {
             document()->addMediaCanStartListener(this);
-        else
-            removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+
+    return m_mediaSession->clientWillBeginPlayback();
+}
+
+bool AudioContext::willPausePlayback()
+{
+    if (userGestureRequiredForAudioStart()) {
+        if (!ScriptController::processingUserGestureForMedia())
+            return false;
+        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
     }
+
+    if (pageConsentRequiredForAudioStart()) {
+        Page* page = document()->page();
+        if (page && !page->canStartMedia()) {
+            document()->addMediaCanStartListener(this);
+            return false;
+        }
+        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    }
+
+    return m_mediaSession->clientWillPausePlayback();
+}
+
+void AudioContext::startRendering()
+{
+    if (!willBeginPlayback())
+        return;
 
     destination()->startRendering();
+    setState(State::Running);
 }
 
-void AudioContext::mediaCanStart()
+void AudioContext::mediaCanStart(Document& document)
 {
+    ASSERT_UNUSED(document, &document == this->document());
     removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
+    mayResumePlayback(true);
+}
+
+MediaProducer::MediaStateFlags AudioContext::mediaState() const
+{
+    if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
+        return MediaProducer::IsPlayingAudio;
+
+    return MediaProducer::IsNotPlaying;
+}
+
+void AudioContext::pageMutedStateDidChange()
+{
+    if (m_destinationNode && document()->page())
+        m_destinationNode->setMuted(document()->page()->isAudioMuted());
+}
+
+void AudioContext::isPlayingAudioDidChange()
+{
+    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
+    // we could be on the audio I/O thread here and the call into WebCore could block.
+    callOnMainThread([protectedThis = makeRef(*this)] {
+        if (protectedThis->document())
+            protectedThis->document()->updateIsPlayingMedia();
+    });
 }
 
 void AudioContext::fireCompletionEvent()
@@ -987,6 +993,7 @@ void AudioContext::fireCompletionEvent()
         return;
 
     AudioBuffer* renderedBuffer = m_renderTarget.get();
+    setState(State::Closed);
 
     ASSERT(renderedBuffer);
     if (!renderedBuffer)
@@ -995,7 +1002,7 @@ void AudioContext::fireCompletionEvent()
     // Avoid firing the event if the document has already gone away.
     if (scriptExecutionContext()) {
         // Call the offline rendering completion event listener.
-        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
+        m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
     }
 }
@@ -1009,6 +1016,127 @@ void AudioContext::decrementActiveSourceCount()
     --m_activeSourceCount;
 }
 
+void AudioContext::suspend(DOMPromise<void>&& promise)
+{
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Suspended) {
+        promise.resolve();
+        return;
+    }
+
+    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
+        promise.reject();
+        return;
+    }
+
+    addReaction(State::Suspended, WTFMove(promise));
+
+    if (!willPausePlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+        setState(State::Suspended);
+    });
+}
+
+void AudioContext::resume(DOMPromise<void>&& promise)
+{
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Running) {
+        promise.resolve();
+        return;
+    }
+
+    if (m_state == State::Closed || !m_destinationNode) {
+        promise.reject();
+        return;
+    }
+
+    addReaction(State::Running, WTFMove(promise));
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+        setState(State::Running);
+    });
+}
+
+void AudioContext::close(DOMPromise<void>&& promise)
+{
+    if (isOfflineContext()) {
+        promise.reject(INVALID_STATE_ERR);
+        return;
+    }
+
+    if (m_state == State::Closed || !m_destinationNode) {
+        promise.resolve();
+        return;
+    }
+
+    addReaction(State::Closed, WTFMove(promise));
+
+    lazyInitialize();
+
+    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
+        setState(State::Closed);
+        uninitialize();
+    });
+}
+
+
+void AudioContext::suspendPlayback()
+{
+    if (!m_destinationNode || m_state == State::Closed)
+        return;
+
+    if (m_state == State::Suspended) {
+        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
+            setState(State::Interrupted);
+        return;
+    }
+
+    lazyInitialize();
+
+    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
+        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
+        setState(interrupted ? State::Interrupted : State::Suspended);
+    });
+}
+
+void AudioContext::mayResumePlayback(bool shouldResume)
+{
+    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
+        return;
+
+    if (!shouldResume) {
+        setState(State::Suspended);
+        return;
+    }
+
+    if (!willBeginPlayback())
+        return;
+
+    lazyInitialize();
+
+    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
+        setState(State::Running);
+    });
+}
+
+
 } // namespace WebCore
 
 #endif // ENABLE(WEB_AUDIO)
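The new suspend()/resume()/close() entry points share one shape: settle the promise synchronously in the trivial states, otherwise park it with addReaction() and let the destination's asynchronous completion handler call setState(), which resolves everything parked on that state. A toy model of the suspend() flow follows; Promise, ContextModel, and the suspendDestination callback are stand-ins for illustration, not WebKit types.

    // Toy model of the suspend() control flow: synchronous fast paths first,
    // then park the promise and settle it from the async completion handler.
    // Promise, ContextModel, and suspendDestination are stand-ins, not WebKit types.
    #include <functional>
    #include <utility>
    #include <vector>

    enum class ContextState { Suspended, Running, Interrupted, Closed };

    struct Promise {
        std::function<void()> resolve;
        std::function<void()> reject;
    };

    struct ContextModel {
        ContextState state { ContextState::Running };
        bool offline { false };
        std::vector<Promise> suspendReactions; // parked until the transition lands

        void suspend(Promise&& promise, const std::function<void(std::function<void()>)>& suspendDestination)
        {
            if (offline)
                return promise.reject();       // offline contexts cannot be suspended
            if (state == ContextState::Suspended)
                return promise.resolve();      // already suspended: settle immediately
            if (state == ContextState::Closed || state == ContextState::Interrupted)
                return promise.reject();       // no legal transition from here

            suspendReactions.push_back(std::move(promise));
            suspendDestination([this] {        // async: runs once rendering has stopped
                state = ContextState::Suspended;
                for (auto& reaction : suspendReactions)
                    reaction.resolve();
                suspendReactions.clear();
            });
        }
    };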