summaryrefslogtreecommitdiff
path: root/Source/WebCore/Modules/webaudio/AudioContext.h
diff options
context:
space:
mode:
Diffstat (limited to 'Source/WebCore/Modules/webaudio/AudioContext.h')
-rw-r--r-- Source/WebCore/Modules/webaudio/AudioContext.h | 251
1 files changed, 146 insertions, 105 deletions
diff --git a/Source/WebCore/Modules/webaudio/AudioContext.h b/Source/WebCore/Modules/webaudio/AudioContext.h
index 1e965d9ad..c631f1f19 100644
--- a/Source/WebCore/Modules/webaudio/AudioContext.h
+++ b/Source/WebCore/Modules/webaudio/AudioContext.h
@@ -1,5 +1,6 @@
/*
- * Copyright (C) 2010, Google Inc. All rights reserved.
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -22,8 +23,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AudioContext_h
-#define AudioContext_h
+#pragma once
#include "ActiveDOMObject.h"
#include "AsyncAudioDecoder.h"
@@ -31,12 +31,13 @@
#include "AudioDestinationNode.h"
#include "EventListener.h"
#include "EventTarget.h"
+#include "JSDOMPromise.h"
#include "MediaCanStartListener.h"
+#include "MediaProducer.h"
+#include "PlatformMediaSession.h"
#include <atomic>
#include <wtf/HashSet.h>
#include <wtf/MainThread.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
@@ -45,41 +46,39 @@
namespace WebCore {
+class AnalyserNode;
class AudioBuffer;
class AudioBufferCallback;
class AudioBufferSourceNode;
-class MediaElementAudioSourceNode;
-class MediaStreamAudioDestinationNode;
-class MediaStreamAudioSourceNode;
-class HRTFDatabaseLoader;
-class HTMLMediaElement;
-class ChannelMergerNode;
-class ChannelSplitterNode;
-class GainNode;
-class PannerNode;
class AudioListener;
class AudioSummingJunction;
class BiquadFilterNode;
+class ChannelMergerNode;
+class ChannelSplitterNode;
+class ConvolverNode;
class DelayNode;
class Document;
-class ConvolverNode;
class DynamicsCompressorNode;
-class AnalyserNode;
-class WaveShaperNode;
-class ScriptProcessorNode;
+class GainNode;
+class GenericEventQueue;
+class HTMLMediaElement;
+class MediaElementAudioSourceNode;
+class MediaStream;
+class MediaStreamAudioDestinationNode;
+class MediaStreamAudioSourceNode;
class OscillatorNode;
+class PannerNode;
class PeriodicWave;
+class ScriptProcessorNode;
+class WaveShaperNode;
// AudioContext is the cornerstone of the web audio API and all AudioNodes are created from it.
// For thread safety between the audio thread and the main thread, it has a rendering graph locking mechanism.
-class AudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData, public MediaCanStartListener {
+class AudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<AudioContext>, public EventTargetWithInlineData, public MediaCanStartListener, public MediaProducer, private PlatformMediaSessionClient {
public:
// Create an AudioContext for rendering to the audio hardware.
- static PassRefPtr<AudioContext> create(Document&, ExceptionCode&);
-
- // Create an AudioContext for offline (non-realtime) rendering.
- static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
+ static RefPtr<AudioContext> create(Document&);
virtual ~AudioContext();
@@ -87,16 +86,10 @@ public:
bool isOfflineContext() { return m_isOfflineContext; }
- // Returns true when initialize() was called AND all asynchronous initialization has completed.
- bool isRunnable() const;
-
- HRTFDatabaseLoader* hrtfDatabaseLoader() const { return m_hrtfDatabaseLoader.get(); }
-
- // Document notification
- virtual void stop() override;
-
Document* document() const; // ASSERTs if document no longer exists.
+ const Document* hostingDocument() const override;
+
AudioDestinationNode* destination() { return m_destinationNode.get(); }
size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
double currentTime() const { return m_destinationNode->currentTime(); }
@@ -106,41 +99,46 @@ public:
void incrementActiveSourceCount();
void decrementActiveSourceCount();
- PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
- PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
+ ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
+ ExceptionOr<Ref<AudioBuffer>> createBuffer(ArrayBuffer&, bool mixToMono);
// Asynchronous audio file data decoding.
- void decodeAudioData(ArrayBuffer*, PassRefPtr<AudioBufferCallback>, PassRefPtr<AudioBufferCallback>, ExceptionCode& ec);
+ void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&);
AudioListener* listener() { return m_listener.get(); }
+ using ActiveDOMObject::suspend;
+ using ActiveDOMObject::resume;
+
+ void suspend(DOMPromise<void>&&);
+ void resume(DOMPromise<void>&&);
+ void close(DOMPromise<void>&&);
+
+ enum class State { Suspended, Running, Interrupted, Closed };
+ State state() const;
+
// The AudioNode create methods are called on the main thread (from JavaScript).
- PassRefPtr<AudioBufferSourceNode> createBufferSource();
+ Ref<AudioBufferSourceNode> createBufferSource();
#if ENABLE(VIDEO)
- PassRefPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionCode&);
+ ExceptionOr<Ref<MediaElementAudioSourceNode>> createMediaElementSource(HTMLMediaElement&);
#endif
#if ENABLE(MEDIA_STREAM)
- PassRefPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionCode&);
- PassRefPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination();
+ ExceptionOr<Ref<MediaStreamAudioSourceNode>> createMediaStreamSource(MediaStream&);
+ Ref<MediaStreamAudioDestinationNode> createMediaStreamDestination();
#endif
- PassRefPtr<GainNode> createGain();
- PassRefPtr<BiquadFilterNode> createBiquadFilter();
- PassRefPtr<WaveShaperNode> createWaveShaper();
- PassRefPtr<DelayNode> createDelay(ExceptionCode&);
- PassRefPtr<DelayNode> createDelay(double maxDelayTime, ExceptionCode&);
- PassRefPtr<PannerNode> createPanner();
- PassRefPtr<ConvolverNode> createConvolver();
- PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
- PassRefPtr<AnalyserNode> createAnalyser();
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionCode&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode&);
- PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(ExceptionCode&);
- PassRefPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionCode&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionCode&);
- PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionCode&);
- PassRefPtr<OscillatorNode> createOscillator();
- PassRefPtr<PeriodicWave> createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode&);
+ Ref<GainNode> createGain();
+ Ref<BiquadFilterNode> createBiquadFilter();
+ Ref<WaveShaperNode> createWaveShaper();
+ ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
+ Ref<PannerNode> createPanner();
+ Ref<ConvolverNode> createConvolver();
+ Ref<DynamicsCompressorNode> createDynamicsCompressor();
+ Ref<AnalyserNode> createAnalyser();
+ ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
+ ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
+ ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
+ Ref<OscillatorNode> createOscillator();
+ ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Float32Array& real, Float32Array& imaginary);
// When a source node has no more processing to do (has finished playing), then it tells the context to dereference it.
void notifyNodeFinishedProcessing(AudioNode*);
@@ -198,8 +196,8 @@ public:
// Returns true if this thread owns the context's lock.
bool isGraphOwner() const;
- // Returns the maximum numuber of channels we can support.
- static unsigned maxNumberOfChannels() { return MaxNumberOfChannels;}
+ // Returns the maximum number of channels we can support.
+ static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
class AutoLocker {
public:
@@ -234,14 +232,12 @@ public:
void removeMarkedSummingJunction(AudioSummingJunction*);
// EventTarget
- virtual EventTargetInterface eventTargetInterface() const override final { return AudioContextEventTargetInterfaceType; }
- virtual ScriptExecutionContext* scriptExecutionContext() const override final;
-
- DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);
+ EventTargetInterface eventTargetInterface() const final { return AudioContextEventTargetInterfaceType; }
+ ScriptExecutionContext* scriptExecutionContext() const final;
// Reconcile ref/deref which are defined both in ThreadSafeRefCounted and EventTarget.
- using ThreadSafeRefCounted<AudioContext>::ref;
- using ThreadSafeRefCounted<AudioContext>::deref;
+ using ThreadSafeRefCounted::ref;
+ using ThreadSafeRefCounted::deref;
void startRendering();
void fireCompletionEvent();
@@ -256,12 +252,14 @@ public:
};
typedef unsigned BehaviorRestrictions;
- bool userGestureRequiredForAudioStart() const { return m_restrictions & RequireUserGestureForAudioStartRestriction; }
- bool pageConsentRequiredForAudioStart() const { return m_restrictions & RequirePageConsentForAudioStartRestriction; }
-
+ BehaviorRestrictions behaviorRestrictions() const { return m_restrictions; }
void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
+ void isPlayingAudioDidChange();
+
+ void nodeWillBeginPlayback();
+
protected:
explicit AudioContext(Document&);
AudioContext(Document&, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
@@ -274,33 +272,62 @@ private:
void lazyInitialize();
void uninitialize();
- // ScriptExecutionContext calls stop twice.
- // We'd like to schedule only one stop action for them.
- bool m_isStopScheduled;
- static void stopDispatch(void* userData);
+ bool willBeginPlayback();
+ bool willPausePlayback();
+
+ bool userGestureRequiredForAudioStart() const { return m_restrictions & RequireUserGestureForAudioStartRestriction; }
+ bool pageConsentRequiredForAudioStart() const { return m_restrictions & RequirePageConsentForAudioStartRestriction; }
+
+ void setState(State);
+
void clear();
void scheduleNodeDeletion();
- static void deleteMarkedNodesDispatch(void* userData);
- virtual void mediaCanStart() override;
+ void mediaCanStart(Document&) override;
- bool m_isInitialized;
- bool m_isAudioThreadFinished;
+ // MediaProducer
+ MediaProducer::MediaStateFlags mediaState() const override;
+ void pageMutedStateDidChange() override;
// The context itself keeps a reference to all source nodes. The source nodes, then reference all nodes they're connected to.
// In turn, these nodes reference all nodes they're connected to. All nodes are ultimately connected to the AudioDestinationNode.
// When the context dereferences a source node, it will be deactivated from the rendering graph along with all other nodes it is
// uniquely connected to. See the AudioNode::ref() and AudioNode::deref() methods for more details.
- void refNode(AudioNode*);
- void derefNode(AudioNode*);
+ void refNode(AudioNode&);
+ void derefNode(AudioNode&);
+
+ // ActiveDOMObject API.
+ void stop() override;
+ bool canSuspendForDocumentSuspension() const override;
+ const char* activeDOMObjectName() const override;
// When the context goes away, there might still be some sources which haven't finished playing.
// Make sure to dereference them here.
void derefUnfinishedSourceNodes();
- RefPtr<AudioDestinationNode> m_destinationNode;
- RefPtr<AudioListener> m_listener;
+ // PlatformMediaSessionClient
+ PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::WebAudio; }
+ PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::WebAudio; }
+ PlatformMediaSession::CharacteristicsFlags characteristics() const override { return m_state == State::Running ? PlatformMediaSession::HasAudio : PlatformMediaSession::HasNothing; }
+ void mayResumePlayback(bool shouldResume) override;
+ void suspendPlayback() override;
+ bool canReceiveRemoteControlCommands() const override { return false; }
+ void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
+ bool supportsSeeking() const override { return false; }
+ bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
+ String sourceApplicationIdentifier() const override;
+ bool canProduceAudio() const final { return true; }
+
+ // EventTarget
+ void refEventTarget() override { ref(); }
+ void derefEventTarget() override { deref(); }
+
+ void handleDirtyAudioSummingJunctions();
+ void handleDirtyAudioNodeOutputs();
+
+ void addReaction(State, DOMPromise<void>&&);
+ void updateAutomaticPullNodes();
// Only accessed in the audio thread.
Vector<AudioNode*> m_finishedNodes;
@@ -318,42 +345,39 @@ private:
// They will be scheduled for deletion (on the main thread) at the end of a render cycle (in realtime thread).
Vector<AudioNode*> m_nodesToDelete;
- bool m_isDeletionScheduled;
+
+ bool m_isDeletionScheduled { false };
+ bool m_isStopScheduled { false };
+ bool m_isInitialized { false };
+ bool m_isAudioThreadFinished { false };
+ bool m_automaticPullNodesNeedUpdating { false };
+ bool m_isOfflineContext { false };
// Only accessed when the graph lock is held.
HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
- void handleDirtyAudioSummingJunctions();
- void handleDirtyAudioNodeOutputs();
// For the sake of thread safety, we maintain a separate Vector of automatic pull nodes for rendering in m_renderingAutomaticPullNodes.
// It will be copied from m_automaticPullNodes by updateAutomaticPullNodes() at the very start or end of the rendering quantum.
HashSet<AudioNode*> m_automaticPullNodes;
Vector<AudioNode*> m_renderingAutomaticPullNodes;
- // m_automaticPullNodesNeedUpdating keeps track if m_automaticPullNodes is modified.
- bool m_automaticPullNodesNeedUpdating;
- void updateAutomaticPullNodes();
-
- unsigned m_connectionCount;
-
- // Graph locking.
- Mutex m_contextGraphMutex;
- volatile ThreadIdentifier m_audioThread;
- volatile ThreadIdentifier m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == UndefinedThreadIdentifier
-
// Only accessed in the audio thread.
Vector<AudioNode*> m_deferredFinishDerefList;
-
- // HRTF Database loader
- RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;
+ Vector<Vector<DOMPromise<void>>> m_stateReactions;
- // EventTarget
- virtual void refEventTarget() override { ref(); }
- virtual void derefEventTarget() override { deref(); }
+ std::unique_ptr<PlatformMediaSession> m_mediaSession;
+ std::unique_ptr<GenericEventQueue> m_eventQueue;
RefPtr<AudioBuffer> m_renderTarget;
-
- bool m_isOfflineContext;
+ RefPtr<AudioDestinationNode> m_destinationNode;
+ RefPtr<AudioListener> m_listener;
+
+ unsigned m_connectionCount { 0 };
+
+ // Graph locking.
+ Lock m_contextGraphMutex;
+ volatile ThreadIdentifier m_audioThread { 0 };
+ volatile ThreadIdentifier m_graphOwnerThread; // if the lock is held then this is the thread which owns it, otherwise == UndefinedThreadIdentifier
AsyncAudioDecoder m_audioDecoder;
@@ -362,11 +386,28 @@ private:
enum { MaxNumberOfChannels = 32 };
// Number of AudioBufferSourceNodes that are active (playing).
- std::atomic<int> m_activeSourceCount;
+ std::atomic<int> m_activeSourceCount { 0 };
- BehaviorRestrictions m_restrictions;
+ BehaviorRestrictions m_restrictions { NoRestrictions };
+
+ State m_state { State::Suspended };
};
-} // WebCore
+// FIXME: Find out why these ==/!= functions are needed and remove them if possible.
-#endif // AudioContext_h
+inline bool operator==(const AudioContext& lhs, const AudioContext& rhs)
+{
+ return &lhs == &rhs;
+}
+
+inline bool operator!=(const AudioContext& lhs, const AudioContext& rhs)
+{
+ return &lhs != &rhs;
+}
+
+inline AudioContext::State AudioContext::state() const
+{
+ return m_state;
+}
+
+} // WebCore