// BaseAudioContext.h
#pragma once
#include "ActiveDOMObject.h"
#include "AsyncAudioDecoder.h"
#include "AudioBus.h"
#include "AudioContextOptions.h"
#include "AudioContextState.h"
#include "AudioDestinationNode.h"
#include "EventTarget.h"
#include "JSDOMPromiseDeferred.h"
#include "MediaCanStartListener.h"
#include "MediaProducer.h"
#include "PeriodicWaveConstraints.h"
#include "PlatformMediaSession.h"
#include "ScriptExecutionContext.h"
#include "VisibilityChangeClient.h"
#include <JavaScriptCore/ConsoleTypes.h>
#include <JavaScriptCore/Float32Array.h>
#include <atomic>
#include <wtf/HashSet.h>
#include <wtf/LoggerHelper.h>
#include <wtf/MainThread.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
#include <wtf/UniqueRef.h>
#include <wtf/Vector.h>
#include <wtf/text/AtomStringHash.h>
namespace WebCore {
class AnalyserNode;
class AudioBuffer;
class AudioBufferCallback;
class AudioBufferSourceNode;
class AudioListener;
class AudioSummingJunction;
class BiquadFilterNode;
class ChannelMergerNode;
class ChannelSplitterNode;
class ConstantSourceNode;
class ConvolverNode;
class DelayNode;
class Document;
class DynamicsCompressorNode;
class GainNode;
class HTMLMediaElement;
class MainThreadGenericEventQueue;
class MediaElementAudioSourceNode;
class MediaStream;
class MediaStreamAudioDestinationNode;
class MediaStreamAudioSourceNode;
class OscillatorNode;
class PannerNode;
class PeriodicWave;
class ScriptProcessorNode;
class SecurityOrigin;
class StereoPannerNode;
class WaveShaperNode;
template<typename IDLType> class DOMPromiseDeferred;
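// BaseAudioContext is the common base class of AudioContext (real-time rendering to the
// audio hardware) and OfflineAudioContext (faster-than-real-time rendering into an
// AudioBuffer). It owns the node graph, the graph lock shared with the rendering thread,
// and the context's lifecycle state.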
class BaseAudioContext
: public ActiveDOMObject
, public ThreadSafeRefCounted<BaseAudioContext>
, public EventTargetWithInlineData
, public MediaCanStartListener
, public MediaProducer
#if !RELEASE_LOG_DISABLED
, public LoggerHelper
#endif
, private PlatformMediaSessionClient
, private VisibilityChangeClient
{
WTF_MAKE_ISO_ALLOCATED(BaseAudioContext);
public:
virtual ~BaseAudioContext();
using ThreadSafeRefCounted::ref;
using ThreadSafeRefCounted::deref;
Document* document() const;
bool isInitialized() const;
bool isOfflineContext() const { return m_isOfflineContext; }
virtual bool isWebKitAudioContext() const { return false; }
DocumentIdentifier hostingDocumentIdentifier() const final;
AudioDestinationNode* destination() { return m_destinationNode.get(); }
size_t currentSampleFrame() const { return m_destinationNode ? m_destinationNode->currentSampleFrame() : 0; }
double currentTime() const { return m_destinationNode ? m_destinationNode->currentTime() : 0.; }
float sampleRate() const { return m_destinationNode ? m_destinationNode->sampleRate() : 0.f; }
unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }
void incrementActiveSourceCount();
void decrementActiveSourceCount();
virtual bool shouldSuspend() { return false; }
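// AudioBuffer creation and asynchronous decoding of encoded audio data. decodeAudioData()
// decodes on a background thread and reports the result through the success/error callbacks
// and, when provided, the deferred promise.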
ExceptionOr<Ref<AudioBuffer>> createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate);
void decodeAudioData(Ref<ArrayBuffer>&&, RefPtr<AudioBufferCallback>&&, RefPtr<AudioBufferCallback>&&, Optional<Ref<DeferredPromise>>&& = WTF::nullopt);
AudioListener& listener();
void suspendRendering(DOMPromiseDeferred<void>&&);
void resumeRendering(DOMPromiseDeferred<void>&&);
virtual void didSuspendRendering(size_t frame);
AudioBuffer* renderTarget() const { return m_renderTarget.get(); }
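// Current lifecycle state of the context (e.g. suspended, running or closed).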
using State = AudioContextState;
State state() const { return m_state; }
bool isClosed() const { return m_state == State::Closed; }
bool wouldTaintOrigin(const URL&) const;
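// Factory methods for the various AudioNode subclasses; invalid arguments are reported via ExceptionOr.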
ExceptionOr<Ref<AudioBufferSourceNode>> createBufferSource();
ExceptionOr<Ref<GainNode>> createGain();
ExceptionOr<Ref<BiquadFilterNode>> createBiquadFilter();
ExceptionOr<Ref<WaveShaperNode>> createWaveShaper();
ExceptionOr<Ref<DelayNode>> createDelay(double maxDelayTime);
ExceptionOr<Ref<PannerNode>> createPanner();
ExceptionOr<Ref<ConvolverNode>> createConvolver();
ExceptionOr<Ref<DynamicsCompressorNode>> createDynamicsCompressor();
ExceptionOr<Ref<AnalyserNode>> createAnalyser();
ExceptionOr<Ref<ScriptProcessorNode>> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels);
ExceptionOr<Ref<ChannelSplitterNode>> createChannelSplitter(size_t numberOfOutputs);
ExceptionOr<Ref<ChannelMergerNode>> createChannelMerger(size_t numberOfInputs);
ExceptionOr<Ref<OscillatorNode>> createOscillator();
ExceptionOr<Ref<PeriodicWave>> createPeriodicWave(Vector<float>&& real, Vector<float>&& imaginary, const PeriodicWaveConstraints& = { });
ExceptionOr<Ref<ConstantSourceNode>> createConstantSource();
ExceptionOr<Ref<StereoPannerNode>> createStereoPanner();
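// Rendering-thread hooks: the destination node calls handlePreRenderTasks() and
// handlePostRenderTasks() at the beginning and end of each render quantum so that graph
// housekeeping (dirty-state updates, deferred node deletion) happens when the graph lock
// can be taken.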
void notifyNodeFinishedProcessing(AudioNode*);
void handlePreRenderTasks(const AudioIOPosition& outputPosition);
AudioIOPosition outputPosition();
void handlePostRenderTasks();
void derefFinishedSourceNodes();
void markForDeletion(AudioNode&);
void deleteMarkedNodes();
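// "Automatic pull" nodes (for example an AnalyserNode with no connected outputs) are pulled
// by the rendering thread every render quantum even though nothing downstream consumes them.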
void addAutomaticPullNode(AudioNode&);
void removeAutomaticPullNode(AudioNode&);
void processAutomaticPullNodes(size_t framesToProcess);
void incrementConnectionCount()
{
    ASSERT(isMainThread());
    m_connectionCount++;
}
unsigned connectionCount() const { return m_connectionCount; }
void setAudioThread(Thread& thread) { m_audioThread = &thread; }
Thread* audioThread() const { return m_audioThread; }
bool isAudioThread() const;
bool isAudioThreadFinished() { return m_isAudioThreadFinished; }
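// Graph lock protecting the node graph. The main thread uses lock(), which may block;
// the real-time audio thread must use tryLock() so it never blocks waiting on the main thread.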
void lock(bool& mustReleaseLock);
bool tryLock(bool& mustReleaseLock);
void unlock();
bool isGraphOwner() const;
static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }
void addDeferredFinishDeref(AudioNode*);
void handleDeferredFinishDerefs();
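// Bookkeeping for summing junctions and node outputs whose connections have changed;
// the dirty sets are processed while the graph lock is held.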
void markSummingJunctionDirty(AudioSummingJunction*);
void markAudioNodeOutputDirty(AudioNodeOutput*);
void removeMarkedSummingJunction(AudioSummingJunction*);
EventTargetInterface eventTargetInterface() const final;
ScriptExecutionContext* scriptExecutionContext() const final;
void refEventTarget() override { ref(); }
void derefEventTarget() override { deref(); }
void startRendering();
void finishedRendering(bool didRendering);
static unsigned s_hardwareContextCount;
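// Restrictions that gate when audio playback may start, e.g. requiring a user gesture or
// explicit page consent.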
enum BehaviorRestrictionFlags {
NoRestrictions = 0,
RequireUserGestureForAudioStartRestriction = 1 << 0,
RequirePageConsentForAudioStartRestriction = 1 << 1,
};
typedef unsigned BehaviorRestrictions;
BehaviorRestrictions behaviorRestrictions() const { return m_restrictions; }
void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }
void isPlayingAudioDidChange();
void nodeWillBeginPlayback();
#if !RELEASE_LOG_DISABLED
const Logger& logger() const final { return m_logger.get(); }
const void* logIdentifier() const final { return m_logIdentifier; }
WTFLogChannel& logChannel() const final;
const void* nextAudioNodeLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioNodeIdentifier); }
const void* nextAudioParameterLogIdentifier() { return childLogIdentifier(m_logIdentifier, ++m_nextAudioParameterIdentifier); }
#endif
void postTask(WTF::Function<void()>&&);
bool isStopped() const { return m_isStopScheduled; }
const SecurityOrigin* origin() const;
void addConsoleMessage(MessageSource, MessageLevel, const String& message);
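// RAII helper that holds the context's graph lock for the lifetime of a scope.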
class AutoLocker {
public:
    explicit AutoLocker(BaseAudioContext& context)
        : m_context(context)
    {
        m_context.lock(m_mustReleaseLock);
    }
    ~AutoLocker()
    {
        if (m_mustReleaseLock)
            m_context.unlock();
    }
private:
    BaseAudioContext& m_context;
    bool m_mustReleaseLock;
};
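// refNode()/derefNode() keep a source node alive while it is playing, even if script no
// longer holds a reference to it.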
void refNode(AudioNode&);
void derefNode(AudioNode&);
void lazyInitialize();
static bool isSupportedSampleRate(float sampleRate);
protected:
explicit BaseAudioContext(Document&, const AudioContextOptions& = { });
BaseAudioContext(Document&, AudioBuffer* renderTarget);
void clearPendingActivity();
void makePendingActivity();
void lockInternal(bool& mustReleaseLock);
AudioDestinationNode* destinationNode() const { return m_destinationNode.get(); }
bool willBeginPlayback();
virtual void uninitialize();
#if !RELEASE_LOG_DISABLED
const char* logClassName() const final { return "BaseAudioContext"; }
#endif
void addReaction(State, DOMPromiseDeferred<void>&&);
void setState(State);
virtual void didFinishOfflineRendering(ExceptionOr<Ref<AudioBuffer>>&&) { }
private:
void constructCommon();
bool willPausePlayback();
bool userGestureRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequireUserGestureForAudioStartRestriction; }
bool pageConsentRequiredForAudioStart() const { return !isOfflineContext() && m_restrictions & RequirePageConsentForAudioStartRestriction; }
void clear();
void scheduleNodeDeletion();
void mediaCanStart(Document&) override;
void dispatchEvent(Event&) final;
MediaProducer::MediaStateFlags mediaState() const override;
void pageMutedStateDidChange() override;
void suspend(ReasonForSuspension) final;
void resume() final;
void stop() override;
const char* activeDOMObjectName() const override;
void derefUnfinishedSourceNodes();
PlatformMediaSession::MediaType mediaType() const override { return PlatformMediaSession::MediaType::WebAudio; }
PlatformMediaSession::MediaType presentationType() const override { return PlatformMediaSession::MediaType::WebAudio; }
void mayResumePlayback(bool shouldResume) override;
void suspendPlayback() override;
bool canReceiveRemoteControlCommands() const override { return false; }
void didReceiveRemoteControlCommand(PlatformMediaSession::RemoteControlCommandType, const PlatformMediaSession::RemoteCommandArgument*) override { }
bool supportsSeeking() const override { return false; }
bool shouldOverrideBackgroundPlaybackRestriction(PlatformMediaSession::InterruptionType) const override { return false; }
bool canProduceAudio() const final { return true; }
bool isSuspended() const final;
void visibilityStateChanged() final;
void handleDirtyAudioSummingJunctions();
void handleDirtyAudioNodeOutputs();
void updateAutomaticPullNodes();
#if !RELEASE_LOG_DISABLED
Ref<Logger> m_logger;
const void* m_logIdentifier;
uint64_t m_nextAudioNodeIdentifier { 0 };
uint64_t m_nextAudioParameterIdentifier { 0 };
#endif
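// Nodes that have finished playing, nodes currently kept alive because they are playing,
// and nodes queued for deferred deletion on the main thread.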
Vector<AudioNode*> m_finishedNodes;
Vector<AudioNode*> m_referencedNodes;
Vector<AudioNode*> m_nodesMarkedForDeletion;
Vector<AudioNode*> m_nodesToDelete;
bool m_isDeletionScheduled { false };
bool m_isStopScheduled { false };
bool m_isInitialized { false };
bool m_isAudioThreadFinished { false };
bool m_automaticPullNodesNeedUpdating { false };
bool m_isOfflineContext { false };
HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
HashSet<AudioNode*> m_automaticPullNodes;
Vector<AudioNode*> m_renderingAutomaticPullNodes;
Vector<AudioNode*> m_deferredFinishDerefList;
Vector<Vector<DOMPromiseDeferred<void>>> m_stateReactions;
std::unique_ptr<PlatformMediaSession> m_mediaSession;
UniqueRef<MainThreadGenericEventQueue> m_eventQueue;
RefPtr<AudioBuffer> m_renderTarget;
RefPtr<AudioDestinationNode> m_destinationNode;
RefPtr<AudioListener> m_listener;
unsigned m_connectionCount { 0 };
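// The graph lock, the audio rendering thread, and the thread that currently owns the graph lock.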
Lock m_contextGraphMutex;
Thread* volatile m_audioThread { nullptr };
Thread* volatile m_graphOwnerThread { nullptr };
std::unique_ptr<AsyncAudioDecoder> m_audioDecoder;
enum { MaxNumberOfChannels = 32 };
std::atomic<int> m_activeSourceCount { 0 };
BehaviorRestrictions m_restrictions { NoRestrictions };
State m_state { State::Suspended };
RefPtr<PendingActivity<BaseAudioContext>> m_pendingActivity;
AudioIOPosition m_outputPosition;
};
} // namespace WebCore