// BaseAudioContext.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioContext.h"
#include "AnalyserNode.h"
#include "AsyncAudioDecoder.h"
#include "AudioBuffer.h"
#include "AudioBufferCallback.h"
#include "AudioBufferOptions.h"
#include "AudioBufferSourceNode.h"
#include "AudioDestination.h"
#include "AudioListener.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioParamDescriptor.h"
#include "AudioSession.h"
#include "AudioWorklet.h"
#include "BiquadFilterNode.h"
#include "ChannelMergerNode.h"
#include "ChannelMergerOptions.h"
#include "ChannelSplitterNode.h"
#include "ChannelSplitterOptions.h"
#include "ConstantSourceNode.h"
#include "ConstantSourceOptions.h"
#include "ConvolverNode.h"
#include "DefaultAudioDestinationNode.h"
#include "DelayNode.h"
#include "DelayOptions.h"
#include "Document.h"
#include "DynamicsCompressorNode.h"
#include "EventNames.h"
#include "FFTFrame.h"
#include "Frame.h"
#include "FrameLoader.h"
#include "GainNode.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "IIRFilterNode.h"
#include "IIRFilterOptions.h"
#include "JSAudioBuffer.h"
#include "JSDOMPromiseDeferred.h"
#include "Logging.h"
#include "NetworkingContext.h"
#include "OfflineAudioCompletionEvent.h"
#include "OfflineAudioDestinationNode.h"
#include "OscillatorNode.h"
#include "Page.h"
#include "PannerNode.h"
#include "PeriodicWave.h"
#include "PeriodicWaveOptions.h"
#include "ScriptController.h"
#include "ScriptProcessorNode.h"
#include "StereoPannerNode.h"
#include "StereoPannerOptions.h"
#include "WaveShaperNode.h"
#include "WebKitAudioListener.h"
#include <JavaScriptCore/ScriptCallStack.h>
#include <wtf/Scope.h>
#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif
#if USE(GSTREAMER)
#include "GStreamerCommon.h"
#endif
#if PLATFORM(IOS_FAMILY)
#include "ScriptController.h"
#include "Settings.h"
#endif
#include <JavaScriptCore/ArrayBuffer.h>
#include <wtf/Atomics.h>
#include <wtf/IsoMallocInlines.h>
#include <wtf/MainThread.h>
#include <wtf/Ref.h>
#include <wtf/RefCounted.h>
#include <wtf/Scope.h>
#include <wtf/text/WTFString.h>
namespace WebCore {
WTF_MAKE_ISO_ALLOCATED_IMPL(BaseAudioContext);
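// Accepted sample rate range for contexts and AudioBuffers. The 3 kHz to 384 kHz range used here
// is wider than the minimum range the Web Audio specification requires implementations to support.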
bool BaseAudioContext::isSupportedSampleRate(float sampleRate)
{
return sampleRate >= 3000 && sampleRate <= 384000;
}
unsigned BaseAudioContext::s_hardwareContextCount = 0;
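// Realtime constructor: wires the context to a DefaultAudioDestinationNode, takes a pending
// activity so the context stays alive while it may still produce audio or fire events, and defers
// lazyInitialize() to a queued task so initialization happens after construction completes.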
BaseAudioContext::BaseAudioContext(Document& document, const AudioContextOptions& contextOptions)
: ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
, m_logger(document.logger())
, m_logIdentifier(uniqueLogIdentifier())
#endif
, m_worklet(AudioWorklet::create(*this))
{
makePendingActivity();
FFTFrame::initialize();
m_destinationNode = DefaultAudioDestinationNode::create(*this, contextOptions.sampleRate);
postTask([this] {
if (m_isStopScheduled)
return;
lazyInitialize();
});
}
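// Offline constructor: renders into the provided AudioBuffer render target through an
// OfflineAudioDestinationNode instead of the real audio hardware.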
BaseAudioContext::BaseAudioContext(Document& document, unsigned numberOfChannels, float sampleRate, RefPtr<AudioBuffer>&& renderTarget)
: ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
, m_logger(document.logger())
, m_logIdentifier(uniqueLogIdentifier())
#endif
, m_worklet(AudioWorklet::create(*this))
, m_isOfflineContext(true)
, m_renderTarget(WTFMove(renderTarget))
{
FFTFrame::initialize();
m_destinationNode = OfflineAudioDestinationNode::create(*this, numberOfChannels, sampleRate, m_renderTarget.copyRef());
}
BaseAudioContext::~BaseAudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
fprintf(stderr, "%p: BaseAudioContext::~BaseAudioContext()\n", this);
#endif
ASSERT(!m_isInitialized);
ASSERT(m_isStopScheduled);
ASSERT(m_nodesToDelete.isEmpty());
ASSERT(m_referencedSourceNodes.isEmpty());
ASSERT(m_finishedSourceNodes.isEmpty());
ASSERT(m_automaticPullNodes.isEmpty());
if (m_automaticPullNodesNeedUpdating)
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
ASSERT(m_renderingAutomaticPullNodes.isEmpty());
}
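// Deferred initialization: the destination node is initialized at most once, and never after the
// context has been stopped or the audio thread has finished.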
void BaseAudioContext::lazyInitialize()
{
if (isStopped())
return;
if (m_isInitialized)
return;
ASSERT(!m_isAudioThreadFinished);
if (m_isAudioThreadFinished)
return;
if (m_destinationNode)
m_destinationNode->initialize();
m_isInitialized = true;
}
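// Tears down the graph: drops the destination node and keeps draining the deletion queues until
// no nodes remain, since deleting nodes can mark further nodes for deletion.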
void BaseAudioContext::clear()
{
auto protectedThis = makeRef(*this);
if (m_destinationNode)
m_destinationNode = nullptr;
do {
deleteMarkedNodes();
m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
} while (m_nodesToDelete.size());
clearPendingActivity();
}
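// Uninitialize: stops the destination, marks the audio thread as finished, moves realtime
// contexts to the Closed state, and releases references to finished and unfinished source nodes.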
void BaseAudioContext::uninitialize()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
if (!m_isInitialized)
return;
if (m_destinationNode)
m_destinationNode->uninitialize();
m_isAudioThreadFinished = true;
if (!isOfflineContext()) {
ASSERT(s_hardwareContextCount);
--s_hardwareContextCount;
setState(State::Closed);
}
{
AutoLocker locker(*this);
derefFinishedSourceNodes();
}
derefUnfinishedSourceNodes();
m_isInitialized = false;
}
bool BaseAudioContext::isInitialized() const
{
return m_isInitialized;
}
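// State-change reactions: promises waiting on a target state (e.g. from suspend()/resume()/close())
// are stored per state by addReaction() and resolved by setState() once that state is reached.
// setState() also queues a "statechange" event whenever the state actually changes.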
void BaseAudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
{
size_t stateIndex = static_cast<size_t>(state);
if (stateIndex >= m_stateReactions.size())
m_stateReactions.grow(stateIndex + 1);
m_stateReactions[stateIndex].append(WTFMove(promise));
}
void BaseAudioContext::setState(State state)
{
if (m_state != state) {
m_state = state;
queueTaskToDispatchEvent(*this, TaskSource::MediaElement, Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));
}
size_t stateIndex = static_cast<size_t>(state);
if (stateIndex >= m_stateReactions.size())
return;
Vector<DOMPromiseDeferred<void>> reactions;
m_stateReactions[stateIndex].swap(reactions);
for (auto& promise : reactions)
promise.resolve();
}
void BaseAudioContext::stop()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
if (m_isStopScheduled)
return;
m_isStopScheduled = true;
ASSERT(document());
document()->updateIsPlayingMedia();
uninitialize();
clear();
}
const char* BaseAudioContext::activeDOMObjectName() const
{
return "AudioContext";
}
Document* BaseAudioContext::document() const
{
return downcast<Document>(m_scriptExecutionContext);
}
float BaseAudioContext::sampleRate() const
{
return m_destinationNode ? m_destinationNode->sampleRate() : AudioDestination::hardwareSampleRate();
}
bool BaseAudioContext::wouldTaintOrigin(const URL& url) const
{
if (url.protocolIsData())
return false;
if (auto* document = this->document())
return !document->securityOrigin().canRequest(url);
return false;
}
ExceptionOr<Ref<AudioBuffer>> BaseAudioContext::createBuffer(unsigned numberOfChannels, unsigned length, float sampleRate)
{
return AudioBuffer::create(AudioBufferOptions {numberOfChannels, length, sampleRate});
}
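// decodeAudioData(): decodes on a background thread via AsyncAudioDecoder, then hops back to the
// context's task queue to settle the promise and/or invoke the legacy success/error callbacks.
// The pending activity keeps the context alive while a decode is in flight.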
void BaseAudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback, Optional<Ref<DeferredPromise>>&& promise)
{
if (promise && (!document() || !document()->isFullyActive())) {
promise.value()->reject(Exception { InvalidStateError, "Document is not fully active"_s });
return;
}
if (!m_audioDecoder)
m_audioDecoder = makeUnique<AsyncAudioDecoder>();
m_audioDecoder->decodeAsync(WTFMove(audioData), sampleRate(), [this, activity = ActiveDOMObject::makePendingActivity(*this), successCallback = WTFMove(successCallback), errorCallback = WTFMove(errorCallback), promise = WTFMove(promise)](ExceptionOr<Ref<AudioBuffer>>&& result) mutable {
queueTaskKeepingObjectAlive(*this, TaskSource::InternalAsyncTask, [successCallback = WTFMove(successCallback), errorCallback = WTFMove(errorCallback), promise = WTFMove(promise), result = WTFMove(result)]() mutable {
if (result.hasException()) {
if (promise)
promise.value()->reject(result.releaseException());
if (errorCallback)
errorCallback->handleEvent(nullptr);
return;
}
auto audioBuffer = result.releaseReturnValue();
if (promise)
promise.value()->resolve<IDLInterface<AudioBuffer>>(audioBuffer.get());
if (successCallback)
successCallback->handleEvent(audioBuffer.ptr());
});
});
}
AudioListener& BaseAudioContext::listener()
{
if (!m_listener) {
if (isWebKitAudioContext())
m_listener = WebKitAudioListener::create(*this);
else
m_listener = AudioListener::create(*this);
}
return *m_listener;
}
ExceptionOr<Ref<AudioBufferSourceNode>> BaseAudioContext::createBufferSource()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return AudioBufferSourceNode::create(*this);
}
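// createScriptProcessor(): a bufferSize of 0 lets the implementation choose a size (derived from
// the platform AudioSession's buffer size where available, clamped to a power of two between 256
// and 16384, otherwise 2048); explicit sizes must be one of the powers of two listed below.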
ExceptionOr<Ref<ScriptProcessorNode>> BaseAudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
switch (bufferSize) {
case 0:
#if USE(AUDIO_SESSION)
bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
#else
bufferSize = 2048;
#endif
break;
case 256:
case 512:
case 1024:
case 2048:
case 4096:
case 8192:
case 16384:
break;
default:
return Exception { IndexSizeError, "Unsupported buffer size for ScriptProcessorNode"_s };
}
if (!numberOfInputChannels && !numberOfOutputChannels)
return Exception { NotSupportedError, "numberOfInputChannels and numberOfOutputChannels cannot both be 0"_s };
if (numberOfInputChannels > maxNumberOfChannels())
return Exception { NotSupportedError, "numberOfInputChannels exceeds maximum number of channels"_s };
if (numberOfOutputChannels > maxNumberOfChannels())
return Exception { NotSupportedError, "numberOfOutputChannels exceeds maximum number of channels"_s };
return ScriptProcessorNode::create(*this, bufferSize, numberOfInputChannels, numberOfOutputChannels);
}
ExceptionOr<Ref<BiquadFilterNode>> BaseAudioContext::createBiquadFilter()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return BiquadFilterNode::create(*this);
}
ExceptionOr<Ref<WaveShaperNode>> BaseAudioContext::createWaveShaper()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return WaveShaperNode::create(*this);
}
ExceptionOr<Ref<PannerNode>> BaseAudioContext::createPanner()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return PannerNode::create(*this);
}
ExceptionOr<Ref<ConvolverNode>> BaseAudioContext::createConvolver()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return ConvolverNode::create(*this);
}
ExceptionOr<Ref<DynamicsCompressorNode>> BaseAudioContext::createDynamicsCompressor()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return DynamicsCompressorNode::create(*this);
}
ExceptionOr<Ref<AnalyserNode>> BaseAudioContext::createAnalyser()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return AnalyserNode::create(*this);
}
ExceptionOr<Ref<GainNode>> BaseAudioContext::createGain()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return GainNode::create(*this);
}
ExceptionOr<Ref<DelayNode>> BaseAudioContext::createDelay(double maxDelayTime)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
DelayOptions options;
options.maxDelayTime = maxDelayTime;
return DelayNode::create(*this, options);
}
ExceptionOr<Ref<ChannelSplitterNode>> BaseAudioContext::createChannelSplitter(size_t numberOfOutputs)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
ChannelSplitterOptions options;
options.numberOfOutputs = numberOfOutputs;
return ChannelSplitterNode::create(*this, options);
}
ExceptionOr<Ref<ChannelMergerNode>> BaseAudioContext::createChannelMerger(size_t numberOfInputs)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
ChannelMergerOptions options;
options.numberOfInputs = numberOfInputs;
return ChannelMergerNode::create(*this, options);
}
ExceptionOr<Ref<OscillatorNode>> BaseAudioContext::createOscillator()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return OscillatorNode::create(*this);
}
ExceptionOr<Ref<PeriodicWave>> BaseAudioContext::createPeriodicWave(Vector<float>&& real, Vector<float>&& imaginary, const PeriodicWaveConstraints& constraints)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
PeriodicWaveOptions options;
options.real = WTFMove(real);
options.imag = WTFMove(imaginary);
options.disableNormalization = constraints.disableNormalization;
return PeriodicWave::create(*this, WTFMove(options));
}
ExceptionOr<Ref<ConstantSourceNode>> BaseAudioContext::createConstantSource()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return ConstantSourceNode::create(*this);
}
ExceptionOr<Ref<StereoPannerNode>> BaseAudioContext::createStereoPanner()
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
return StereoPannerNode::create(*this);
}
ExceptionOr<Ref<IIRFilterNode>> BaseAudioContext::createIIRFilter(ScriptExecutionContext& scriptExecutionContext, Vector<double>&& feedforward, Vector<double>&& feedback)
{
ALWAYS_LOG(LOGIDENTIFIER);
ASSERT(isMainThread());
IIRFilterOptions options;
options.feedforward = WTFMove(feedforward);
options.feedback = WTFMove(feedback);
return IIRFilterNode::create(scriptExecutionContext, *this, WTFMove(options));
}
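// Source node referencing: a source node is ref'd when it begins playback so it stays alive while
// playing even if JavaScript drops its last reference, and deref'd once it finishes (via the audio
// thread's m_finishedSourceNodes list) or when the context is torn down.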
void BaseAudioContext::derefFinishedSourceNodes()
{
ASSERT(isGraphOwner());
ASSERT(isAudioThread() || isAudioThreadFinished());
for (auto& node : m_finishedSourceNodes)
derefSourceNode(*node);
m_finishedSourceNodes.clear();
}
void BaseAudioContext::refSourceNode(AudioNode& node)
{
ASSERT(isMainThread());
AutoLocker locker(*this);
ASSERT(!m_referencedSourceNodes.contains(&node));
m_referencedSourceNodes.append(&node);
}
void BaseAudioContext::derefSourceNode(AudioNode& node)
{
ASSERT(isGraphOwner());
ASSERT(m_referencedSourceNodes.contains(&node));
m_referencedSourceNodes.removeFirst(&node);
}
void BaseAudioContext::derefUnfinishedSourceNodes()
{
ASSERT(isMainThread() && isAudioThreadFinished());
m_referencedSourceNodes.clear();
}
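// Graph lock: m_contextGraphMutex guards the audio graph's topology. The owning thread is tracked
// in m_graphOwnerThread, so re-locking on the owner thread sets mustReleaseLock to false and only
// the outermost caller unlocks. Main-thread callers typically use the RAII AutoLocker, e.g.
// (illustrative sketch):
//
//     AutoLocker locker(*this); // holds the graph lock for the current scope
//     m_dirtySummingJunctions.remove(junction);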
void BaseAudioContext::lock(bool& mustReleaseLock)
{
ASSERT(isMainThread());
lockInternal(mustReleaseLock);
}
void BaseAudioContext::lockInternal(bool& mustReleaseLock)
{
Thread& thisThread = Thread::current();
if (&thisThread == m_graphOwnerThread) {
mustReleaseLock = false;
} else {
m_contextGraphMutex.lock();
m_graphOwnerThread = &thisThread;
mustReleaseLock = true;
}
}
bool BaseAudioContext::tryLock(bool& mustReleaseLock)
{
Thread& thisThread = Thread::current();
bool isAudioThread = &thisThread == audioThread();
ASSERT(isAudioThread || isAudioThreadFinished());
if (!isAudioThread) {
lock(mustReleaseLock);
return true;
}
bool hasLock;
if (&thisThread == m_graphOwnerThread) {
hasLock = true;
mustReleaseLock = false;
} else {
hasLock = m_contextGraphMutex.tryLock();
if (hasLock)
m_graphOwnerThread = &thisThread;
mustReleaseLock = hasLock;
}
return hasLock;
}
void BaseAudioContext::unlock()
{
ASSERT(m_graphOwnerThread == &Thread::current());
m_graphOwnerThread = nullptr;
m_contextGraphMutex.unlock();
}
bool BaseAudioContext::isAudioThread() const
{
return m_audioThread == &Thread::current();
}
bool BaseAudioContext::isGraphOwner() const
{
return m_graphOwnerThread == &Thread::current();
}
void BaseAudioContext::addDeferredDecrementConnectionCount(AudioNode* node)
{
ASSERT(isAudioThread());
m_deferredBreakConnectionList.append(node);
}
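// Called by the audio thread at the start of each render quantum. Uses tryLock() so the audio
// thread never blocks on the main thread; if the lock is unavailable, the dirty state is simply
// handled on a later quantum.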
void BaseAudioContext::handlePreRenderTasks(const AudioIOPosition& outputPosition)
{
ASSERT(isAudioThread());
bool mustReleaseLock;
if (tryLock(mustReleaseLock)) {
handleDirtyAudioSummingJunctions();
handleDirtyAudioNodeOutputs();
updateAutomaticPullNodes();
m_outputPosition = outputPosition;
if (mustReleaseLock)
unlock();
}
}
AudioIOPosition BaseAudioContext::outputPosition()
{
ASSERT(isMainThread());
AutoLocker locker(*this);
return m_outputPosition;
}
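// Called by the audio thread at the end of each render quantum to perform cleanup that must not
// run mid-render: breaking deferred connections, releasing finished source nodes, and scheduling
// node deletion on the main thread.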
void BaseAudioContext::handlePostRenderTasks()
{
ASSERT(isAudioThread());
bool mustReleaseLock;
if (tryLock(mustReleaseLock)) {
handleDeferredDecrementConnectionCounts();
derefFinishedSourceNodes();
scheduleNodeDeletion();
handleDirtyAudioSummingJunctions();
handleDirtyAudioNodeOutputs();
updateAutomaticPullNodes();
if (mustReleaseLock)
unlock();
}
}
void BaseAudioContext::handleDeferredDecrementConnectionCounts()
{
ASSERT(isAudioThread() && isGraphOwner());
for (auto& node : m_deferredBreakConnectionList)
node->decrementConnectionCountWithLock();
m_deferredBreakConnectionList.clear();
}
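// Two-phase node deletion: nodes are first marked while the graph lock is held, then actually
// deleted on the main thread by deleteMarkedNodes(). If the audio thread has already finished,
// nodes go straight onto the to-delete list.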
void BaseAudioContext::markForDeletion(AudioNode& node)
{
ASSERT(isGraphOwner());
if (isAudioThreadFinished())
m_nodesToDelete.append(&node);
else
m_nodesMarkedForDeletion.append(&node);
removeAutomaticPullNode(node);
}
void BaseAudioContext::scheduleNodeDeletion()
{
bool isGood = m_isInitialized && isGraphOwner();
ASSERT(isGood);
if (!isGood)
return;
if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
m_nodesMarkedForDeletion.clear();
m_isDeletionScheduled = true;
callOnMainThread([protectedThis = makeRef(*this)]() mutable {
protectedThis->deleteMarkedNodes();
});
}
}
void BaseAudioContext::deleteMarkedNodes()
{
ASSERT(isMainThread());
auto protectedThis = makeRef(*this);
{
AutoLocker locker(*this);
while (m_nodesToDelete.size()) {
AudioNode* node = m_nodesToDelete.takeLast();
unsigned numberOfInputs = node->numberOfInputs();
for (unsigned i = 0; i < numberOfInputs; ++i)
m_dirtySummingJunctions.remove(node->input(i));
unsigned numberOfOutputs = node->numberOfOutputs();
for (unsigned i = 0; i < numberOfOutputs; ++i)
m_dirtyAudioNodeOutputs.remove(node->output(i));
delete node;
}
m_isDeletionScheduled = false;
}
}
void BaseAudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
ASSERT(isGraphOwner());
m_dirtySummingJunctions.add(summingJunction);
}
void BaseAudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
ASSERT(isMainThread());
AutoLocker locker(*this);
m_dirtySummingJunctions.remove(summingJunction);
}
EventTargetInterface BaseAudioContext::eventTargetInterface() const
{
return BaseAudioContextEventTargetInterfaceType;
}
void BaseAudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
ASSERT(isGraphOwner());
m_dirtyAudioNodeOutputs.add(output);
}
void BaseAudioContext::handleDirtyAudioSummingJunctions()
{
ASSERT(isGraphOwner());
for (auto& junction : m_dirtySummingJunctions)
junction->updateRenderingState();
m_dirtySummingJunctions.clear();
}
void BaseAudioContext::handleDirtyAudioNodeOutputs()
{
ASSERT(isGraphOwner());
for (auto& output : m_dirtyAudioNodeOutputs)
output->updateRenderingState();
m_dirtyAudioNodeOutputs.clear();
}
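// Automatic pull nodes are nodes that must be processed every render quantum even though they are
// not connected to the destination. They are mirrored into m_renderingAutomaticPullNodes, a plain
// Vector the audio thread can iterate without touching the hash set.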
void BaseAudioContext::addAutomaticPullNode(AudioNode& node)
{
ASSERT(isGraphOwner());
if (m_automaticPullNodes.add(&node).isNewEntry)
m_automaticPullNodesNeedUpdating = true;
}
void BaseAudioContext::removeAutomaticPullNode(AudioNode& node)
{
ASSERT(isGraphOwner());
if (m_automaticPullNodes.remove(&node))
m_automaticPullNodesNeedUpdating = true;
}
void BaseAudioContext::updateAutomaticPullNodes()
{
ASSERT(isGraphOwner());
if (m_automaticPullNodesNeedUpdating) {
m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
unsigned i = 0;
for (auto& node : m_automaticPullNodes)
m_renderingAutomaticPullNodes[i++] = node;
m_automaticPullNodesNeedUpdating = false;
}
}
void BaseAudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
ASSERT(isAudioThread());
for (auto& node : m_renderingAutomaticPullNodes)
node->processIfNecessary(framesToProcess);
}
ScriptExecutionContext* BaseAudioContext::scriptExecutionContext() const
{
return ActiveDOMObject::scriptExecutionContext();
}
void BaseAudioContext::isPlayingAudioDidChange()
{
callOnMainThread([protectedThis = makeRef(*this)] {
if (protectedThis->document())
protectedThis->document()->updateIsPlayingMedia();
});
}
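// Offline rendering completion: on success, transitions to Closed, queues an
// OfflineAudioCompletionEvent carrying the rendered buffer, and notifies the subclass via
// didFinishOfflineRendering(); on any early return, the scope exits report failure and drop the
// pending activity.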
void BaseAudioContext::finishedRendering(bool didRendering)
{
ASSERT(isOfflineContext());
ASSERT(isMainThread());
auto finishedRenderingScope = WTF::makeScopeExit([this] {
didFinishOfflineRendering(Exception { InvalidStateError, "Offline rendering failed"_s });
});
if (!isMainThread())
return;
auto clearPendingActivityIfExitEarly = WTF::makeScopeExit([this] {
clearPendingActivity();
});
ALWAYS_LOG(LOGIDENTIFIER);
if (!didRendering)
return;
RefPtr<AudioBuffer> renderedBuffer = m_renderTarget.get();
setState(State::Closed);
ASSERT(renderedBuffer);
if (!renderedBuffer)
return;
if (m_isStopScheduled)
return;
clearPendingActivityIfExitEarly.release();
queueTaskToDispatchEvent(*this, TaskSource::MediaElement, OfflineAudioCompletionEvent::create(*renderedBuffer));
finishedRenderingScope.release();
didFinishOfflineRendering(renderedBuffer.releaseNonNull());
}
void BaseAudioContext::dispatchEvent(Event& event)
{
EventTarget::dispatchEvent(event);
if (event.eventInterface() == OfflineAudioCompletionEventInterfaceType)
clearPendingActivity();
}
void BaseAudioContext::incrementActiveSourceCount()
{
++m_activeSourceCount;
}
void BaseAudioContext::decrementActiveSourceCount()
{
--m_activeSourceCount;
}
void BaseAudioContext::didSuspendRendering(size_t)
{
setState(State::Suspended);
}
void BaseAudioContext::postTask(WTF::Function<void()>&& task)
{
ASSERT(isMainThread());
if (m_isStopScheduled)
return;
queueTaskKeepingObjectAlive(*this, TaskSource::MediaElement, WTFMove(task));
}
const SecurityOrigin* BaseAudioContext::origin() const
{
return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
}
void BaseAudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
{
if (m_scriptExecutionContext)
m_scriptExecutionContext->addConsoleMessage(source, level, message);
}
void BaseAudioContext::clearPendingActivity()
{
m_pendingActivity = nullptr;
}
void BaseAudioContext::makePendingActivity()
{
if (!m_pendingActivity)
m_pendingActivity = ActiveDOMObject::makePendingActivity(*this);
}
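// Returns a lazily created, cached PeriodicWave for each basic oscillator waveform, presumably
// because custom waves are supplied to OscillatorNode directly and never requested through this
// path; hence the RELEASE_ASSERT for OscillatorType::Custom.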
PeriodicWave& BaseAudioContext::periodicWave(OscillatorType type)
{
switch (type) {
case OscillatorType::Square:
if (!m_cachedPeriodicWaveSquare)
m_cachedPeriodicWaveSquare = PeriodicWave::createSquare(sampleRate());
return *m_cachedPeriodicWaveSquare;
case OscillatorType::Sawtooth:
if (!m_cachedPeriodicWaveSawtooth)
m_cachedPeriodicWaveSawtooth = PeriodicWave::createSawtooth(sampleRate());
return *m_cachedPeriodicWaveSawtooth;
case OscillatorType::Triangle:
if (!m_cachedPeriodicWaveTriangle)
m_cachedPeriodicWaveTriangle = PeriodicWave::createTriangle(sampleRate());
return *m_cachedPeriodicWaveTriangle;
case OscillatorType::Custom:
RELEASE_ASSERT_NOT_REACHED();
case OscillatorType::Sine:
if (!m_cachedPeriodicWaveSine)
m_cachedPeriodicWaveSine = PeriodicWave::createSine(sampleRate());
return *m_cachedPeriodicWaveSine;
}
RELEASE_ASSERT_NOT_REACHED();
}
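// Registers the AudioParam descriptors for an AudioWorklet processor. The first registration marks
// the worklet as ready, which restarts rendering on the destination node.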
void BaseAudioContext::addAudioParamDescriptors(const String& processorName, Vector<AudioParamDescriptor>&& descriptors)
{
ASSERT(!m_parameterDescriptorMap.contains(processorName));
bool wasEmpty = m_parameterDescriptorMap.isEmpty();
m_parameterDescriptorMap.add(processorName, WTFMove(descriptors));
if (wasEmpty)
workletIsReady();
}
void BaseAudioContext::sourceNodeWillBeginPlayback(AudioNode& node)
{
refSourceNode(node);
}
void BaseAudioContext::sourceNodeDidFinishPlayback(AudioNode& node)
{
ASSERT(isAudioThread());
m_finishedSourceNodes.append(&node);
}
void BaseAudioContext::workletIsReady()
{
ASSERT(isMainThread());
if (m_destinationNode)
m_destinationNode->restartRendering();
}
#if !RELEASE_LOG_DISABLED
WTFLogChannel& BaseAudioContext::logChannel() const
{
return LogMedia;
}
#endif
}
#endif // ENABLE(WEB_AUDIO)