// ScriptProcessorNode.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "ScriptProcessorNode.h"
#include "AudioBuffer.h"
#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioProcessingEvent.h"
#include "AudioUtilities.h"
#include "Document.h"
#include "EventNames.h"
#include <JavaScriptCore/Float32Array.h>
#include <wtf/IsoMallocInlines.h>
#include <wtf/MainThread.h>
#include <wtf/threads/BinarySemaphore.h>
namespace WebCore {
WTF_MAKE_ISO_ALLOCATED_IMPL(ScriptProcessorNode);
// Factory: heap-allocates a ScriptProcessorNode and returns it to the caller
// as an owning Ref (adoptRef takes over the initial reference).
Ref<ScriptProcessorNode> ScriptProcessorNode::create(BaseAudioContext& context, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
{
    auto* node = new ScriptProcessorNode(context, bufferSize, numberOfInputChannels, numberOfOutputChannels);
    return adoptRef(*node);
}
// Constructs the node with one input and one output. A requested bufferSize
// smaller than one render quantum is clamped up to the quantum size. A pending
// activity is taken immediately; it is released in didBecomeMarkedForDeletion().
// NOTE(review): the third AudioBus::create() argument (false) presumably skips
// allocating channel storage, since process() later points the bus's channel
// memory into the input buffer via setChannelMemory() — confirm.
ScriptProcessorNode::ScriptProcessorNode(BaseAudioContext& context, size_t bufferSize, unsigned numberOfInputChannels, unsigned numberOfOutputChannels)
: AudioNode(context, NodeTypeJavaScript)
, ActiveDOMObject(context.scriptExecutionContext())
, m_bufferSize(bufferSize)
, m_numberOfInputChannels(numberOfInputChannels)
, m_numberOfOutputChannels(numberOfOutputChannels)
, m_internalInputBus(AudioBus::create(numberOfInputChannels, AudioUtilities::renderQuantumSize, false))
{
// Enforce the minimum buffer size: one render quantum.
if (m_bufferSize < AudioUtilities::renderQuantumSize)
m_bufferSize = AudioUtilities::renderQuantumSize;
ASSERT(numberOfInputChannels <= AudioContext::maxNumberOfChannels());
// ScriptProcessorNode uses an 'explicit' channel count mode (see setChannelCountMode()).
initializeDefaultNodeOptions(numberOfInputChannels, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
addInput();
addOutput(numberOfOutputChannels);
initialize();
suspendIfNeeded();
// Keep this object alive until it is marked for deletion.
m_pendingActivity = makePendingActivity(*this);
}
// The pending activity taken in the constructor must already have been released
// (via didBecomeMarkedForDeletion()) by the time the node is destroyed.
ScriptProcessorNode::~ScriptProcessorNode()
{
ASSERT(!hasPendingActivity());
uninitialize();
}
// Allocates the per-slot input/output AudioBuffers used to shuttle audio between
// the rendering thread and the main-thread audioprocess event handler. A side with
// zero channels gets a null buffer. LegacyPreventDetaching::Yes keeps JavaScript
// from detaching the buffers the node keeps reusing. Idempotent: returns early if
// already initialized.
void ScriptProcessorNode::initialize()
{
    if (isInitialized())
        return;

    float sampleRate = context().sampleRate();

    for (unsigned i = 0; i < bufferCount; ++i) {
        // Use nullptr rather than the literal 0 for the empty RefPtr, matching the rest of the file.
        m_inputBuffers[i] = m_numberOfInputChannels ? AudioBuffer::create(m_numberOfInputChannels, bufferSize(), sampleRate, AudioBuffer::LegacyPreventDetaching::Yes) : nullptr;
        m_outputBuffers[i] = m_numberOfOutputChannels ? AudioBuffer::create(m_numberOfOutputChannels, bufferSize(), sampleRate, AudioBuffer::LegacyPreventDetaching::Yes) : nullptr;
    }

    AudioNode::initialize();
}
// Produces the AudioBuffer handed to JavaScript for the event's inputBuffer.
// Reuses a cached buffer when the input can be copied into it in place;
// otherwise replaces the cache with a fresh clone of the input.
RefPtr<AudioBuffer> ScriptProcessorNode::createInputBufferForJS(AudioBuffer* inputBuffer) const
{
    if (!inputBuffer)
        return nullptr;

    bool copiedIntoCache = m_cachedInputBufferForJS && inputBuffer->copyTo(*m_cachedInputBufferForJS);
    if (!copiedIntoCache)
        m_cachedInputBufferForJS = inputBuffer->clone();

    return m_cachedInputBufferForJS;
}
// Produces the AudioBuffer handed to JavaScript for the event's outputBuffer.
// If the cached buffer still matches the output's topology it is simply zeroed
// for reuse; otherwise a new (uncopied) clone of the output buffer replaces it.
RefPtr<AudioBuffer> ScriptProcessorNode::createOutputBufferForJS(AudioBuffer& outputBuffer) const
{
    bool cacheIsUsable = m_cachedOutputBufferForJS && m_cachedOutputBufferForJS->topologyMatches(outputBuffer);
    if (cacheIsUsable)
        m_cachedOutputBufferForJS->zero();
    else
        m_cachedOutputBufferForJS = outputBuffer.clone(AudioBuffer::ShouldCopyChannelData::No);

    return m_cachedOutputBufferForJS;
}
void ScriptProcessorNode::uninitialize()
{
if (!isInitialized())
return;
for (unsigned i = 0; i < bufferCount; ++i) {
auto locker = holdLock(m_bufferLocks[i]);
m_inputBuffers[i] = nullptr;
m_outputBuffers[i] = nullptr;
}
AudioNode::uninitialize();
}
// Called with the graph lock held once the node is marked for deletion;
// drops the pending activity taken in the constructor.
void ScriptProcessorNode::didBecomeMarkedForDeletion()
{
ASSERT(context().isGraphOwner());
m_pendingActivity = nullptr;
ASSERT(!hasPendingActivity());
}
// Runs on the audio rendering thread once per render quantum. Audio is streamed
// through per-slot buffer pairs: this quantum's input samples are appended into
// the current input buffer, and output samples previously produced by the script
// are read out of the current output buffer, both at m_bufferReadWriteIndex.
// When the index wraps to 0 a full buffer is ready, so an audioprocess event is
// dispatched to the main thread (synchronously for offline contexts) and the
// buffers are swapped.
void ScriptProcessorNode::process(size_t framesToProcess)
{
AudioBus* inputBus = this->input(0)->bus();
AudioBus* outputBus = this->output(0)->bus();
// Slot currently owned by the rendering thread.
unsigned bufferIndex = this->bufferIndex();
ASSERT(bufferIndex < bufferCount);
// Never block the rendering thread. If the main thread still holds this slot's
// lock (the audioprocess handler is running), output silence for this quantum.
auto locker = tryHoldLock(m_bufferLocks[bufferIndex]);
if (!locker) {
outputBus->zero();
return;
}
AudioBuffer* inputBuffer = m_inputBuffers[bufferIndex].get();
AudioBuffer* outputBuffer = m_outputBuffers[bufferIndex].get();
// Sanity-check the buffers: the output buffer must exist and match bufferSize(),
// there must be room for this quantum, and — when the node has input channels —
// the input buffer must exist and match bufferSize() too.
unsigned numberOfInputChannels = m_internalInputBus->numberOfChannels();
bool buffersAreGood = outputBuffer && bufferSize() == outputBuffer->length() && m_bufferReadWriteIndex + framesToProcess <= bufferSize();
if (m_internalInputBus->numberOfChannels())
buffersAreGood = buffersAreGood && inputBuffer && bufferSize() == inputBuffer->length();
ASSERT(buffersAreGood);
if (!buffersAreGood)
return;
// bufferSize() must be a non-zero integral multiple of framesToProcess so the
// read/write index wraps exactly at the buffer boundary.
bool isFramesToProcessGood = framesToProcess && bufferSize() >= framesToProcess && !(bufferSize() % framesToProcess);
ASSERT(isFramesToProcessGood);
if (!isFramesToProcessGood)
return;
// The bus channel counts must match what the node was constructed with.
unsigned numberOfOutputChannels = outputBus->numberOfChannels();
bool channelsAreGood = (numberOfInputChannels == m_numberOfInputChannels) && (numberOfOutputChannels == m_numberOfOutputChannels);
ASSERT(channelsAreGood);
if (!channelsAreGood)
return;
// Point the internal bus's channels at the current write position inside the
// input buffer, then copy this quantum's input samples into place.
for (unsigned i = 0; i < numberOfInputChannels; i++)
m_internalInputBus->setChannelMemory(i, inputBuffer->rawChannelData(i) + m_bufferReadWriteIndex, framesToProcess);
if (numberOfInputChannels)
m_internalInputBus->copyFrom(*inputBus);
// Copy the script-produced samples at the current read position out to the output bus.
for (unsigned i = 0; i < numberOfOutputChannels; ++i)
memcpy(outputBus->channel(i)->mutableData(), outputBuffer->rawChannelData(i) + m_bufferReadWriteIndex, sizeof(float) * framesToProcess);
// Advance and wrap; wrapping to zero means a full buffer is ready for JavaScript.
m_bufferReadWriteIndex = (m_bufferReadWriteIndex + framesToProcess) % bufferSize();
if (!m_bufferReadWriteIndex) {
if (context().isOfflineContext()) {
// Offline rendering must not get ahead of script processing: block this
// thread (still holding the slot lock) until the main-thread handler is done.
BinarySemaphore semaphore;
callOnMainThread([this, &semaphore, bufferIndex, protector = makeRef(*this)] {
fireProcessEvent(bufferIndex);
semaphore.signal();
});
semaphore.wait();
} else {
// Real-time context: fire the event asynchronously. The lambda takes the slot
// lock, so process() outputs silence for this slot until the handler finishes.
callOnMainThread([this, bufferIndex, protector = makeRef(*this)] {
auto locker = holdLock(m_bufferLocks[bufferIndex]);
fireProcessEvent(bufferIndex);
});
}
// Hand the rendering thread the next buffer slot.
swapBuffers();
}
}
// Main-thread half of the audioprocess pipeline. Wraps the given slot's input
// and output buffers in JS-visible AudioBuffers, dispatches the audioprocess
// event, then copies whatever the handler wrote in outputBufferForJS back into
// the internal output buffer for the rendering thread to consume. If that
// copy-back fails the output buffer is zeroed so stale samples are never played
// (NOTE(review): presumably copyTo() fails when the handler changed the JS
// buffer's topology or detached it — confirm against AudioBuffer::copyTo).
void ScriptProcessorNode::fireProcessEvent(unsigned bufferIndex)
{
ASSERT(isMainThread());
AudioBuffer* inputBuffer = m_inputBuffers[bufferIndex].get();
AudioBuffer* outputBuffer = m_outputBuffers[bufferIndex].get();
ASSERT(outputBuffer);
if (!outputBuffer)
return;
if (context().isStopped())
return;
// Time (in seconds of context time) at which this block will be played:
// the frame count already rendered plus one full buffer, over the sample rate.
double playbackTime = (context().currentSampleFrame() + m_bufferSize) / static_cast<double>(context().sampleRate());
auto inputBufferForJS = createInputBufferForJS(inputBuffer);
auto outputBufferForJS = createOutputBufferForJS(*outputBuffer);
dispatchEvent(AudioProcessingEvent::create(inputBufferForJS.get(), outputBufferForJS.get(), playbackTime));
if (!outputBufferForJS->copyTo(*outputBuffer))
outputBuffer->zero();
}
// The channel count of a ScriptProcessorNode is fixed at construction time:
// setting it to its current value is a no-op, anything else is rejected.
ExceptionOr<void> ScriptProcessorNode::setChannelCount(unsigned channelCount)
{
    ASSERT(isMainThread());
    if (channelCount == this->channelCount())
        return { };
    return Exception { NotSupportedError, "ScriptProcessorNode's channelCount cannot be changed"_s };
}
// The channel count mode of a ScriptProcessorNode is locked to 'explicit':
// re-setting the current mode is a no-op, anything else is rejected.
ExceptionOr<void> ScriptProcessorNode::setChannelCountMode(ChannelCountMode mode)
{
    ASSERT(isMainThread());
    if (mode == this->channelCountMode())
        return { };
    return Exception { NotSupportedError, "ScriptProcessorNode's channelCountMode cannot be changed from 'explicit'"_s };
}
// Reports an infinite tail time: the script callback can keep producing
// output regardless of the input signal.
double ScriptProcessorNode::tailTime() const
{
return std::numeric_limits<double>::infinity();
}
// Reports an infinite latency time, matching tailTime(); the node's buffering
// through script makes its latency unbounded from the graph's perspective.
double ScriptProcessorNode::latencyTime() const
{
return std::numeric_limits<double>::infinity();
}
// Always requires tail processing, consistent with the infinite tailTime().
bool ScriptProcessorNode::requiresTailProcessing() const
{
return true;
}
// Keeps the cached audioprocess-listener flag in sync with the listener list;
// virtualHasPendingActivity() reads this flag to decide whether to stay alive.
void ScriptProcessorNode::eventListenersDidChange()
{
m_hasAudioProcessEventListener = hasEventListeners(eventNames().audioprocessEvent);
}
// The node has pending activity only while its context is still open and an
// audioprocess listener is registered; a closed context can never fire again.
bool ScriptProcessorNode::virtualHasPendingActivity() const
{
    return !context().isClosed() && m_hasAudioProcessEventListener;
}
}
#endif // ENABLE(WEB_AUDIO)