#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioContext.h"
#include "ArrayBuffer.h"
#include "AudioBuffer.h"
#include "AudioBufferSourceNode.h"
#include "AudioChannelMerger.h"
#include "AudioChannelSplitter.h"
#include "AudioGainNode.h"
#include "AudioListener.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "AudioPannerNode.h"
#include "ConvolverNode.h"
#include "DefaultAudioDestinationNode.h"
#include "DelayNode.h"
#include "Document.h"
#include "DynamicsCompressorNode.h"
#include "FFTFrame.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include "HighPass2FilterNode.h"
#include "JavaScriptAudioNode.h"
#include "LowPass2FilterNode.h"
#include "OfflineAudioCompletionEvent.h"
#include "OfflineAudioDestinationNode.h"
#include "PlatformString.h"
#include "RealtimeAnalyserNode.h"
#if DEBUG_AUDIONODE_REFERENCES
#include <stdio.h>
#endif
#include <wtf/OwnPtr.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCounted.h>
// Sentinel meaning "no thread currently owns the context graph lock".
const int UndefinedThreadIdentifier = 0xffffffff;
// Upper bound on nodes freed per call to deleteMarkedNodes(), to limit time
// spent deleting while the graph lock is held.
const unsigned MaxNodesToDeletePerQuantum = 10;
namespace WebCore {
PassRefPtr<AudioContext> AudioContext::create(Document* document)
{
    // Factory for a realtime context rendering to the audio hardware.
    RefPtr<AudioContext> context = adoptRef(new AudioContext(document));
    return context.release();
}
PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
{
    // Factory for an offline (non-realtime) context that renders into an
    // in-memory AudioBuffer of the requested shape.
    RefPtr<AudioContext> context = adoptRef(new AudioContext(document, numberOfChannels, numberOfFrames, sampleRate));
    return context.release();
}
// Constructor for a realtime AudioContext: output goes to the hardware via a
// DefaultAudioDestinationNode and rendering is started immediately.
AudioContext::AudioContext(Document* document)
: ActiveDOMObject(document, this)
, m_isInitialized(false)
, m_isAudioThreadFinished(false)
, m_document(document)
, m_destinationNode(0)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(false)
{
// Shared one-time setup (FFT tables, listener, scratch buses) must run before
// any node is created.
constructCommon();
m_destinationNode = DefaultAudioDestinationNode::create(this);
// Kick off asynchronous loading of the HRTF database (used for panning) so
// context creation is not blocked on it.
m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
// Begin pulling audio through the graph right away.
m_destinationNode->startRendering();
}
// Constructor for an offline AudioContext: renders into m_renderTarget via an
// OfflineAudioDestinationNode. Note that unlike the realtime constructor,
// rendering is NOT started here (see startRendering()).
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
: ActiveDOMObject(document, this)
, m_isInitialized(false)
, m_isAudioThreadFinished(false)
, m_document(document)
, m_destinationNode(0)
, m_connectionCount(0)
, m_audioThread(0)
, m_graphOwnerThread(UndefinedThreadIdentifier)
, m_isOfflineContext(true)
{
constructCommon();
// The loader takes the explicit sample-rate argument here, since there is no
// destination node yet to ask.
m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
// Create the render target buffer first; the destination node renders into it.
m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
// Setup shared by both the realtime and offline constructors.
void AudioContext::constructCommon()
{
// Permit ref()/deref() before the adoptRef() in create() completes --
// presumably because construction below can hand out references to this
// context; see wtf/RefCounted.h to confirm.
relaxAdoptionRequirement();
// One-time global FFT initialization.
FFTFrame::initialize();
m_listener = AudioListener::create();
// Scratch mono/stereo buses, each one render quantum
// (AudioNode::ProcessingSizeInFrames) long.
m_temporaryMonoBus = adoptPtr(new AudioBus(1, AudioNode::ProcessingSizeInFrames));
m_temporaryStereoBus = adoptPtr(new AudioBus(2, AudioNode::ProcessingSizeInFrames));
}
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    printf("%p: AudioContext::~AudioContext()\n", this);
#endif
    // By destruction time uninitialize() must already have run, so no node
    // may still be queued as finished, tracked as referenced, or awaiting
    // deletion.
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_nodesToDelete.size());
}
// Heavyweight initialization, deferred until the context is first used.
void AudioContext::lazyInitialize()
{
    if (m_isInitialized)
        return;

    // A context must not be re-initialized after it has been explicitly
    // uninitialized (the audio thread is gone for good).
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode.get())
        m_destinationNode->initialize();
    m_isInitialized = true;
}
// Tear down everything set up in lazyInitialize(). Idempotent: a second call
// is a no-op because m_isInitialized is cleared at the end.
void AudioContext::uninitialize()
{
    if (!m_isInitialized)
        return;

    // lazyInitialize() tolerates a null destination node (it sets
    // m_isInitialized regardless), so guard here too instead of
    // unconditionally dereferencing a possibly-null pointer.
    if (m_destinationNode.get())
        m_destinationNode->uninitialize();

    // Forbid any later re-initialization (checked in lazyInitialize()).
    m_isAudioThreadFinished = true;

    m_destinationNode.clear();

    // Drop the context's references on source nodes that never finished.
    derefUnfinishedSourceNodes();

    // Release the memory held by buffers the context kept alive via
    // refBuffer().
    for (unsigned i = 0; i < m_allocatedBuffers.size(); ++i)
        m_allocatedBuffers[i]->releaseMemory();
    m_allocatedBuffers.clear();

    m_isInitialized = false;
}
// True between a successful lazyInitialize() and uninitialize().
bool AudioContext::isInitialized() const
{
return m_isInitialized;
}
// The context can render only once it is initialized and the asynchronously
// loaded HRTF database (see the constructors) has finished loading.
bool AudioContext::isRunnable() const
{
    return isInitialized() && m_hrtfDatabaseLoader->isLoaded();
}
// ActiveDOMObject hook: the owning document is going away. Clear the
// back-pointer first, then tear down the audio machinery.
void AudioContext::stop()
{
    m_document = 0;
    uninitialize();
}
// Returns the owning document. Only valid before stop() clears the pointer;
// use hasDocument() to check.
Document* AudioContext::document() const
{
ASSERT(m_document);
return m_document;
}
// Unlike document(), this is safe to call after stop() has cleared the
// document pointer.
bool AudioContext::hasDocument()
{
    return m_document != 0;
}
// Keep the buffer alive for the lifetime of the context; its memory is
// released in uninitialize() via m_allocatedBuffers.
void AudioContext::refBuffer(PassRefPtr<AudioBuffer> buffer)
{
m_allocatedBuffers.append(buffer);
}
// Plain buffer allocation; does not require the context to be initialized.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
{
    RefPtr<AudioBuffer> buffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    return buffer.release();
}
// Decode encoded audio-file data held in the ArrayBuffer into an AudioBuffer
// at this context's sample rate (optionally mixed down to mono).
// Returns 0 on a null input buffer.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono)
{
ASSERT(arrayBuffer);
if (!arrayBuffer)
return 0;
return AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
}
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    // The context keeps source nodes alive while they're tracked (see
    // refNode()); they are released via derefFinishedSourceNodes() or
    // derefUnfinishedSourceNodes().
    refNode(node.get());
    return node.release();
}
PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<JavaScriptAudioNode> node = JavaScriptAudioNode::create(this, m_destinationNode->sampleRate(), bufferSize);

    // Like buffer sources, script nodes are kept alive by the context (see
    // refNode()).
    refNode(node.get());
    return node.release();
}
PassRefPtr<LowPass2FilterNode> AudioContext::createLowPass2Filter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<LowPass2FilterNode> node = LowPass2FilterNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<HighPass2FilterNode> AudioContext::createHighPass2Filter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<HighPass2FilterNode> node = HighPass2FilterNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<AudioPannerNode> AudioContext::createPanner()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioPannerNode> node = AudioPannerNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<ConvolverNode> AudioContext::createConvolver()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<ConvolverNode> node = ConvolverNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DynamicsCompressorNode> node = DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<RealtimeAnalyserNode> AudioContext::createAnalyser()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<RealtimeAnalyserNode> node = RealtimeAnalyserNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<AudioGainNode> AudioContext::createGainNode()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioGainNode> node = AudioGainNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<DelayNode> AudioContext::createDelayNode()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<AudioChannelSplitter> AudioContext::createChannelSplitter()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioChannelSplitter> node = AudioChannelSplitter::create(this, m_destinationNode->sampleRate());
    return node.release();
}
PassRefPtr<AudioChannelMerger> AudioContext::createChannelMerger()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioChannelMerger> node = AudioChannelMerger::create(this, m_destinationNode->sampleRate());
    return node.release();
}
// Called on the audio thread when a node reports it has finished; the node is
// queued here and its reference released later in derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
ASSERT(isAudioThread());
m_finishedNodes.append(node);
}
// Release the context's reference on every node queued by
// notifyNodeFinishedProcessing(). Requires the graph lock.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());

    unsigned finishedCount = m_finishedNodes.size();
    for (unsigned index = 0; index < finishedCount; ++index)
        derefNode(m_finishedNodes[index]);

    m_finishedNodes.clear();
}
// Take a "connection" reference on the node and track it so it stays alive
// while the context needs it; balanced by derefNode(). The graph lock is
// taken because m_referencedNodes is also touched from the audio thread.
void AudioContext::refNode(AudioNode* node)
{
ASSERT(isMainThread());
AutoLocker locker(this);
node->ref(AudioNode::RefTypeConnection);
m_referencedNodes.append(node);
}
// Counterpart of refNode(): drop the connection reference and stop tracking
// the node. Requires the graph lock.
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    node->deref(AudioNode::RefTypeConnection);

    // Remove the first (and only) occurrence from the tracked list.
    for (unsigned index = 0; index < m_referencedNodes.size(); ++index) {
        if (m_referencedNodes[index] == node) {
            m_referencedNodes.remove(index);
            break;
        }
    }
}
void AudioContext::derefUnfinishedSourceNodes()
{
ASSERT(isMainThread() && isAudioThreadFinished());
for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
m_referencedNodes.clear();
}
// Blocking acquisition of the graph lock; main-thread only (the audio thread
// must use tryLock() so it never blocks). On return, mustReleaseLock says
// whether the caller's matching unlock() is needed -- it is false when this
// thread already owned the lock (recursive acquisition).
void AudioContext::lock(bool& mustReleaseLock)
{
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();
    if (thisThread == m_graphOwnerThread) {
        // We already own the lock; don't release it on the way out.
        mustReleaseLock = false;
        return;
    }

    m_contextGraphMutex.lock();
    m_graphOwnerThread = thisThread;
    mustReleaseLock = true;
}
// Non-blocking acquisition of the graph lock, intended for the audio thread:
// if the lock can't be taken immediately the caller simply skips graph
// maintenance this quantum rather than stalling real-time rendering.
// Returns true if the lock is held on return; mustReleaseLock says whether
// the caller's matching unlock() is needed (false on recursive acquisition).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
ThreadIdentifier thisThread = currentThread();
bool isAudioThread = thisThread == audioThread();
// tryLock() is only meant for the audio thread (or after it has finished).
ASSERT(isAudioThread || isAudioThreadFinished());
if (!isAudioThread) {
// In release builds, fall back to the blocking main-thread lock().
lock(mustReleaseLock);
return true;
}
bool hasLock;
if (thisThread == m_graphOwnerThread) {
// Recursive acquisition: already owned by this thread, no release needed.
hasLock = true;
mustReleaseLock = false;
} else {
// Attempt the mutex without blocking; record ownership only on success.
hasLock = m_contextGraphMutex.tryLock();
if (hasLock)
m_graphOwnerThread = thisThread;
mustReleaseLock = hasLock;
}
return hasLock;
}
// Release the graph lock. Ownership is cleared before unlocking the mutex so
// there is no window where another thread holds the mutex while
// m_graphOwnerThread still names this thread.
void AudioContext::unlock()
{
ASSERT(currentThread() == m_graphOwnerThread);
m_graphOwnerThread = UndefinedThreadIdentifier;
m_contextGraphMutex.unlock();
}
// True when called from the dedicated audio rendering thread.
// NOTE(review): m_audioThread is read without synchronization here --
// presumably benign, but worth confirming.
bool AudioContext::isAudioThread() const
{
    return m_audioThread == currentThread();
}
// True when the calling thread currently holds the graph lock
// (see lock()/tryLock()/unlock()).
bool AudioContext::isGraphOwner() const
{
    return m_graphOwnerThread == currentThread();
}
// Queue a deref that couldn't be completed on the audio thread right away;
// it is applied later in handleDeferredFinishDerefs() under the graph lock.
void AudioContext::addDeferredFinishDeref(AudioNode* node, AudioNode::RefType refType)
{
ASSERT(isAudioThread());
m_deferredFinishDerefList.append(AudioContext::RefInfo(node, refType));
}
// Called by the audio thread at the start of each render quantum to bring
// graph state up to date. Uses tryLock(): if the main thread holds the lock
// the work is simply skipped and retried next quantum.
void AudioContext::handlePreRenderTasks()
{
ASSERT(isAudioThread());
bool mustReleaseLock;
if (tryLock(mustReleaseLock)) {
// Refresh the rendering state of any inputs/outputs marked dirty.
handleDirtyAudioNodeInputs();
handleDirtyAudioNodeOutputs();
if (mustReleaseLock)
unlock();
}
}
// Called by the audio thread at the end of each render quantum. Performs the
// deferred cleanup that must happen under the graph lock, in dependency
// order: apply queued derefs, release finished nodes, delete marked nodes,
// then refresh dirty inputs/outputs. Skipped entirely (and retried next
// quantum) when the lock can't be taken without blocking.
void AudioContext::handlePostRenderTasks()
{
ASSERT(isAudioThread());
bool mustReleaseLock;
if (tryLock(mustReleaseLock)) {
// Finish derefs queued via addDeferredFinishDeref().
handleDeferredFinishDerefs();
// Release nodes queued via notifyNodeFinishedProcessing().
derefFinishedSourceNodes();
// Free nodes queued via markForDeletion() (bounded per call).
deleteMarkedNodes();
handleDirtyAudioNodeInputs();
handleDirtyAudioNodeOutputs();
if (mustReleaseLock)
unlock();
}
}
void AudioContext::handleDeferredFinishDerefs()
{
ASSERT(isAudioThread() && isGraphOwner());
for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
AudioNode* node = m_deferredFinishDerefList[i].m_node;
AudioNode::RefType refType = m_deferredFinishDerefList[i].m_refType;
node->finishDeref(refType);
}
m_deferredFinishDerefList.clear();
}
// Queue a node for deletion; it is actually freed in deleteMarkedNodes().
void AudioContext::markForDeletion(AudioNode* node)
{
ASSERT(isGraphOwner());
m_nodesToDelete.append(node);
}
// Free nodes previously queued via markForDeletion(). To bound the time spent
// here (this runs with the graph lock held, possibly on the audio thread), at
// most MaxNodesToDeletePerQuantum nodes are deleted per call; any remainder
// waits for a later call.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isGraphOwner() || isAudioThreadFinished());

    size_t nodesDeleted = 0;
    while (size_t n = m_nodesToDelete.size()) {
        AudioNode* node = m_nodesToDelete[n - 1];
        m_nodesToDelete.removeLast();

        // Before deleting the node, purge its inputs/outputs from the dirty
        // sets so later dirty-handling never touches freed memory.
        unsigned numberOfInputs = node->numberOfInputs();
        for (unsigned i = 0; i < numberOfInputs; ++i)
            m_dirtyAudioNodeInputs.remove(node->input(i));

        unsigned numberOfOutputs = node->numberOfOutputs();
        for (unsigned i = 0; i < numberOfOutputs; ++i)
            m_dirtyAudioNodeOutputs.remove(node->output(i));

        delete node;

        // Was '>' -- an off-by-one that deleted MaxNodesToDeletePerQuantum + 1
        // nodes per call; '>=' honors the limit the constant names.
        if (++nodesDeleted >= MaxNodesToDeletePerQuantum)
            break;
    }
}
// Record that an input's connections changed; its rendering state is
// refreshed in handleDirtyAudioNodeInputs().
void AudioContext::markAudioNodeInputDirty(AudioNodeInput* input)
{
ASSERT(isGraphOwner());
m_dirtyAudioNodeInputs.add(input);
}
// Record that an output's connections changed; its rendering state is
// refreshed in handleDirtyAudioNodeOutputs().
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
ASSERT(isGraphOwner());
m_dirtyAudioNodeOutputs.add(output);
}
void AudioContext::handleDirtyAudioNodeInputs()
{
ASSERT(isGraphOwner());
for (HashSet<AudioNodeInput*>::iterator i = m_dirtyAudioNodeInputs.begin(); i != m_dirtyAudioNodeInputs.end(); ++i)
(*i)->updateRenderingState();
m_dirtyAudioNodeInputs.clear();
}
void AudioContext::handleDirtyAudioNodeOutputs()
{
ASSERT(isGraphOwner());
for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
(*i)->updateRenderingState();
m_dirtyAudioNodeOutputs.clear();
}
// EventTarget/ActiveDOMObject hook: our execution context is the owning
// document. Note document() asserts the document is still alive.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
return document();
}
// Identifies this object as an AudioContext -- presumably a downcast hook for
// the bindings/EventTarget machinery (base implementation likely returns 0;
// confirm against the base class).
AudioContext* AudioContext::toAudioContext()
{
return this;
}
// Begin pulling audio through the graph via the destination node. The
// realtime constructor calls this automatically; for offline contexts it
// starts the offline render (see the offline constructor).
void AudioContext::startRendering()
{
destination()->startRendering();
}
// Dispatch the offline-rendering completion event carrying the rendered
// buffer. Main-thread only; silently bails if the render target is missing
// or the document has already gone away.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;

    AudioBuffer* renderedBuffer = m_renderTarget.get();
    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    if (!hasDocument())
        return;

    dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
}
}
#endif // ENABLE(WEB_AUDIO)