#ifndef AudioContext_h
#define AudioContext_h
#include "ActiveDOMObject.h"
#include "AsyncAudioDecoder.h"
#include "AudioBus.h"
#include "AudioDestinationNode.h"
#include "EventListener.h"
#include "EventTarget.h"
#include "MediaCanStartListener.h"
#include <wtf/HashSet.h>
#include <wtf/MainThread.h>
#include <wtf/OwnPtr.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Threading.h>
#include <wtf/Vector.h>
#include <wtf/text/AtomicStringHash.h>
namespace WebCore {
// Forward declarations (kept alphabetical) so this header doesn't need the
// corresponding node headers; only pointer/PassRefPtr uses appear below.
class AnalyserNode;
class AudioBuffer;
class AudioBufferCallback;
class AudioBufferSourceNode;
class AudioListener;
class AudioNode;
class AudioNodeOutput;
class AudioSummingJunction;
class BiquadFilterNode;
class ChannelMergerNode;
class ChannelSplitterNode;
class ConvolverNode;
class DelayNode;
class Document;
class DynamicsCompressorNode;
class GainNode;
class HRTFDatabaseLoader;
class HTMLMediaElement;
class MediaElementAudioSourceNode;
class MediaStream; // needed by createMediaStreamSource() when ENABLE(MEDIA_STREAM) is on
class MediaStreamAudioDestinationNode;
class MediaStreamAudioSourceNode;
class OscillatorNode;
class PannerNode;
class ScriptProcessorNode;
class WaveShaperNode;
class WaveTable;
// AudioContext is the entry point of the Web Audio API. It owns the graph of
// AudioNodes, the destination node that drives rendering, and the locking and
// deferred-deletion machinery that coordinates the main thread with the
// real-time audio thread.
class AudioContext : public ActiveDOMObject, public ThreadSafeRefCounted<AudioContext>, public EventTarget, public MediaCanStartListener {
public:
    // Creates a context that renders to the audio hardware.
    static PassRefPtr<AudioContext> create(Document*, ExceptionCode&);

    // Creates a context that renders offline into an AudioBuffer with the
    // given channel count, length, and sample rate.
    static PassRefPtr<AudioContext> createOfflineContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);

    virtual ~AudioContext();

    bool isInitialized() const;

    // True when this context renders into m_renderTarget instead of hardware.
    bool isOfflineContext() const { return m_isOfflineContext; }

    bool isRunnable() const;

    HRTFDatabaseLoader* hrtfDatabaseLoader() const { return m_hrtfDatabaseLoader.get(); }

    // ActiveDOMObject: shuts the context down when the document goes away.
    virtual void stop();

    Document* document() const;
    bool hasDocument();

    // Rendering state; the time/rate accessors delegate to the destination node.
    AudioDestinationNode* destination() { return m_destinationNode.get(); }
    size_t currentSampleFrame() const { return m_destinationNode->currentSampleFrame(); }
    double currentTime() const { return m_destinationNode->currentTime(); }
    float sampleRate() const { return m_destinationNode->sampleRate(); }
    unsigned long activeSourceCount() const { return static_cast<unsigned long>(m_activeSourceCount); }

    void incrementActiveSourceCount();
    void decrementActiveSourceCount();

    // Buffer creation and asynchronous decoding of encoded audio file data.
    PassRefPtr<AudioBuffer> createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode&);
    PassRefPtr<AudioBuffer> createBuffer(ArrayBuffer*, bool mixToMono, ExceptionCode&);
    void decodeAudioData(ArrayBuffer*, PassRefPtr<AudioBufferCallback>, PassRefPtr<AudioBufferCallback>, ExceptionCode& ec);

    AudioListener* listener() { return m_listener.get(); }

    // Factory methods for the node types; the ExceptionCode overloads validate
    // their arguments (buffer sizes, channel counts, delay times, ...).
    PassRefPtr<AudioBufferSourceNode> createBufferSource();
#if ENABLE(VIDEO)
    PassRefPtr<MediaElementAudioSourceNode> createMediaElementSource(HTMLMediaElement*, ExceptionCode&);
#endif
#if ENABLE(MEDIA_STREAM)
    PassRefPtr<MediaStreamAudioSourceNode> createMediaStreamSource(MediaStream*, ExceptionCode&);
    PassRefPtr<MediaStreamAudioDestinationNode> createMediaStreamDestination();
#endif
    PassRefPtr<GainNode> createGain();
    PassRefPtr<BiquadFilterNode> createBiquadFilter();
    PassRefPtr<WaveShaperNode> createWaveShaper();
    PassRefPtr<DelayNode> createDelay(ExceptionCode&);
    PassRefPtr<DelayNode> createDelay(double maxDelayTime, ExceptionCode&);
    PassRefPtr<PannerNode> createPanner();
    PassRefPtr<ConvolverNode> createConvolver();
    PassRefPtr<DynamicsCompressorNode> createDynamicsCompressor();
    PassRefPtr<AnalyserNode> createAnalyser();
    PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, ExceptionCode&);
    PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode&);
    PassRefPtr<ScriptProcessorNode> createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode&);
    PassRefPtr<ChannelSplitterNode> createChannelSplitter(ExceptionCode&);
    PassRefPtr<ChannelSplitterNode> createChannelSplitter(size_t numberOfOutputs, ExceptionCode&);
    PassRefPtr<ChannelMergerNode> createChannelMerger(ExceptionCode&);
    PassRefPtr<ChannelMergerNode> createChannelMerger(size_t numberOfInputs, ExceptionCode&);
    PassRefPtr<OscillatorNode> createOscillator();
    PassRefPtr<WaveTable> createWaveTable(Float32Array* real, Float32Array* imag, ExceptionCode&);

    // Notifies the context that a source node has finished playing so its
    // reference can be dropped later on the main thread.
    void notifyNodeFinishedProcessing(AudioNode*);

    // Called at the start / end of each render quantum.
    void handlePreRenderTasks();
    void handlePostRenderTasks();

    void derefFinishedSourceNodes();

    // Two-stage node deletion: nodes are marked (any thread holding the graph
    // lock) and actually deleted later on the main thread.
    void markForDeletion(AudioNode*);
    void deleteMarkedNodes();

    // Automatic pull nodes are processed every render quantum even when not
    // connected (directly or indirectly) to the destination.
    void addAutomaticPullNode(AudioNode*);
    void removeAutomaticPullNode(AudioNode*);
    void processAutomaticPullNodes(size_t framesToProcess);

    // Main thread only: tracks how many node connections have been made.
    void incrementConnectionCount()
    {
        ASSERT(isMainThread());
        m_connectionCount++;
    }

    unsigned connectionCount() const { return m_connectionCount; }

    // Graph-lock bookkeeping. The audio thread identity is recorded so
    // isAudioThread()/isGraphOwner() can distinguish the two sides.
    void setAudioThread(ThreadIdentifier thread) { m_audioThread = thread; }
    ThreadIdentifier audioThread() const { return m_audioThread; }
    bool isAudioThread() const;
    bool isAudioThreadFinished() const { return m_isAudioThreadFinished; }

    // mustReleaseLock is set to true when the lock was acquired by this call
    // (so the caller must unlock()), false when it was already held.
    void lock(bool& mustReleaseLock);
    bool tryLock(bool& mustReleaseLock);
    void unlock();

    bool isGraphOwner() const;

    static unsigned maxNumberOfChannels() { return MaxNumberOfChannels; }

    // RAII helper that acquires the context graph lock for the current scope
    // and releases it in the destructor only if this scope acquired it.
    class AutoLocker {
    public:
        AutoLocker(AudioContext* context)
            : m_context(context)
        {
            ASSERT(context);
            context->lock(m_mustReleaseLock);
        }

        ~AutoLocker()
        {
            if (m_mustReleaseLock)
                m_context->unlock();
        }

    private:
        AudioContext* m_context;
        bool m_mustReleaseLock;
    };

    // Finish-derefs that could not be performed immediately are queued here
    // and handled later.
    void addDeferredFinishDeref(AudioNode*);
    void handleDeferredFinishDerefs();

    // Dirty-marking for summing junctions and node outputs whose state must
    // be updated before the next render.
    void markSummingJunctionDirty(AudioSummingJunction*);
    void markAudioNodeOutputDirty(AudioNodeOutput*);
    void removeMarkedSummingJunction(AudioSummingJunction*);

    // EventTarget.
    virtual const AtomicString& interfaceName() const;
    virtual ScriptExecutionContext* scriptExecutionContext() const;
    virtual EventTargetData* eventTargetData() { return &m_eventTargetData; }
    virtual EventTargetData* ensureEventTargetData() { return &m_eventTargetData; }

    DEFINE_ATTRIBUTE_EVENT_LISTENER(complete);

    // Disambiguate ref()/deref(), which exist in both ThreadSafeRefCounted
    // and the EventTarget hierarchy.
    using ThreadSafeRefCounted<AudioContext>::ref;
    using ThreadSafeRefCounted<AudioContext>::deref;

    // Offline rendering control and its completion event.
    void startRendering();
    void fireCompletionEvent();

    static unsigned s_hardwareContextCount;

    // Behavior restrictions that can gate when audio is allowed to start
    // (e.g. requiring a user gesture on some platforms).
    enum BehaviorRestrictionFlags {
        NoRestrictions = 0,
        RequireUserGestureForAudioStartRestriction = 1 << 0,
        RequirePageConsentForAudioStartRestriction = 1 << 1,
    };
    typedef unsigned BehaviorRestrictions;

    bool userGestureRequiredForAudioStart() const { return m_restrictions & RequireUserGestureForAudioStartRestriction; }
    bool pageConsentRequiredForAudioStart() const { return m_restrictions & RequirePageConsentForAudioStartRestriction; }
    void addBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions |= restriction; }
    void removeBehaviorRestriction(BehaviorRestrictions restriction) { m_restrictions &= ~restriction; }

protected:
    explicit AudioContext(Document*);
    AudioContext(Document*, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate);
    static bool isSampleRateRangeGood(float sampleRate);

private:
    void constructCommon();

    void lazyInitialize();
    void uninitialize();

    bool m_isStopScheduled;
    static void stopDispatch(void* userData);
    void clear();

    void scheduleNodeDeletion();
    static void deleteMarkedNodesDispatch(void* userData);

    // MediaCanStartListener.
    virtual void mediaCanStart() OVERRIDE;

    bool m_isInitialized;
    bool m_isAudioThreadFinished;

    // Source nodes are ref'd while playing so they stay alive even if
    // JavaScript drops its references.
    void refNode(AudioNode*);
    void derefNode(AudioNode*);
    void derefUnfinishedSourceNodes();

    RefPtr<AudioDestinationNode> m_destinationNode;
    RefPtr<AudioListener> m_listener;

    Vector<AudioNode*> m_finishedNodes;
    Vector<AudioNode*> m_referencedNodes;

    // Two-stage deletion (see markForDeletion()/deleteMarkedNodes()).
    Vector<AudioNode*> m_nodesMarkedForDeletion;
    Vector<AudioNode*> m_nodesToDelete;
    bool m_isDeletionScheduled;

    HashSet<AudioSummingJunction*> m_dirtySummingJunctions;
    HashSet<AudioNodeOutput*> m_dirtyAudioNodeOutputs;
    void handleDirtyAudioSummingJunctions();
    void handleDirtyAudioNodeOutputs();

    HashSet<AudioNode*> m_automaticPullNodes;
    Vector<AudioNode*> m_renderingAutomaticPullNodes;
    bool m_automaticPullNodesNeedUpdating; // set while the graph lock is held; rendering copy rebuilt lazily
    void updateAutomaticPullNodes();

    unsigned m_connectionCount;

    // Graph locking state.
    Mutex m_contextGraphMutex;
    volatile ThreadIdentifier m_audioThread;
    volatile ThreadIdentifier m_graphOwnerThread; // thread currently holding the graph lock — presumably a sentinel when unowned; TODO confirm

    Vector<AudioNode*> m_deferredFinishDerefList;

    RefPtr<HRTFDatabaseLoader> m_hrtfDatabaseLoader;

    // EventTarget glue: route EventTarget ref-counting to ThreadSafeRefCounted.
    virtual void refEventTarget() { ref(); }
    virtual void derefEventTarget() { deref(); }
    EventTargetData m_eventTargetData;

    // Offline rendering target; null for hardware-backed contexts.
    RefPtr<AudioBuffer> m_renderTarget;
    bool m_isOfflineContext;

    AsyncAudioDecoder m_audioDecoder;

    enum { MaxNumberOfChannels = 32 };

    int m_activeSourceCount;

    BehaviorRestrictions m_restrictions;
};
}
#endif // AudioContext_h