#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioContext.h"
#include "AudioTimestamp.h"
#include "DOMWindow.h"
#include "DefaultAudioDestinationNode.h"
#include "JSDOMPromiseDeferred.h"
#include "Performance.h"
#include <wtf/IsoMallocInlines.h>
#if ENABLE(MEDIA_STREAM)
#include "MediaStream.h"
#include "MediaStreamAudioDestinationNode.h"
#include "MediaStreamAudioSource.h"
#include "MediaStreamAudioSourceNode.h"
#include "MediaStreamAudioSourceOptions.h"
#endif
#if ENABLE(VIDEO)
#include "HTMLMediaElement.h"
#include "MediaElementAudioSourceNode.h"
#include "MediaElementAudioSourceOptions.h"
#endif
namespace WebCore {
#if OS(WINDOWS)
// Cap on concurrently-live hardware-backed contexts; enforced in create().
constexpr unsigned maxHardwareContexts = 4;
#endif
// Out-of-line definition for the isolated-heap allocator (pairs with the
// WTF_MAKE_ISO_ALLOCATED declaration in the class header).
WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);
// Factory for a realtime AudioContext. Validates platform limits, the
// document's lifecycle state, and the requested sample rate before
// constructing the context; returns an Exception on any failed check.
ExceptionOr<Ref<AudioContext>> AudioContext::create(Document& document, const AudioContextOptions& contextOptions)
{
    ASSERT(isMainThread());
#if OS(WINDOWS)
    // Windows limits how many hardware-backed contexts may exist at once.
    if (s_hardwareContextCount >= maxHardwareContexts)
        return Exception { QuotaExceededError };
#endif
    if (!document.isFullyActive())
        return Exception { InvalidStateError, "Document is not fully active"_s };
    if (contextOptions.sampleRate.hasValue()) {
        if (!isSupportedSampleRate(contextOptions.sampleRate.value()))
            return Exception { SyntaxError, "sampleRate is not in range"_s };
    }
    Ref<AudioContext> context = adoptRef(*new AudioContext(document, contextOptions));
    // ActiveDOMObject bookkeeping must happen after construction completes.
    context->suspendIfNeeded();
    return context;
}
// Realtime-context constructor; all shared setup is delegated to
// BaseAudioContext. Callers go through create(), which validates inputs.
AudioContext::AudioContext(Document& document, const AudioContextOptions& contextOptions)
: BaseAudioContext(document, contextOptions)
{
}
// Constructor taking an explicit render target buffer; again all shared
// initialization lives in BaseAudioContext.
AudioContext::AudioContext(Document& document, AudioBuffer* renderTarget)
: BaseAudioContext(document, renderTarget)
{
}
// Reports the destination's processing buffer size expressed in seconds
// (framesPerBuffer / sampleRate), or 0 when no destination exists.
double AudioContext::baseLatency()
{
    // Ensure the destination node exists before querying its buffer size.
    lazyInitialize();

    if (auto* destinationNode = this->destination())
        return static_cast<double>(destinationNode->framesPerBuffer()) / sampleRate();
    return 0.;
}
// Returns the context's current output position paired with the matching
// time on the window's Performance timeline.
AudioTimestamp AudioContext::getOutputTimestamp(DOMWindow& window)
{
    // No destination means nothing is being rendered; report a zero timestamp.
    if (!destination())
        return { 0, 0 };

    auto position = outputPosition();

    // The reported position may run slightly ahead of the context clock;
    // clamp so contextTime never exceeds currentTime().
    auto contextTime = std::min(position.position.seconds(), currentTime());

    // Map the wall-clock timestamp onto the Performance timeline; values
    // before the time origin are clamped up to zero.
    auto performanceTime = window.performance().relativeTimeFromTimeOriginInReducedResolution(position.timestamp);
    if (performanceTime < 0.0)
        performanceTime = 0.0;

    return { contextTime, performanceTime };
}
// Implements AudioContext.close(): offline or stopped contexts reject the
// promise; an already-closed context (or one without a destination) resolves
// immediately; otherwise the promise is queued to settle once the state
// transitions to Closed.
void AudioContext::close(DOMPromiseDeferred<void>&& promise)
{
// close() is not valid on offline contexts or after the context was stopped.
if (isOfflineContext() || isStopped()) {
promise.reject(InvalidStateError);
return;
}
if (state() == State::Closed || !destinationNode()) {
promise.resolve();
return;
}
// Register the promise first: it is settled when setState(State::Closed)
// runs in the completion handler below.
addReaction(State::Closed, WTFMove(promise));
lazyInitialize();
// protectedThis keeps the context alive until the async close completes.
destinationNode()->close([this, protectedThis = makeRef(*this)] {
setState(State::Closed);
uninitialize();
});
}
// Narrows the base-class destination pointer to the realtime destination
// type used by this context. May be null before initialization.
DefaultAudioDestinationNode* AudioContext::destination()
{
    auto* node = BaseAudioContext::destination();
    return static_cast<DefaultAudioDestinationNode*>(node);
}
#if ENABLE(VIDEO)
// Wraps an HTMLMediaElement as an audio source node in this context.
// Node creation (and any resulting exception) is delegated to the node type.
ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
{
    ASSERT(isMainThread());
    ALWAYS_LOG(LOGIDENTIFIER);

    return MediaElementAudioSourceNode::create(*this, { &mediaElement });
}
#endif
#if ENABLE(MEDIA_STREAM)
// Wraps a MediaStream as an audio source node in this context. Validation
// and construction are delegated to MediaStreamAudioSourceNode::create.
ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
{
    ASSERT(isMainThread());
    ALWAYS_LOG(LOGIDENTIFIER);

    return MediaStreamAudioSourceNode::create(*this, { &mediaStream });
}
// Creates a destination node that routes this context's output into a
// MediaStream; creation is fully delegated to the node type.
ExceptionOr<Ref<MediaStreamAudioDestinationNode>> AudioContext::createMediaStreamDestination()
{
return MediaStreamAudioDestinationNode::create(*this);
}
#endif
}
#endif // ENABLE(WEB_AUDIO)