// AudioPannerNode.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioPannerNode.h"
#include "AudioBufferSourceNode.h"
#include "AudioBus.h"
#include "AudioContext.h"
#include "AudioNodeInput.h"
#include "AudioNodeOutput.h"
#include "ExceptionCode.h"
#include "HRTFPanner.h"
#include <wtf/MathExtras.h>
using namespace std;
namespace WebCore {
// Sanitizes a value that may have come out of degenerate vector math:
// NaN and +/-infinity are replaced with 0, finite values pass through.
static void fixNANs(double& x)
{
    if (!std::isfinite(x))
        x = 0.0;
}
// Constructs a panner node: one input, one stereo (2-channel) output, with
// HRTF panning selected by default and unity distance/cone gains.
AudioPannerNode::AudioPannerNode(AudioContext* context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(Panner::PanningModelHRTF)
    , m_lastGain(-1.0) // -1 is a sentinel meaning "no gain applied yet"; see process()
    , m_connectionCount(0)
{
    addInput(adoptPtr(new AudioNodeInput(this)));
    addOutput(adoptPtr(new AudioNodeOutput(this, 2)));

    // Gain parameters driven by the distance and cone effects (see
    // distanceConeGain()); both default to unity, range [0, 1].
    m_distanceGain = AudioGain::create("distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioGain::create("coneGain", 1.0, 0.0, 1.0);

    // Default spatial state: source at the origin, oriented along +x, at rest.
    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}
// Tears down the panner implementation before the base class cleans up.
AudioPannerNode::~AudioPannerNode()
{
    uninitialize();
}
void AudioPannerNode::pullInputs(size_t framesToProcess)
{
    // If the context's connection count changed, nodes may have been
    // (dis)connected since we last rendered; re-walk the upstream graph so
    // any buffer sources feeding us learn about this panner.
    bool connectionsChanged = m_connectionCount != context()->connectionCount();
    if (connectionsChanged) {
        m_connectionCount = context()->connectionCount();
        notifyAudioSourcesConnectedToNode(this);
    }

    AudioNode::pullInputs(framesToProcess);
}
void AudioPannerNode::process(size_t framesToProcess)
{
    AudioBus* destinationBus = output(0)->bus();

    // Without a full setup (initialized, connected input, live panner),
    // the node produces silence.
    if (!isInitialized() || !input(0)->isConnected() || !m_panner.get()) {
        destinationBus->zero();
        return;
    }

    AudioBus* sourceBus = input(0)->bus();
    if (!sourceBus) {
        destinationBus->zero();
        return;
    }

    // Spatialize: compute the source's direction relative to the listener
    // and let the panner render it into the destination.
    double azimuth;
    double elevation;
    getAzimuthElevation(&azimuth, &elevation);
    m_panner->pan(azimuth, elevation, sourceBus, destinationBus, framesToProcess);

    // Apply the combined distance/cone attenuation with de-zippering.
    double totalGain = distanceConeGain();
    if (m_lastGain == -1.0)
        m_lastGain = totalGain; // first quantum: snap instead of ramping from the sentinel
    destinationBus->copyWithGainFrom(*destinationBus, &m_lastGain, totalGain);
}
void AudioPannerNode::reset()
{
    // Re-arm the gain sentinel so the next process() snaps to the current
    // gain, and clear any state held inside the panner implementation.
    m_lastGain = -1.0;
    if (m_panner.get())
        m_panner->reset();
}
void AudioPannerNode::initialize()
{
    if (isInitialized())
        return;

    // Build the panner implementation for the currently selected model.
    m_panner = Panner::create(m_panningModel, sampleRate());

    AudioNode::initialize();
}
void AudioPannerNode::uninitialize()
{
    if (!isInitialized())
        return;

    // Release the panner implementation; it is recreated on initialize().
    m_panner.clear();

    AudioNode::uninitialize();
}
// The listener is owned by the context and shared by all panners; all
// spatialization is computed relative to it.
AudioListener* AudioPannerNode::listener()
{
    return context()->listener();
}
void AudioPannerNode::setPanningModel(unsigned short model, ExceptionCode& ec)
{
switch (model) {
case EQUALPOWER:
case HRTF:
if (!m_panner.get() || model != m_panningModel) {
OwnPtr<Panner> newPanner = Panner::create(model, sampleRate());
m_panner = newPanner.release();
m_panningModel = model;
}
break;
case SOUNDFIELD:
default:
ec = NOT_SUPPORTED_ERR;
break;
}
}
// Computes the direction of the source relative to the listener, in degrees.
// Azimuth lies in the listener's horizontal plane (0 = straight ahead);
// elevation is the angle above/below that plane, folded into [-90, 90].
// Either out-parameter may be null, in which case it is skipped.
void AudioPannerNode::getAzimuthElevation(double* outAzimuth, double* outElevation)
{
    double azimuth = 0.0;

    // Vector from listener to source.
    FloatPoint3D listenerPosition = listener()->position();
    FloatPoint3D sourceListener = m_position - listenerPosition;

    if (sourceListener.isZero()) {
        // Source coincides with the listener: direction is undefined, report
        // zeros. Null-check the out-parameters here as well — the original
        // code only checked them on the normal path below.
        if (outAzimuth)
            *outAzimuth = 0.0;
        if (outElevation)
            *outElevation = 0.0;
        return;
    }

    sourceListener.normalize();

    // Build an orthonormal basis from the listener's front and up vectors.
    FloatPoint3D listenerFront = listener()->orientation();
    FloatPoint3D listenerUp = listener()->upVector();
    FloatPoint3D listenerRight = listenerFront.cross(listenerUp);
    listenerRight.normalize();

    FloatPoint3D listenerFrontNorm = listenerFront;
    listenerFrontNorm.normalize();

    FloatPoint3D up = listenerRight.cross(listenerFrontNorm);

    // Project the source direction onto the listener's horizontal plane.
    float upProjection = sourceListener.dot(up);
    FloatPoint3D projectedSource = sourceListener - upProjection * up;
    projectedSource.normalize();

    azimuth = 180.0 * acos(projectedSource.dot(listenerRight)) / piDouble;
    fixNANs(azimuth); // acos can yield NaN if the dot drifts slightly out of [-1, 1]

    // Source in front or behind the listener?
    double frontBack = projectedSource.dot(listenerFrontNorm);
    if (frontBack < 0.0)
        azimuth = 360.0 - azimuth;

    // Make azimuth relative to "front" and not "right" listener vector.
    if ((azimuth >= 0.0) && (azimuth <= 270.0))
        azimuth = 90.0 - azimuth;
    else
        azimuth = 450.0 - azimuth;

    // Elevation: angle between the source direction and the horizontal plane.
    double elevation = 90.0 - 180.0 * acos(sourceListener.dot(up)) / piDouble;
    fixNANs(elevation);

    // Fold out-of-range values back into [-90, 90].
    if (elevation > 90.0)
        elevation = 180.0 - elevation;
    else if (elevation < -90.0)
        elevation = -180.0 - elevation;

    if (outAzimuth)
        *outAzimuth = azimuth;
    if (outElevation)
        *outElevation = elevation;
}
// Returns the Doppler playback-rate multiplier, clamped to [0.125, 16]
// (4 octaves down/up), using the standard Doppler formula with the
// listener's dopplerFactor scaling the effect. Returns 1.0 (no shift) when
// the effect is disabled, nothing is moving, or the geometry is degenerate.
float AudioPannerNode::dopplerRate()
{
    double dopplerShift = 1.0;

    // A dopplerFactor of 0 (or less) disables the effect entirely.
    double dopplerFactor = listener()->dopplerFactor();
    if (dopplerFactor > 0.0) {
        double speedOfSound = listener()->speedOfSound();

        const FloatPoint3D& sourceVelocity = m_velocity;
        const FloatPoint3D& listenerVelocity = listener()->velocity();

        // A shift only occurs when the source and/or the listener is moving.
        bool sourceHasVelocity = !sourceVelocity.isZero();
        bool listenerHasVelocity = !listenerVelocity.isZero();

        if (sourceHasVelocity || listenerHasVelocity) {
            // Project the velocities onto the source -> listener axis.
            FloatPoint3D listenerPosition = listener()->position();
            FloatPoint3D sourceToListener = m_position - listenerPosition;
            double sourceListenerMagnitude = sourceToListener.length();

            // If the source sits exactly at the listener the direction (and
            // thus the projections) is undefined; keep the neutral 1.0
            // instead of the NaN -> fixNANs(0) -> clamp(0.125) chain the
            // previous code produced.
            if (sourceListenerMagnitude) {
                double listenerProjection = sourceToListener.dot(listenerVelocity) / sourceListenerMagnitude;
                double sourceProjection = sourceToListener.dot(sourceVelocity) / sourceListenerMagnitude;

                listenerProjection = -listenerProjection;
                sourceProjection = -sourceProjection;

                // Clamp the projections so the denominator can't cross zero.
                double scaledSpeedOfSound = speedOfSound / dopplerFactor;
                listenerProjection = min(listenerProjection, scaledSpeedOfSound);
                sourceProjection = min(sourceProjection, scaledSpeedOfSound);

                dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection));
                fixNANs(dopplerShift); // guard residual NaN/inf from degenerate inputs

                // Limit the pitch shift to 4 octaves in either direction.
                if (dopplerShift > 16.0)
                    dopplerShift = 16.0;
                else if (dopplerShift < 0.125)
                    dopplerShift = 0.125;
            }
        }
    }

    return static_cast<float>(dopplerShift);
}
// Combined attenuation from the distance model and the source's sound cone.
// Also publishes the individual gains to the corresponding AudioParams.
float AudioPannerNode::distanceConeGain()
{
    FloatPoint3D listenerPosition = listener()->position();

    // Attenuation from the source's distance to the listener.
    double listenerDistance = m_position.distanceTo(listenerPosition);
    double distanceGain = m_distanceEffect.gain(listenerDistance);
    m_distanceGain->setValue(static_cast<float>(distanceGain));

    // Attenuation from the source's directional cone.
    double coneGain = m_coneEffect.gain(m_position, m_orientation, listenerPosition);
    m_coneGain->setValue(static_cast<float>(coneGain));

    return static_cast<float>(distanceGain * coneGain);
}
// Recursively walks the rendering graph upstream from |node| and registers
// this panner with every AudioBufferSourceNode found, so those sources can
// apply doppler-shifted playback rates.
// NOTE(review): there is no visited-set, so a cycle in the rendering graph
// would recurse without bound — confirm the graph is guaranteed acyclic here.
void AudioPannerNode::notifyAudioSourcesConnectedToNode(AudioNode* node)
{
    ASSERT(node);
    if (!node)
        return;

    // First check if this node is an AudioBufferSourceNode. If so, let it
    // know about us so that doppler shift pitch can be taken into account.
    if (node->nodeType() == NodeTypeAudioBufferSource) {
        // static_cast (not reinterpret_cast): the nodeType() check guarantees
        // the dynamic type, and reinterpret_cast is not a safe way to
        // navigate a class hierarchy.
        AudioBufferSourceNode* bufferSourceNode = static_cast<AudioBufferSourceNode*>(node);
        bufferSourceNode->setPannerNode(this);
        return;
    }

    // Otherwise, go through every rendering connection feeding this node
    // and recurse into the nodes on the other side.
    for (unsigned i = 0; i < node->numberOfInputs(); ++i) {
        AudioNodeInput* input = node->input(i);

        for (unsigned j = 0; j < input->numberOfRenderingConnections(); ++j) {
            AudioNodeOutput* connectedOutput = input->renderingOutput(j);
            AudioNode* connectedNode = connectedOutput->node();
            notifyAudioSourcesConnectedToNode(connectedNode);
        }
    }
}
}
#endif // ENABLE(WEB_AUDIO)