#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "HRTFElevation.h"
#include "AudioBus.h"
#include "AudioFileReader.h"
#include "Biquad.h"
#include "FFTFrame.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include <algorithm>
#include <math.h>
#include <wtf/NeverDestroyed.h>
namespace WebCore {
// Raw azimuth measurements are spaced every 15 degrees around the circle.
const unsigned HRTFElevation::AzimuthSpacing = 15;
const unsigned HRTFElevation::NumberOfRawAzimuths = 360 / AzimuthSpacing;
// Each gap between raw azimuths is filled with 7 interpolated kernels (factor 8).
const unsigned HRTFElevation::InterpolationFactor = 8;
const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * InterpolationFactor;
// Total number of components of an HRTF database.
// 240 = 24 raw azimuths x 10 raw elevations (presumably -45..90 in 15-degree
// steps — see the index math in calculateKernelsForAzimuthElevation).
const size_t TotalNumberOfResponses = 240;
// Number of frames in an individual impulse response at the native rate.
const size_t ResponseFrameSize = 256;
// Sample-rate of the spatialization impulse responses as stored on disk.
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
const float ResponseSampleRate = 44100;
#if PLATFORM(COCOA) || USE(WEBAUDIO_GSTREAMER)
#define USE_CONCATENATED_IMPULSE_RESPONSES
#endif
#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
// Returns the concatenated impulse-response bus for the given subject,
// loading it from a platform resource on first use and caching it for the
// lifetime of the process (the bus is intentionally leaked into the cache).
// Returns nullptr if loading fails or the bus has an unexpected shape.
static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName)
{
    typedef HashMap<String, AudioBus*> AudioBusMap;
    static NeverDestroyed<AudioBusMap> audioBusMap;

    AudioBus* bus;
    AudioBusMap::iterator iterator = audioBusMap.get().find(subjectName);
    if (iterator == audioBusMap.get().end()) {
        auto concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
        ASSERT(concatenatedImpulseResponses);
        if (!concatenatedImpulseResponses)
            return nullptr;

        // Transfer ownership into the process-lifetime cache.
        bus = concatenatedImpulseResponses.leakRef();
        audioBusMap.get().set(subjectName, bus);
    } else
        bus = iterator->value;

    // Validate the bus: stereo, with one ResponseFrameSize-frame response for
    // each of the TotalNumberOfResponses positions.
    size_t responseLength = bus->length();
    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);
    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
    ASSERT(isBusGood);
    if (!isBusGood)
        return nullptr;

    return bus;
}
#endif
bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
RefPtr<HRTFKernel> kernelL1;
RefPtr<HRTFKernel> kernelR1;
bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1);
if (!success)
return false;
int symmetricAzimuth = !azimuth ? 0 : 360 - azimuth;
RefPtr<HRTFKernel> kernelL2;
RefPtr<HRTFKernel> kernelR2;
success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2);
if (!success)
return false;
kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
return true;
}
// Computes the left/right HRTF kernel pair for one measured azimuth/elevation
// position, resampled to sampleRate. Two loading strategies exist:
// a single concatenated resource per subject (Cocoa/GStreamer), or one
// resource file per position. Returns false on any validation or load
// failure, leaving kernelL/kernelR untouched.
bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
// Valid azimuths are 0..345 in exact 15-degree increments.
bool isAzimuthGood = azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
ASSERT(isAzimuthGood);
if (!isAzimuthGood)
return false;
// Valid elevations are -45..90 in exact 15-degree increments.
bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
ASSERT(isElevationGood);
if (!isElevationGood)
return false;
// Negative elevations are encoded as 315..345 (elevation + 360).
int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
// All responses for this subject live in one cached concatenated bus.
AudioBus* bus(getConcatenatedImpulseResponsesForSubject(subjectName));
if (!bus)
return false;
// Map (azimuth, elevation) to a response index within the concatenated bus.
// NOTE(review): for wrapped negative elevations (>90 after the +360 shift)
// this subtracts AzimuthSpacing (15), giving 315 -> 21 - 15 = 6, which is
// the same index as +90 (90 / 15 = 6). Presumably the concatenated resource
// was generated with the same mapping so indices stay consistent — confirm
// against the resource-generation script before changing anything here.
int elevationIndex = positiveElevation / AzimuthSpacing;
if (positiveElevation > 90)
elevationIndex -= AzimuthSpacing;
unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
bool isIndexGood = index < TotalNumberOfResponses;
ASSERT(isIndexGood);
if (!isIndexGood)
return false;
// Slice this position's ResponseFrameSize frames out of the concatenated bus,
// then sample-rate convert to the destination rate.
unsigned startFrame = index * ResponseFrameSize;
unsigned stopFrame = startFrame + ResponseFrameSize;
RefPtr<AudioBus> preSampleRateConvertedResponse = AudioBus::createBufferFromRange(bus, startFrame, stopFrame);
RefPtr<AudioBus> response = AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate);
AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
#else
// One stereo impulse-response resource per position. The naming appears to
// follow the IRCAM Listen HRTF database convention — confirm against the
// bundled resources.
String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);
RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
ASSERT(impulseResponse.get());
if (!impulseResponse.get())
return false;
// Validate: a stereo 256-frame 44.1kHz response, scaled by the resampling ratio.
size_t responseLength = impulseResponse->length();
size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0));
bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2;
ASSERT(isBusGood);
if (!isBusGood)
return false;
AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
#endif
// Build frequency-domain kernels at the panner's FFT size for this rate.
const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate);
kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate);
return true;
}
// Highest available elevation (degrees) for each raw azimuth
// (index = azimuth / AzimuthSpacing). createForSubject() clamps requested
// elevations to these values — presumably because the measurement set does
// not cover the full elevation range at every azimuth.
static const int maxElevations[] = {
90, 45, 60, 45, 75, 45, 60, 45, 75, 45, 60, 45, 75, 45, 60, 45, 75, 45, 60, 45, 75, 45, 60, 45 };
std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
{
bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
ASSERT(isElevationGood);
if (!isElevationGood)
return nullptr;
auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
int interpolatedIndex = 0;
for (unsigned rawIndex = 0; rawIndex < NumberOfRawAzimuths; ++rawIndex) {
int maxElevation = maxElevations[rawIndex];
int actualElevation = std::min(elevation, maxElevation);
bool success = calculateKernelsForAzimuthElevation(rawIndex * AzimuthSpacing, actualElevation, sampleRate, subjectName, kernelListL->at(interpolatedIndex), kernelListR->at(interpolatedIndex));
if (!success)
return nullptr;
interpolatedIndex += InterpolationFactor;
}
for (unsigned i = 0; i < NumberOfTotalAzimuths; i += InterpolationFactor) {
int j = (i + InterpolationFactor) % NumberOfTotalAzimuths;
for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
float x = float(jj) / float(InterpolationFactor);
(*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
(*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
}
}
return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), elevation, sampleRate);
}
// Creates a new elevation slice between two existing slices by interpolating
// every azimuth's kernel pair with blend factor x (0 <= x < 1), where x == 0
// corresponds entirely to hrtfElevation1. The resulting slice's elevation
// angle is the correspondingly weighted average of the two inputs'.
std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
{
    ASSERT(hrtfElevation1 && hrtfElevation2);
    if (!hrtfElevation1 || !hrtfElevation2)
        return nullptr;

    ASSERT(x >= 0.0 && x < 1.0);

    auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
    auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);

    // Blend each azimuth's left and right kernels between the two slices.
    for (unsigned azimuth = 0; azimuth < NumberOfTotalAzimuths; ++azimuth) {
        (*kernelListL)[azimuth] = HRTFKernel::createInterpolatedKernel(hrtfElevation1->kernelListL()->at(azimuth).get(), hrtfElevation2->kernelListL()->at(azimuth).get(), x);
        (*kernelListR)[azimuth] = HRTFKernel::createInterpolatedKernel(hrtfElevation1->kernelListR()->at(azimuth).get(), hrtfElevation2->kernelListR()->at(azimuth).get(), x);
    }

    double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();
    return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), static_cast<int>(angle), sampleRate);
}
void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR)
{
bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0;
ASSERT(checkAzimuthBlend);
if (!checkAzimuthBlend)
azimuthBlend = 0.0;
unsigned numKernels = m_kernelListL->size();
bool isIndexGood = azimuthIndex < numKernels;
ASSERT(isIndexGood);
if (!isIndexGood) {
kernelL = 0;
kernelR = 0;
return;
}
kernelL = m_kernelListL->at(azimuthIndex).get();
kernelR = m_kernelListR->at(azimuthIndex).get();
frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();
int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();
frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
}
}
#endif // ENABLE(WEB_AUDIO)