// FFTFrameFFMPEG.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#if USE(WEBAUDIO_FFMPEG)
#include "FFTFrame.h"
#include "VectorMath.h"
extern "C" {
#include <libavcodec/avfft.h>
}
#include <wtf/MathExtras.h>
namespace WebCore {
// Largest supported transform is 2^24 samples; contextForSize() asserts
// the requested power-of-two size stays below this bound.
const int kMaxFFTPow2Size = 24;
// Constructs a frame for a power-of-two |fftSize|: allocates the interleaved
// complex work buffer (fftSize floats) plus the split real/imaginary arrays
// (fftSize / 2 each), and creates the forward (real-to-complex) and inverse
// (complex-to-real) FFmpeg RDFT contexts.
FFTFrame::FFTFrame(unsigned fftSize)
: m_FFTSize(fftSize)
, m_log2FFTSize(static_cast<unsigned>(log2(fftSize)))
, m_forwardContext(0)
, m_inverseContext(0)
, m_complexData(fftSize)
, m_realData(fftSize / 2)
, m_imagData(fftSize / 2)
{
// Only power-of-two FFT sizes are supported.
ASSERT(1UL << m_log2FFTSize == m_FFTSize);
m_forwardContext = contextForSize(fftSize, DFT_R2C);
m_inverseContext = contextForSize(fftSize, IDFT_C2R);
}
// Creates an empty frame: zero size, no buffers, no FFT contexts.
// Presumably populated later by the caller — TODO confirm intended use.
FFTFrame::FFTFrame()
: m_FFTSize(0)
, m_log2FFTSize(0)
, m_forwardContext(0)
, m_inverseContext(0)
{
}
// Copy constructor: allocates fresh buffers and fresh RDFT contexts for the
// same size, then copies only the split real/imaginary spectral data. The
// interleaved m_complexData buffer is deliberately not copied — it is
// regenerated on demand by getUpToDateComplexData().
FFTFrame::FFTFrame(const FFTFrame& frame)
: m_FFTSize(frame.m_FFTSize)
, m_log2FFTSize(frame.m_log2FFTSize)
, m_forwardContext(0)
, m_inverseContext(0)
, m_complexData(frame.m_FFTSize)
, m_realData(frame.m_FFTSize / 2)
, m_imagData(frame.m_FFTSize / 2)
{
m_forwardContext = contextForSize(m_FFTSize, DFT_R2C);
m_inverseContext = contextForSize(m_FFTSize, IDFT_C2R);
// Copy the half-size split-complex arrays from the source frame.
unsigned nbytes = sizeof(float) * (m_FFTSize / 2);
memcpy(realData(), frame.realData(), nbytes);
memcpy(imagData(), frame.imagData(), nbytes);
}
// No one-time global setup is required for the FFmpeg backend.
void FFTFrame::initialize()
{
}
// No global teardown is required for the FFmpeg backend.
void FFTFrame::cleanup()
{
}
// Releases the forward and inverse FFmpeg RDFT contexts.
// NOTE(review): relies on av_rdft_end tolerating a null context (the
// default constructor leaves both null) — confirm against the linked
// libavcodec version.
FFTFrame::~FFTFrame()
{
av_rdft_end(m_forwardContext);
av_rdft_end(m_inverseContext);
}
void FFTFrame::multiply(const FFTFrame& frame)
{
FFTFrame& frame1 = *this;
FFTFrame& frame2 = const_cast<FFTFrame&>(frame);
float* realP1 = frame1.realData();
float* imagP1 = frame1.imagData();
const float* realP2 = frame2.realData();
const float* imagP2 = frame2.imagData();
float scale = 0.5f;
realP1[0] *= scale * realP2[0];
imagP1[0] *= scale * imagP2[0];
unsigned halfSize = fftSize() / 2;
for (unsigned i = 1; i < halfSize; ++i) {
float realResult = realP1[i] * realP2[i] - imagP1[i] * imagP2[i];
float imagResult = realP1[i] * imagP2[i] + imagP1[i] * realP2[i];
realP1[i] = scale * realResult;
imagP1[i] = scale * imagResult;
}
}
// Forward real FFT: copies |data| (m_FFTSize samples) into the interleaved
// work buffer, runs the in-place FFmpeg RDFT, then de-interleaves the
// (re, im) pairs into the split real/imaginary arrays with a gain of 2.
void FFTFrame::doFFT(float* data)
{
    float* interleaved = m_complexData.data();
    memcpy(interleaved, data, sizeof(float) * m_FFTSize);

    // In-place real-to-complex transform; output is interleaved (re, im).
    av_rdft_calc(m_forwardContext, interleaved);

    const float scale = 2.0f;
    const int halfSize = m_FFTSize / 2;
    for (int bin = 0; bin < halfSize; ++bin) {
        m_realData[bin] = scale * interleaved[2 * bin];
        m_imagData[bin] = scale * interleaved[2 * bin + 1];
    }
}
// Inverse FFT: rebuilds the interleaved complex buffer from the split
// real/imaginary arrays, runs the in-place complex-to-real transform,
// and writes m_FFTSize time-domain samples to |data|, normalized by
// 1 / fftSize.
void FFTFrame::doInverseFFT(float* data)
{
    float* interleavedData = getUpToDateComplexData();
    av_rdft_calc(m_inverseContext, interleavedData);

    // Use a float literal: the original 1.0 / m_FFTSize computed in double
    // and implicitly narrowed to float.
    const float scale = 1.0f / m_FFTSize;
    VectorMath::vsmul(interleavedData, 1, &scale, data, 1, m_FFTSize);
}
// Returns a mutable pointer to the real components of the split-complex
// spectrum (the const_cast strips the constness of this accessor).
float* FFTFrame::realData() const
{
    float* data = const_cast<float*>(m_realData.data());
    return data;
}
// Returns a mutable pointer to the imaginary components of the
// split-complex spectrum.
float* FFTFrame::imagData() const
{
    float* data = const_cast<float*>(m_imagData.data());
    return data;
}
// Re-interleaves the split real/imaginary arrays into m_complexData as
// consecutive (re, im) pairs and returns a mutable pointer to that buffer,
// ready to feed the in-place inverse transform.
float* FFTFrame::getUpToDateComplexData()
{
    const int halfSize = m_FFTSize / 2;
    for (int bin = 0; bin < halfSize; ++bin) {
        m_complexData[2 * bin] = m_realData[bin];
        m_complexData[2 * bin + 1] = m_imagData[bin];
    }
    return const_cast<float*>(m_complexData.data());
}
// Creates an FFmpeg RDFT context for a power-of-two |fftSize|.
// |trans| is DFT_R2C for the forward transform or IDFT_C2R for the inverse
// (passed as int to keep FFmpeg types out of the header).
RDFTContext* FFTFrame::contextForSize(unsigned fftSize, int trans)
{
    ASSERT(fftSize);
    int pow2size = static_cast<int>(log2(fftSize));
    ASSERT(pow2size < kMaxFFTPow2Size);

    // static_cast replaces the original C-style cast; av_rdft_init returns
    // null on failure, which previously went unchecked.
    RDFTContext* context = av_rdft_init(pow2size, static_cast<RDFTransformType>(trans));
    ASSERT(context);
    return context;
}
}
#endif // USE(WEBAUDIO_FFMPEG)
#endif // ENABLE(WEB_AUDIO)