#ifndef FFTFrame_h
#define FFTFrame_h
#include "AudioArray.h"

// wtf/Platform.h defines the OS() and USE() macros tested below, so it must
// be included before the backend-selection preprocessor logic.
#include <wtf/Platform.h>

#if OS(DARWIN) && !USE(WEBAUDIO_FFMPEG)
#define USE_ACCELERATE_FFT 1
#else
#define USE_ACCELERATE_FFT 0
#endif

#if USE_ACCELERATE_FFT
#include <Accelerate/Accelerate.h>
#endif

#if !USE_ACCELERATE_FFT

#if USE(WEBAUDIO_MKL)
#include "mkl_dfti.h"
#endif // USE(WEBAUDIO_MKL)

#if USE(WEBAUDIO_GSTREAMER)
#include <glib.h>
G_BEGIN_DECLS
#include <gst/fft/gstfftf32.h>
G_END_DECLS
#endif // USE(WEBAUDIO_GSTREAMER)

#if USE(WEBAUDIO_FFMPEG)
struct RDFTContext;
#endif // USE(WEBAUDIO_FFMPEG)

#endif // !USE_ACCELERATE_FFT

#if USE(WEBAUDIO_IPP)
#include <ipps.h>
#endif // USE(WEBAUDIO_IPP)

#include <wtf/PassOwnPtr.h>
#include <wtf/Threading.h>
namespace WebCore {
// FFTFrame holds the frequency-domain representation of an audio signal as
// separate real and imaginary component arrays. The FFT implementation is
// selected at compile time: Apple Accelerate (vDSP) on Darwin, otherwise one
// of MKL, FFmpeg, GStreamer, or IPP depending on the USE() configuration.
class FFTFrame {
public:
    // Construct a frame for a specific power-of-two FFT size.
    FFTFrame(unsigned fftSize);
    // Default-constructs an empty frame; implemented per-backend.
    FFTFrame();
    FFTFrame(const FFTFrame& frame);
    ~FFTFrame();

    // One-time global setup/teardown for the selected backend
    // (e.g. shared FFT plan/descriptor caches).
    static void initialize();
    static void cleanup();

    // Forward transform of |data| (fftSize() samples) into this frame.
    void doFFT(const float* data);
    // Inverse transform of this frame into |data| (fftSize() samples).
    void doInverseFFT(float* data);

    // Pointwise complex multiply of this frame by |frame|.
    void multiply(const FFTFrame& frame);

    // Direct access to the component arrays; non-const despite the const
    // qualifier because the underlying storage is exposed for in-place use.
    float* realData() const;
    float* imagData() const;

    void print(); // Debugging aid.

    // Returns a frame interpolated between frame1 and frame2 as x goes
    // from 0 to 1 (delegates to interpolateFrequencyComponents()).
    static PassOwnPtr<FFTFrame> createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x);

    // FFT of |dataSize| samples; presumably zero-pads up to fftSize() —
    // confirm against the implementation.
    void doPaddedFFT(const float* data, size_t dataSize);

    // Group-delay manipulation, expressed in sample-frames.
    double extractAverageGroupDelay();
    void addConstantGroupDelay(double sampleFrameDelay);

    unsigned fftSize() const { return m_FFTSize; }
    unsigned log2FFTSize() const { return m_log2FFTSize; }

private:
    unsigned m_FFTSize;
    unsigned m_log2FFTSize;

    // Cross-platform helper backing createInterpolatedFrame().
    void interpolateFrequencyComponents(const FFTFrame& frame1, const FFTFrame& frame2, double x);

#if USE_ACCELERATE_FFT
    DSPSplitComplex& dspSplitComplex() { return m_frame; }
    // Return by const reference (not by value) to avoid copying the struct
    // and to mirror the non-const accessor above.
    const DSPSplitComplex& dspSplitComplex() const { return m_frame; }

    // Shared cache of vDSP FFT setups, keyed by FFT size.
    static FFTSetup fftSetupForSize(unsigned fftSize);
    static FFTSetup* fftSetups;

    FFTSetup m_FFTSetup;
    DSPSplitComplex m_frame; // Points into m_realData/m_imagData storage — TODO confirm.
    AudioFloatArray m_realData;
    AudioFloatArray m_imagData;
#else // !USE_ACCELERATE_FFT

#if USE(WEBAUDIO_MKL)
    // Returns interleaved complex data refreshed from the split arrays.
    float* getUpToDateComplexData();

    // Shared cache of MKL DFTI descriptors, keyed by FFT size.
    static DFTI_DESCRIPTOR_HANDLE descriptorHandleForSize(unsigned fftSize);
    static DFTI_DESCRIPTOR_HANDLE* descriptorHandles;

    DFTI_DESCRIPTOR_HANDLE m_handle;
    AudioFloatArray m_complexData;
    AudioFloatArray m_realData;
    AudioFloatArray m_imagData;
#endif // USE(WEBAUDIO_MKL)

#if USE(WEBAUDIO_FFMPEG)
    // Shared RDFT contexts, keyed by FFT size and transform direction.
    static RDFTContext* contextForSize(unsigned fftSize, int trans);

    RDFTContext* m_forwardContext;
    RDFTContext* m_inverseContext;

    // Returns interleaved complex data refreshed from the split arrays.
    float* getUpToDateComplexData();
    AudioFloatArray m_complexData;
    AudioFloatArray m_realData;
    AudioFloatArray m_imagData;
#endif // USE(WEBAUDIO_FFMPEG)

#if USE(WEBAUDIO_GSTREAMER)
    GstFFTF32* m_fft;
    GstFFTF32* m_inverseFft;
    GstFFTF32Complex* m_complexData;
    AudioFloatArray m_realData;
    AudioFloatArray m_imagData;
#endif // USE(WEBAUDIO_GSTREAMER)

#if USE(WEBAUDIO_IPP)
    Ipp8u* m_buffer;
    IppsDFTSpec_R_32f* m_DFTSpec;

    // Returns interleaved complex data refreshed from the split arrays.
    float* getUpToDateComplexData();
    AudioFloatArray m_complexData;
    AudioFloatArray m_realData;
    AudioFloatArray m_imagData;
#endif // USE(WEBAUDIO_IPP)

#endif // !USE_ACCELERATE_FFT
};
}
#endif // FFTFrame_h