// AudioDestinationIOS.cpp
#include "config.h"
#if ENABLE(WEB_AUDIO)
#if PLATFORM(IOS)
#include "AudioDestinationIOS.h"
#include "AudioIOCallback.h"
#include "AudioSession.h"
#include "FloatConversion.h"
#include "Logging.h"
#include "Page.h"
#include "SoftLinking.h"
#include <AudioToolbox/AudioServices.h>
#include <WebCore/RuntimeApplicationChecksIOS.h>
#include <wtf/HashSet.h>
SOFT_LINK_FRAMEWORK(AudioToolbox)
SOFT_LINK(AudioToolbox, AudioComponentFindNext, AudioComponent, (AudioComponent inComponent, const AudioComponentDescription *inDesc), (inComponent, inDesc))
SOFT_LINK(AudioToolbox, AudioComponentInstanceDispose, OSStatus, (AudioComponentInstance inInstance), (inInstance))
SOFT_LINK(AudioToolbox, AudioComponentInstanceNew, OSStatus, (AudioComponent inComponent, AudioComponentInstance *outInstance), (inComponent, outInstance))
SOFT_LINK(AudioToolbox, AudioOutputUnitStart, OSStatus, (AudioUnit ci), (ci))
SOFT_LINK(AudioToolbox, AudioOutputUnitStop, OSStatus, (AudioUnit ci), (ci))
SOFT_LINK(AudioToolbox, AudioUnitAddPropertyListener, OSStatus, (AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void *inProcUserData), (inUnit, inID, inProc, inProcUserData))
SOFT_LINK(AudioToolbox, AudioUnitGetProperty, OSStatus, (AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void *outData, UInt32 *ioDataSize), (inUnit, inID, inScope, inElement, outData, ioDataSize))
SOFT_LINK(AudioToolbox, AudioUnitInitialize, OSStatus, (AudioUnit inUnit), (inUnit))
SOFT_LINK(AudioToolbox, AudioUnitSetProperty, OSStatus, (AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void *inData, UInt32 inDataSize), (inUnit, inID, inScope, inElement, inData, inDataSize))
namespace WebCore {
// Frames pulled from the Web Audio engine per render quantum. 128 matches the
// engine's fixed processing block size, so render() always asks the callback
// for whole quanta.
const int kRenderBufferSize = 128;
// Preferred hardware I/O buffer size (in frames) requested from the shared
// AudioSession; a multiple of kRenderBufferSize so each hardware pull can be
// satisfied by whole render quanta.
const int kPreferredBufferSize = 256;
// Raw (non-owning) set of all live destinations, used to activate the shared
// AudioSession with the first instance and deactivate it with the last.
typedef HashSet<AudioDestinationIOS*> AudioDestinationSet;
// Returns the process-wide set of live AudioDestinationIOS instances.
// DEFINE_STATIC_LOCAL is used (rather than a plain function-local static) so
// the set never runs an exit-time destructor, per WebKit convention.
static AudioDestinationSet& audioDestinations()
{
DEFINE_STATIC_LOCAL(AudioDestinationSet, audioDestinationSet, ());
return audioDestinationSet;
}
// Cross-platform factory entry point. The iOS port only supports stereo
// output and no audio input; requests for anything else are logged and a
// stereo-output destination is created anyway (the channel arguments are
// advisory here). The device-identifier argument is unused on iOS.
PassOwnPtr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate)
{
    // Input is not implemented on this port.
    if (numberOfInputChannels > 0)
        LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled input channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);

    // Anything other than stereo output is unsupported.
    if (numberOfOutputChannels != 2)
        LOG(Media, "AudioDestination::create(%u, %u, %f) - unhandled output channels", numberOfInputChannels, numberOfOutputChannels, sampleRate);

    return adoptPtr(new AudioDestinationIOS(callback, sampleRate));
}
// Returns the current hardware output sample rate as reported by the shared
// platform AudioSession.
float AudioDestination::hardwareSampleRate()
{
return AudioSession::sharedSession().sampleRate();
}
// Returns 0 to indicate this port offers no channel configuration beyond the
// default; create() in this file only handles stereo (2-channel) output.
unsigned long AudioDestination::maxChannelCount()
{
return 0;
}
// Creates a RemoteIO output AudioUnit and wires it up to pull rendered audio
// from |callback|. The render bus is created with allocate=false because
// render() points its channels directly at the AudioUnit-provided buffers.
// NOTE: all error checks below are ASSERTs, which compile away in release
// builds — Core Audio failures are silently ignored there.
AudioDestinationIOS::AudioDestinationIOS(AudioIOCallback& callback, double sampleRate)
: m_outputUnit(0)
, m_callback(callback)
, m_renderBus(AudioBus::create(2, kRenderBufferSize, false))
, m_sampleRate(sampleRate)
, m_isPlaying(false)
, m_interruptedOnPlayback(false)
{
// Register for interruption notifications; the session keeps a raw pointer
// to this object. NOTE(review): this listener must be unregistered before
// destruction or the session is left with a dangling pointer.
AudioSession& session = AudioSession::sharedSession();
session.addListener(this);
session.setCategory(AudioSession::AmbientSound);
// Activate the shared session only when the first destination appears.
audioDestinations().add(this);
if (audioDestinations().size() == 1)
session.setActive(1);
// Locate and instantiate the RemoteIO output unit (hardware output on iOS).
AudioComponent comp;
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
comp = AudioComponentFindNext(0, &desc);
ASSERT(comp);
OSStatus result = AudioComponentInstanceNew(comp, &m_outputUnit);
ASSERT(!result);
// Enable output I/O on the output scope (element 0).
UInt32 flag = 1;
result = AudioUnitSetProperty(m_outputUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
0,
&flag,
sizeof(flag));
ASSERT(!result);
// Log whenever the unit's maximum render slice size changes.
result = AudioUnitAddPropertyListener(m_outputUnit, kAudioUnitProperty_MaximumFramesPerSlice, frameSizeChangedProc, this);
ASSERT(!result);
result = AudioUnitInitialize(m_outputUnit);
ASSERT(!result);
// Install the render callback and stream format (see configure()).
configure();
}
// Tears down the output unit and, when this is the last live destination,
// deactivates the shared AudioSession.
AudioDestinationIOS::~AudioDestinationIOS()
{
    audioDestinations().remove(this);

    AudioSession& session = AudioSession::sharedSession();

    // Unregister the interruption listener added in the constructor;
    // without this the session retains a dangling pointer to this object
    // and a later interruption notification would touch freed memory.
    session.removeListener(this);

    // Deactivate the shared session once no destinations remain.
    if (!audioDestinations().size())
        session.setActive(0);

    if (m_outputUnit)
        AudioComponentInstanceDispose(m_outputUnit);
}
// Installs the render callback on the output unit and sets its input-scope
// stream format to non-interleaved 32-bit float stereo at m_sampleRate.
// NOTE: error checks are ASSERTs and compile away in release builds.
void AudioDestinationIOS::configure()
{
// Route the unit's data pull through inputProc, with |this| as context.
AURenderCallbackStruct input;
input.inputProc = inputProc;
input.inputProcRefCon = this;
OSStatus result = AudioUnitSetProperty(m_outputUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &input, sizeof(input));
ASSERT(!result);
// Start from the unit's current output format, then override the fields we
// need rather than building an AudioStreamBasicDescription from scratch.
AudioStreamBasicDescription streamFormat;
UInt32 size = sizeof(AudioStreamBasicDescription);
result = AudioUnitGetProperty(m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, (void*)&streamFormat, &size);
ASSERT(!result);
const int bytesPerFloat = sizeof(Float32);
const int bitsPerByte = 8;
streamFormat.mSampleRate = m_sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
// Non-interleaved float: each AudioBuffer holds one channel, so packet and
// frame sizes below are per-channel (one Float32 each).
streamFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = bytesPerFloat;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = bytesPerFloat;
streamFormat.mChannelsPerFrame = 2;
streamFormat.mBitsPerChannel = bitsPerByte * bytesPerFloat;
result = AudioUnitSetProperty(m_outputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, (void*)&streamFormat, sizeof(AudioStreamBasicDescription));
ASSERT(!result);
// Ask the hardware for kPreferredBufferSize-frame pulls, a multiple of the
// 128-frame render quantum (see render()).
AudioSession::sharedSession().setPreferredBufferSize(kPreferredBufferSize);
}
void AudioDestinationIOS::start()
{
OSStatus result = AudioOutputUnitStart(m_outputUnit);
if (!result)
m_isPlaying = true;
}
void AudioDestinationIOS::stop()
{
OSStatus result = AudioOutputUnitStop(m_outputUnit);
if (!result)
m_isPlaying = false;
}
// AudioSession interruption callback (e.g. an incoming phone call). Stops
// playback and remembers that we were playing so endedAudioInterruption()
// can resume transparently.
void AudioDestinationIOS::beganAudioInterruption()
{
    if (m_isPlaying) {
        stop();
        m_interruptedOnPlayback = true;
    }
}
// AudioSession end-of-interruption callback. Resumes playback only when it
// was this object's own interruption handling that stopped it.
void AudioDestinationIOS::endedAudioInterruption()
{
    if (m_interruptedOnPlayback) {
        m_interruptedOnPlayback = false;
        start();
    }
}
// Fills the AudioUnit's non-interleaved output buffers by pulling from the
// Web Audio engine in fixed kRenderBufferSize (128-frame) quanta. The render
// bus owns no storage (created with allocate=false), so each quantum we point
// its channels straight at the appropriate offset inside ioData's buffers and
// the engine renders in place — no extra copy.
OSStatus AudioDestinationIOS::render(UInt32 numberOfFrames, AudioBufferList* ioData)
{
AudioBuffer* buffers = ioData->mBuffers;
// NOTE(review): the loop bound only admits complete 128-frame quanta, so any
// trailing numberOfFrames % kRenderBufferSize frames are left unrendered.
// This appears deliberate (the engine renders in whole quanta, and
// setPreferredBufferSize(256) in configure() keeps hardware pulls at
// multiples of 128), but confirm the tail buffers cannot contain garbage.
// Given the bound, remainingFrames always equals kRenderBufferSize; the
// std::min below is defensive and never reduces it.
for (UInt32 frameOffset = 0; frameOffset + kRenderBufferSize <= numberOfFrames; frameOffset += kRenderBufferSize) {
UInt32 remainingFrames = std::min<UInt32>(kRenderBufferSize, numberOfFrames - frameOffset);
for (UInt32 i = 0; i < ioData->mNumberBuffers; ++i) {
// Derive the per-frame stride from the buffer's own byte size rather than
// assuming sizeof(Float32), then aim this channel at the current quantum.
UInt32 bytesPerFrame = buffers[i].mDataByteSize / numberOfFrames;
UInt32 byteOffset = frameOffset * bytesPerFrame;
float* memory = (float*)((char*)buffers[i].mData + byteOffset);
m_renderBus->setChannelMemory(i, memory, remainingFrames);
}
// No input bus on this port, hence the null source bus.
m_callback.render(0, m_renderBus.get(), remainingFrames);
}
return noErr;
}
// C render-callback trampoline installed by configure(); userData is the
// owning AudioDestinationIOS instance.
OSStatus AudioDestinationIOS::inputProc(void* userData, AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32 /*busNumber*/, UInt32 numberOfFrames, AudioBufferList* ioData)
{
    return static_cast<AudioDestinationIOS*>(userData)->render(numberOfFrames, ioData);
}
void AudioDestinationIOS::frameSizeChangedProc(void *inRefCon, AudioUnit, AudioUnitPropertyID, AudioUnitScope, AudioUnitElement)
{
AudioDestinationIOS* audioOutput = static_cast<AudioDestinationIOS*>(inRefCon);
UInt32 bufferSize = 0;
UInt32 dataSize = sizeof(bufferSize);
AudioUnitGetProperty(audioOutput->m_outputUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, (void*)&bufferSize, &dataSize);
fprintf(stderr, ">>>> frameSizeChanged = %lu\n", static_cast<unsigned long>(bufferSize));
}
}
#endif // PLATFORM(IOS)
#endif // ENABLE(WEB_AUDIO)