// RealtimeIncomingAudioSourceCocoa.cpp
#include "config.h"
#include "RealtimeIncomingAudioSourceCocoa.h"
#if USE(LIBWEBRTC)
#include "AudioStreamDescription.h"
#include "CAAudioStreamDescription.h"
#include "LibWebRTCAudioFormat.h"
#include "WebAudioBufferList.h"
#include "WebAudioSourceProviderAVFObjC.h"
#include <pal/avfoundation/MediaTimeAVFoundation.h>
#include <pal/cf/CoreMediaSoftLink.h>
namespace WebCore {
using namespace PAL;
// Factory for the cross-platform interface: instantiates the Cocoa
// implementation and starts it before returning it to the caller.
Ref<RealtimeIncomingAudioSource> RealtimeIncomingAudioSource::create(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
{
    auto incomingSource = RealtimeIncomingAudioSourceCocoa::create(WTFMove(audioTrack), WTFMove(audioTrackId));
    incomingSource->start();
    return WTFMove(incomingSource);
}
// Creates a Cocoa incoming-audio source wrapping the given libwebrtc track.
Ref<RealtimeIncomingAudioSourceCocoa> RealtimeIncomingAudioSourceCocoa::create(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
{
    auto* source = new RealtimeIncomingAudioSourceCocoa(WTFMove(audioTrack), WTFMove(audioTrackId));
    return adoptRef(*source);
}
// Forwards the libwebrtc audio track and its identifier to the
// cross-platform base class; no Cocoa-specific state is initialized here.
RealtimeIncomingAudioSourceCocoa::RealtimeIncomingAudioSourceCocoa(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
: RealtimeIncomingAudioSource(WTFMove(audioTrack), WTFMove(audioTrackId))
{
}
// Builds a linear-PCM AudioStreamBasicDescription for the fixed libwebrtc
// sample format (size/endianness/float-ness/interleaving come from
// LibWebRTCAudioFormat) at the requested rate and channel count.
static inline AudioStreamBasicDescription streamDescription(size_t sampleRate, size_t channelCount)
{
    AudioStreamBasicDescription description;
    FillOutASBDForLPCM(description,
        sampleRate,
        channelCount,
        LibWebRTCAudioFormat::sampleSize,
        LibWebRTCAudioFormat::sampleSize,
        LibWebRTCAudioFormat::isFloat,
        LibWebRTCAudioFormat::isBigEndian,
        LibWebRTCAudioFormat::isNonInterleaved);
    return description;
}
// libwebrtc audio sink callback: wraps each incoming PCM chunk in a
// WebAudioBufferList and forwards it to audioSamplesAvailable(), substituting
// silence while the source is muted. Timestamps are synthesized from the
// running frame count (m_numberOfFrames) so successive chunks are contiguous.
void RealtimeIncomingAudioSourceCocoa::OnData(const void* audioData, int bitsPerSample, int sampleRate, size_t numberOfChannels, size_t numberOfFrames)
{
// NOTE(review): presumably drops 16 kHz mono data that libwebrtc can emit
// when the track carries no real capture (e.g. idle/silence frames) — this
// does not match the 48 kHz format asserted below. TODO confirm against
// the libwebrtc sink contract.
if (sampleRate == 16000 && numberOfChannels == 1)
return;
// Debug-only format expectations; release builds proceed regardless, and the
// byte-size math below uses the actual bitsPerSample while streamDescription()
// always assumes LibWebRTCAudioFormat::sampleSize — these agree only when
// bitsPerSample == 16 (NOTE(review): not enforced in release — confirm).
ASSERT(bitsPerSample == 16);
ASSERT(numberOfChannels == 1 || numberOfChannels == 2);
ASSERT(sampleRate == 48000);
// Presentation time of this chunk = frames delivered so far / sample rate.
CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
auto mediaTime = PAL::toMediaTime(startTime);
m_numberOfFrames += numberOfFrames;
// Rebuilt on every callback; format is effectively constant per the asserts
// above, so this could be cached if profiling shows it matters.
AudioStreamBasicDescription newDescription = streamDescription(sampleRate, numberOfChannels);
WebAudioBufferList audioBufferList { CAAudioStreamDescription(newDescription), WTF::safeCast<uint32_t>(numberOfFrames) };
// Single interleaved buffer: channels * frames * bytes-per-sample.
audioBufferList.buffer(0)->mDataByteSize = numberOfChannels * numberOfFrames * bitsPerSample / 8;
audioBufferList.buffer(0)->mNumberChannels = numberOfChannels;
// Muted sources still deliver correctly-timed buffers, just zero-filled,
// so downstream consumers keep a continuous timeline.
if (muted())
memset(audioBufferList.buffer(0)->mData, 0, audioBufferList.buffer(0)->mDataByteSize);
else
memcpy(audioBufferList.buffer(0)->mData, audioData, audioBufferList.buffer(0)->mDataByteSize);
audioSamplesAvailable(mediaTime, audioBufferList, CAAudioStreamDescription(newDescription), numberOfFrames);
}
}
#endif // USE(LIBWEBRTC)