// RealtimeIncomingAudioSource.cpp
#include "config.h"
#include "RealtimeIncomingAudioSource.h"
#if USE(LIBWEBRTC)
#include "AudioStreamDescription.h"
#include "CAAudioStreamDescription.h"
#include "LibWebRTCAudioFormat.h"
#include "MediaTimeAVFoundation.h"
#include "WebAudioBufferList.h"
#include "WebAudioSourceProviderAVFObjC.h"
#include "CoreMediaSoftLink.h"
namespace WebCore {
// Creates a RealtimeIncomingAudioSource wrapping the given libwebrtc audio track
// and starts it before handing it back to the caller.
Ref<RealtimeIncomingAudioSource> RealtimeIncomingAudioSource::create(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
{
    Ref<RealtimeIncomingAudioSource> incomingSource = adoptRef(*new RealtimeIncomingAudioSource(WTFMove(audioTrack), WTFMove(audioTrackId)));
    incomingSource->start();
    return incomingSource;
}
// Constructs an audio-typed RealtimeMediaSource backed by a remote libwebrtc track.
// A null track means no media is flowing yet, so the source starts out muted.
RealtimeIncomingAudioSource::RealtimeIncomingAudioSource(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
: RealtimeMediaSource(WTFMove(audioTrackId), RealtimeMediaSource::Type::Audio, String())
, m_audioTrack(WTFMove(audioTrack))
{
// Muted iff there is no backing track; setSourceTrack() re-evaluates this later.
notifyMutedChange(!m_audioTrack);
}
// Stops the source on destruction, which (via stopProducingData) removes this
// object as a sink from the libwebrtc track so no callbacks arrive after teardown.
RealtimeIncomingAudioSource::~RealtimeIncomingAudioSource()
{
stop();
}
// Builds a Linear PCM AudioStreamBasicDescription matching libwebrtc's audio
// format (sample size, endianness, float-ness and interleaving are taken from
// LibWebRTCAudioFormat).
static inline AudioStreamBasicDescription streamDescription(size_t sampleRate, size_t channelCount)
{
    // Value-initialize: FillOutASBDForLPCM does not write every field (notably
    // mReserved), so without this the struct would carry stack garbage.
    AudioStreamBasicDescription streamFormat { };
    FillOutASBDForLPCM(streamFormat, sampleRate, channelCount, LibWebRTCAudioFormat::sampleSize, LibWebRTCAudioFormat::sampleSize, LibWebRTCAudioFormat::isFloat, LibWebRTCAudioFormat::isBigEndian, LibWebRTCAudioFormat::isNonInterleaved);
    return streamFormat;
}
// webrtc::AudioTrackSinkInterface callback: receives a buffer of decoded PCM
// samples from libwebrtc, wraps it in a WebAudioBufferList with a CoreMedia
// timestamp, and forwards it to observers via audioSamplesAvailable().
void RealtimeIncomingAudioSource::OnData(const void* audioData, int bitsPerSample, int sampleRate, size_t numberOfChannels, size_t numberOfFrames)
{
// Drop 16 kHz mono buffers — presumably idle/comfort-noise data from libwebrtc
// rather than real track audio; TODO confirm against the libwebrtc call site.
if (sampleRate == 16000 && numberOfChannels == 1)
return;
// Everything below assumes 16-bit samples at 48 kHz, mono or stereo.
ASSERT(bitsPerSample == 16);
ASSERT(numberOfChannels == 1 || numberOfChannels == 2);
ASSERT(sampleRate == 48000);
// Presentation time is derived from the running frame count, so timestamps are
// continuous across callbacks; compute it before adding this buffer's frames.
CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
auto mediaTime = toMediaTime(startTime);
m_numberOfFrames += numberOfFrames;
AudioStreamBasicDescription newDescription = streamDescription(sampleRate, numberOfChannels);
WebAudioBufferList audioBufferList { CAAudioStreamDescription(newDescription), WTF::safeCast<uint32_t>(numberOfFrames) };
// bitsPerSample / 8 converts bits to bytes per sample.
audioBufferList.buffer(0)->mDataByteSize = numberOfChannels * numberOfFrames * bitsPerSample / 8;
audioBufferList.buffer(0)->mNumberChannels = numberOfChannels;
// When muted, keep the timeline advancing but deliver silence instead of the payload.
if (muted())
memset(audioBufferList.buffer(0)->mData, 0, audioBufferList.buffer(0)->mDataByteSize);
else
memcpy(audioBufferList.buffer(0)->mData, audioData, audioBufferList.buffer(0)->mDataByteSize);
audioSamplesAvailable(mediaTime, audioBufferList, CAAudioStreamDescription(newDescription), numberOfFrames);
}
// Begins delivery of audio buffers by registering this object as a sink on the
// backing libwebrtc track. No-op while no track is attached.
void RealtimeIncomingAudioSource::startProducingData()
{
    if (!m_audioTrack)
        return;
    m_audioTrack->AddSink(this);
}
// Stops delivery of audio buffers by unregistering this sink from the backing
// libwebrtc track. No-op while no track is attached.
void RealtimeIncomingAudioSource::stopProducingData()
{
    if (!m_audioTrack)
        return;
    m_audioTrack->RemoveSink(this);
}
// Attaches the backing libwebrtc track after construction. Expected to be called
// at most once (only while no track is set) and with a non-null track.
void RealtimeIncomingAudioSource::setSourceTrack(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& track)
{
ASSERT(!m_audioTrack);
ASSERT(track);
m_audioTrack = WTFMove(track);
// Mirrors the constructor: a non-null track clears the muted state.
notifyMutedChange(!m_audioTrack);
// If production was requested before the track arrived, start sinking now.
if (isProducingData())
m_audioTrack->AddSink(this);
}
// Remote (incoming) tracks expose no constrainable capabilities, so return the
// shared empty capabilities object.
const RealtimeMediaSourceCapabilities& RealtimeIncomingAudioSource::capabilities() const
{
return RealtimeMediaSourceCapabilities::emptyCapabilities();
}
// Returns the cached settings object; nothing in this file mutates
// m_currentSettings, so callers see its default-constructed state.
const RealtimeMediaSourceSettings& RealtimeIncomingAudioSource::settings() const
{
return m_currentSettings;
}
}
#endif // USE(LIBWEBRTC)