// RealtimeIncomingAudioSourceCocoa.cpp
#include "config.h"
#include "RealtimeIncomingAudioSourceCocoa.h"
#if USE(LIBWEBRTC)
#include "AudioStreamDescription.h"
#include "CAAudioStreamDescription.h"
#include "LibWebRTCAudioFormat.h"
#include "Logging.h"
#include <pal/avfoundation/MediaTimeAVFoundation.h>
#include <pal/cf/CoreMediaSoftLink.h>
namespace WebCore {
using namespace PAL;
Ref<RealtimeIncomingAudioSource> RealtimeIncomingAudioSource::create(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
{
    // Factory for the base class: build the Cocoa-specific source, start it,
    // and hand it back through the base Ref type.
    Ref<RealtimeIncomingAudioSourceCocoa> newSource = RealtimeIncomingAudioSourceCocoa::create(WTFMove(audioTrack), WTFMove(audioTrackId));
    newSource->start();
    // WTFMove is required here to convert Ref<Derived> into Ref<Base> without a ref-count churn.
    return WTFMove(newSource);
}
Ref<RealtimeIncomingAudioSourceCocoa> RealtimeIncomingAudioSourceCocoa::create(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
{
    // The constructor is private; adoptRef takes ownership of the initial reference.
    auto* source = new RealtimeIncomingAudioSourceCocoa(WTFMove(audioTrack), WTFMove(audioTrackId));
    return adoptRef(*source);
}
// Forwards the libwebrtc track and its identifier straight to the base class;
// all Cocoa-specific state is lazily initialized on the first OnData() callback.
RealtimeIncomingAudioSourceCocoa::RealtimeIncomingAudioSourceCocoa(rtc::scoped_refptr<webrtc::AudioTrackInterface>&& audioTrack, String&& audioTrackId)
: RealtimeIncomingAudioSource(WTFMove(audioTrack), WTFMove(audioTrackId))
{
}
// Builds an LPCM AudioStreamBasicDescription matching the fixed sample format
// that libwebrtc delivers (size/endianness/float-ness from LibWebRTCAudioFormat).
static inline AudioStreamBasicDescription streamDescription(size_t sampleRate, size_t channelCount)
{
    AudioStreamBasicDescription description;
    FillOutASBDForLPCM(description, sampleRate, channelCount, LibWebRTCAudioFormat::sampleSize, LibWebRTCAudioFormat::sampleSize, LibWebRTCAudioFormat::isFloat, LibWebRTCAudioFormat::isBigEndian, LibWebRTCAudioFormat::isNonInterleaved);
    return description;
}
// libwebrtc audio-sink callback, invoked off the main thread with a chunk of
// decoded PCM. Wraps the chunk in a WebAudioBufferList (zero-copy: mData points
// into libwebrtc's buffer) and forwards it with a timestamp derived from the
// running frame counter.
void RealtimeIncomingAudioSourceCocoa::OnData(const void* audioData, int bitsPerSample, int sampleRate, size_t numberOfChannels, size_t numberOfFrames)
{
#if !RELEASE_LOG_DISABLED
    // Log every 200th chunk; logging must happen on the main thread.
    if (!(++m_chunksReceived % 200)) {
        callOnMainThread([identifier = LOGIDENTIFIER, this, protectedThis = makeRef(*this), chunksReceived = m_chunksReceived] {
            ALWAYS_LOG_IF(loggerPtr(), identifier, "chunk ", chunksReceived);
        });
    }
#endif
    // (Re)build the buffer list whenever the format changes or on first use.
    if (!m_audioBufferList || m_sampleRate != sampleRate || m_numberOfChannels != numberOfChannels) {
        callOnMainThread([identifier = LOGIDENTIFIER, this, protectedThis = makeRef(*this), sampleRate, numberOfChannels] {
            ALWAYS_LOG_IF(loggerPtr(), identifier, "new audio buffer list for sampleRate ", sampleRate, " and ", numberOfChannels, " channel(s)");
        });
        // Rescale the running frame counter to the new rate BEFORE overwriting
        // m_sampleRate, so timestamps stay continuous across a rate change.
        // (Dividing by the already-updated rate would make this a no-op.)
        if (m_sampleRate && m_numberOfFrames)
            m_numberOfFrames = m_numberOfFrames * sampleRate / m_sampleRate;
        else
            m_numberOfFrames = 0;
        m_sampleRate = sampleRate;
        m_numberOfChannels = numberOfChannels;
        m_streamDescription = streamDescription(sampleRate, numberOfChannels);
        m_audioBufferList = makeUnique<WebAudioBufferList>(m_streamDescription);
    }
    // Presentation time of this chunk = frames delivered so far, in sample-rate units.
    CMTime startTime = CMTimeMake(m_numberOfFrames, sampleRate);
    auto mediaTime = PAL::toMediaTime(startTime);
    m_numberOfFrames += numberOfFrames;
    // Point the (single, interleaved) buffer at libwebrtc's data without copying.
    auto& bufferList = *m_audioBufferList->buffer(0);
    bufferList.mDataByteSize = numberOfChannels * numberOfFrames * bitsPerSample / 8;
    bufferList.mNumberChannels = numberOfChannels;
    bufferList.mData = const_cast<void*>(audioData);
    audioSamplesAvailable(mediaTime, *m_audioBufferList, m_streamDescription, numberOfFrames);
}
}
#endif // USE(LIBWEBRTC)