// AudioSampleDataSource.h
#pragma once
#if ENABLE(MEDIA_STREAM)
#include "AudioSampleBufferList.h"
#include <CoreAudio/CoreAudioTypes.h>
#include <wtf/LoggerHelper.h>
#include <wtf/MediaTime.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/text/WTFString.h>
typedef const struct opaqueCMFormatDescription *CMFormatDescriptionRef;
typedef struct opaqueCMSampleBuffer *CMSampleBufferRef;
namespace WebCore {
class CAAudioStreamDescription;
class CARingBuffer;
// Buffers audio samples pushed from a producer (capture/decode side) into a
// ring buffer and serves them back to a pulling consumer, converting between
// the configured input and output stream formats via an AudioConverter when
// the two descriptions differ. Ref-counted; destruction happens on the main
// run loop (see DestructionThread::MainRunLoop template argument).
// NOTE(review): push and pull appear intended to run on different threads
// (separate input/output sample offsets, scratch buffer) — confirm the exact
// threading contract against the implementation file.
class AudioSampleDataSource : public ThreadSafeRefCounted<AudioSampleDataSource, WTF::DestructionThread::MainRunLoop>
#if !RELEASE_LOG_DISABLED
    , private LoggerHelper
#endif
{
public:
    // Creates a source able to hold up to `maximumSampleCount` frames.
    static Ref<AudioSampleDataSource> create(size_t, WTF::LoggerHelper&);
    ~AudioSampleDataSource();

    // Configure the format of samples pushed in / pulled out. Returns an
    // OSStatus error if the converter cannot be (re)created.
    OSStatus setInputFormat(const CAAudioStreamDescription&);
    OSStatus setOutputFormat(const CAAudioStreamDescription&);

    // Producer side: append samples at the given presentation time.
    void pushSamples(const MediaTime&, const PlatformAudioData&, size_t);
    void pushSamples(const AudioStreamBasicDescription&, CMSampleBufferRef);

    // Copy overwrites the destination; Mix adds into existing content.
    enum PullMode { Copy, Mix };

    // Consumer side: fill the destination with frames at the requested
    // timestamp, returning false if not enough data is available.
    bool pullSamples(AudioSampleBufferList&, size_t, uint64_t, double, PullMode);
    bool pullSamples(AudioBufferList&, size_t, uint64_t, double, PullMode);
    // FIXME: "Avalaible" is a misspelling of "Available"; renaming requires
    // updating all call sites, so the name is kept for source compatibility.
    bool pullAvalaibleSamplesAsChunks(AudioBufferList&, size_t frameCount, uint64_t timeStamp, Function<void()>&&);

    void setVolume(float volume) { m_volume = volume; }
    float volume() const { return m_volume; }

    void setMuted(bool muted) { m_muted = muted; }
    bool muted() const { return m_muted; }

    // Null until setInputFormat() has been called.
    const CAAudioStreamDescription* inputDescription() const { return m_inputDescription.get(); }

#if !RELEASE_LOG_DISABLED
    const Logger& logger() const final { return m_logger; }
    const void* logIdentifier() const final { return m_logIdentifier; }
    void setLogger(Ref<const Logger>&&, const void*);
#endif

    // Volume at or above this threshold is treated as full volume.
    static constexpr float EquivalentToMaxVolume = 0.95;

private:
    AudioSampleDataSource(size_t, LoggerHelper&);

    // (Re)creates m_converter for the current input/output descriptions.
    OSStatus setupConverter();
    bool pullSamplesInternal(AudioBufferList&, size_t&, uint64_t, double, PullMode);
    void pushSamplesInternal(const AudioBufferList&, const MediaTime&, size_t frameCount);

    std::unique_ptr<CAAudioStreamDescription> m_inputDescription;
    std::unique_ptr<CAAudioStreamDescription> m_outputDescription;

    MediaTime hostTime() const;

#if !RELEASE_LOG_DISABLED
    const char* logClassName() const final { return "AudioSampleDataSource"; }
    WTFLogChannel& logChannel() const final;
#endif

    uint64_t m_lastPushedSampleCount { 0 };
    MediaTime m_expectedNextPushedSampleTime { MediaTime::invalidTime() };
    MediaTime m_inputSampleOffset;
    int64_t m_outputSampleOffset { 0 };

    // Initialized to nullptr so the destructor can safely dispose it even if
    // setupConverter() was never reached (was previously uninitialized).
    AudioConverterRef m_converter { nullptr };
    RefPtr<AudioSampleBufferList> m_scratchBuffer;

    std::unique_ptr<CARingBuffer> m_ringBuffer;
    size_t m_maximumSampleCount { 0 };

    float m_volume { 1.0 };
    bool m_muted { false };
    bool m_shouldComputeOutputSampleOffset { true };
    uint64_t m_endFrameWhenNotEnoughData { 0 };

#if !RELEASE_LOG_DISABLED
    Ref<const Logger> m_logger;
    // Initialized to nullptr; was previously uninitialized until setLogger().
    const void* m_logIdentifier { nullptr };
#endif
};
}
#endif // ENABLE(MEDIA_STREAM)