#pragma once
#if ENABLE(WEB_AUDIO) && USE(MEDIATOOLBOX)
#include "AudioStreamDescription.h"
#include "CAAudioStreamDescription.h"
#include <JavaScriptCore/ArrayBuffer.h>
#include <wtf/Lock.h>
#include <wtf/UniqueRef.h>
#include <wtf/Vector.h>
typedef struct AudioBufferList AudioBufferList;
namespace WebCore {
// Abstract storage backend for CARingBuffer. An implementation owns the raw
// byte buffer and the "current frame bounds" bookkeeping; CARingBuffer layers
// per-channel pointers and the store/fetch logic on top of it.
class CARingBufferStorage {
    WTF_MAKE_FAST_ALLOCATED;
public:
    virtual ~CARingBufferStorage() = default;

    // Allocate byteCount bytes of backing storage for the given stream format
    // and a capacity of frameCount frames.
    virtual void allocate(size_t byteCount, const CAAudioStreamDescription& format, size_t frameCount) = 0;
    virtual void deallocate() = 0;
    virtual void* data() = 0;

    // Both bounds are absolute frame numbers (matching setCurrentFrameBounds()
    // and currentStartFrame()/currentEndFrame()), not times.
    virtual void getCurrentFrameBounds(uint64_t& startFrame, uint64_t& endFrame) = 0;
    virtual void setCurrentFrameBounds(uint64_t startFrame, uint64_t endFrame) = 0;

    // Optional hook; the default implementation is a no-op. Presumably used by
    // backends that publish bounds asynchronously — confirm against overriders.
    virtual void updateFrameBounds() { }

    virtual uint64_t currentStartFrame() const = 0;
    virtual uint64_t currentEndFrame() const = 0;
    virtual void flush() = 0;
};
// CARingBufferStorage backed by in-process WTF::Vectors. Frame bounds are
// published through a small queue of TimeBounds entries indexed by
// m_timeBoundsQueuePtr. NOTE(review): this looks like the seqlock-style
// scheme from CoreAudio's CARingBuffer — the out-of-line definitions should
// confirm how m_updateCounter guards reads.
class CARingBufferStorageVector final : public CARingBufferStorage {
public:
    CARingBufferStorageVector();
    ~CARingBufferStorageVector() = default;

private:
    // Capacity is byteCount bytes; the format and frame count are not needed
    // by this backend.
    void allocate(size_t byteCount, const CAAudioStreamDescription&, size_t) final { m_buffer.grow(byteCount); }
    void deallocate() final { m_buffer.clear(); }
    void* data() final { return m_buffer.data(); }
    // Bounds are absolute frame numbers, matching setCurrentFrameBounds().
    void getCurrentFrameBounds(uint64_t& startFrame, uint64_t& endFrame) final;
    void setCurrentFrameBounds(uint64_t startFrame, uint64_t endFrame) final;
    uint64_t currentStartFrame() const final;
    uint64_t currentEndFrame() const final;
    void flush() final;

    // One entry in the bounds queue. volatile discourages the compiler from
    // caching the fields; cross-thread ordering presumably comes from
    // m_timeBoundsQueuePtr / m_updateCounter — verify in the .cpp.
    struct TimeBounds {
        volatile uint64_t m_startFrame { 0 };
        volatile uint64_t m_endFrame { 0 };
        volatile uint32_t m_updateCounter { 0 };
    };

    Vector<uint8_t> m_buffer;
    Vector<TimeBounds> m_timeBoundsQueue;
    Lock m_currentFrameBoundsLock;
    std::atomic<int32_t> m_timeBoundsQueuePtr { 0 };
};
// Audio ring buffer modeled on CoreAudio's CARingBuffer. Frames are addressed
// by absolute, monotonically increasing frame numbers; frameOffset() maps them
// into the fixed-size storage provided by a CARingBufferStorage backend.
class CARingBuffer {
    WTF_MAKE_FAST_ALLOCATED;
public:
    WEBCORE_EXPORT CARingBuffer();
    WEBCORE_EXPORT explicit CARingBuffer(UniqueRef<CARingBufferStorage>&&);
    WEBCORE_EXPORT CARingBuffer(UniqueRef<CARingBufferStorage>&&, const CAAudioStreamDescription&, size_t frameCount);
    WEBCORE_EXPORT ~CARingBuffer();

    enum Error {
        Ok,
        TooMuch, // Returned by store() — presumably when frameCount exceeds capacity; confirm in the definition.
    };

    WEBCORE_EXPORT void allocate(const CAAudioStreamDescription&, size_t frameCount);
    WEBCORE_EXPORT void deallocate();

    // Write frameCount frames from the AudioBufferList into the ring starting
    // at absolute frame startFrame.
    WEBCORE_EXPORT Error store(const AudioBufferList*, size_t frameCount, uint64_t startFrame);

    // Copy overwrites the destination; Mix accumulates into it.
    enum FetchMode { Copy, Mix };
    // fetchIfHasEnoughData() returns false (and presumably writes nothing)
    // when the requested range is not fully available; fetch() is
    // unconditional. Verify the partial-data behavior against the definitions.
    WEBCORE_EXPORT bool fetchIfHasEnoughData(AudioBufferList*, size_t frameCount, uint64_t startFrame, FetchMode = Copy);
    WEBCORE_EXPORT void fetch(AudioBufferList*, size_t frameCount, uint64_t startFrame, FetchMode = Copy);

    WEBCORE_EXPORT void flush();
    WEBCORE_EXPORT void getCurrentFrameBounds(uint64_t& startFrame, uint64_t& endFrame);

    uint32_t channelCount() const { return m_channelCount; }
    CARingBufferStorage& storage() { return m_buffers; }

private:
    void updateFrameBounds();
    // Byte offset of an absolute frame number within the storage. The mask
    // implements the modulo, which relies on the capacity being a power of
    // two (m_frameCountMask == m_frameCount - 1).
    size_t frameOffset(uint64_t frameNumber) const { return (frameNumber & m_frameCountMask) * m_bytesPerFrame; }
    void clipTimeBounds(uint64_t& startRead, uint64_t& endRead);
    void setCurrentFrameBounds(uint64_t startFrame, uint64_t endFrame);
    void getCurrentFrameBoundsWithoutUpdate(uint64_t& startFrame, uint64_t& endFrame);
    void fetchInternal(AudioBufferList*, size_t frameCount, uint64_t startFrame, FetchMode);
    uint64_t currentStartFrame() const;
    uint64_t currentEndFrame() const;

    UniqueRef<CARingBufferStorage> m_buffers;
    Vector<Byte*> m_pointers; // Presumably one pointer per channel into the storage — confirm in allocate().
    uint32_t m_channelCount { 0 };
    size_t m_bytesPerFrame { 0 };
    uint32_t m_frameCount { 0 };
    uint32_t m_frameCountMask { 0 };
    size_t m_capacityBytes { 0 };
    CAAudioStreamDescription m_description;
};
}
#endif // ENABLE(WEB_AUDIO) && USE(MEDIATOOLBOX)