// AudioSampleBufferCompressor.h
#pragma once
#if ENABLE(MEDIA_STREAM) && USE(AVFOUNDATION)
#import <CoreMedia/CoreMedia.h>
typedef struct opaqueCMSampleBuffer *CMSampleBufferRef;
typedef struct OpaqueAudioConverter* AudioConverterRef;
namespace WebCore {
// Compresses uncompressed audio CMSampleBuffers into an encoded format
// (AAC by default, per m_outputCodecType) using a CoreAudio AudioConverterRef.
// Input and output are staged in CMBufferQueues; callers push raw buffers via
// addSampleBuffer() and pull encoded ones via get/takeOutputSampleBuffer().
// NOTE(review): work appears to be serialized on m_serialDispatchQueue —
// confirm threading contract against the implementation file.
class AudioSampleBufferCompressor {
WTF_MAKE_FAST_ALLOCATED;
public:
// Creates and initializes a compressor. The trigger callback/object are
// installed on a CMBufferQueue (see initialize()); presumably it fires when
// encoded output becomes available — verify against the .mm implementation.
// Returns nullptr if initialize() fails.
static std::unique_ptr<AudioSampleBufferCompressor> create(CMBufferQueueTriggerCallback, void* callbackObject);
~AudioSampleBufferCompressor();
// Sets the requested output bit rate (stored in m_outputBitRate); when unset,
// defaultOutputBitRate() is presumably used instead.
void setBitsPerSecond(unsigned);
// Signals end of input so remaining queued samples can be drained/encoded.
void finish();
// Enqueues one uncompressed sample buffer for encoding.
void addSampleBuffer(CMSampleBufferRef);
// Returns the next encoded sample buffer, unretained. Ownership semantics
// (peek vs. dequeue) are not visible here — see implementation.
CMSampleBufferRef getOutputSampleBuffer();
// Returns the next encoded sample buffer as a RetainPtr (caller takes a ref).
RetainPtr<CMSampleBufferRef> takeOutputSampleBuffer();
// Current output bit rate in bits per second.
unsigned bitRate() const;
private:
AudioSampleBufferCompressor();
// Two-phase init paired with create(); returns false on failure.
bool initialize(CMBufferQueueTriggerCallback, void* callbackObject);
// Bit rate chosen when the caller did not set one, derived from the
// destination stream format.
UInt32 defaultOutputBitRate(const AudioStreamBasicDescription&) const;
// C-style trampoline handed to AudioConverterFillComplexBuffer; forwards to
// provideSourceDataNumOutputPackets() via the void* context.
static OSStatus audioConverterComplexInputDataProc(AudioConverterRef, UInt32*, AudioBufferList*, AudioStreamPacketDescription**, void*);
void processSampleBuffer(CMSampleBufferRef);
// Lazily creates m_converter once the source format description is known.
bool initAudioConverterForSourceFormatDescription(CMFormatDescriptionRef, AudioFormatID);
size_t computeBufferSizeForAudioFormat(AudioStreamBasicDescription, UInt32, Float32);
// Attaches encoder-priming trim attachments (kCMSampleBufferAttachmentKey-style)
// to leading output buffers while m_remainingPrimeDuration is nonzero —
// TODO confirm exact attachment keys in the implementation.
void attachPrimingTrimsIfNeeded(CMSampleBufferRef);
// Cached gradual-decoder-refresh count (m_gdrCountNum) as an NSNumber.
RetainPtr<NSNumber> gradualDecoderRefreshCount();
// Wraps encoded packets from the given buffer list into a CMSampleBuffer.
CMSampleBufferRef sampleBufferWithNumPackets(UInt32 numPackets, AudioBufferList);
// Encodes queued input until the input queue drops below the given time.
void processSampleBuffersUntilLowWaterTime(CMTime);
// Supplies source PCM data to the AudioConverter (called from the
// audioConverterComplexInputDataProc trampoline).
OSStatus provideSourceDataNumOutputPackets(UInt32*, AudioBufferList*, AudioStreamPacketDescription**);
// Serial queue on which encoding work runs. Raw dispatch_queue_t — lifetime
// management (retain/release) is handled in the implementation.
dispatch_queue_t m_serialDispatchQueue;
CMTime m_lowWaterTime { kCMTimeInvalid };
// Encoded output awaiting consumption.
RetainPtr<CMBufferQueueRef> m_outputBufferQueue;
// Raw input awaiting encoding.
RetainPtr<CMBufferQueueRef> m_inputBufferQueue;
bool m_isEncoding { false };
// Owned CoreAudio converter; raw handle, disposed in the destructor
// (presumably via AudioConverterDispose — confirm in implementation).
AudioConverterRef m_converter { nullptr };
AudioStreamBasicDescription m_sourceFormat;
AudioStreamBasicDescription m_destinationFormat;
RetainPtr<CMFormatDescriptionRef> m_destinationFormatDescription;
// Gradual-decoder-refresh count, cached by gradualDecoderRefreshCount().
RetainPtr<NSNumber> m_gdrCountNum;
// Upper bound on one encoded packet's size, used to size m_destinationBuffer.
UInt32 m_maxOutputPacketSize { 0 };
Vector<AudioStreamPacketDescription> m_destinationPacketDescriptions;
// Presentation time bookkeeping: native input timeline vs. output timeline.
CMTime m_currentNativePresentationTimeStamp { kCMTimeInvalid };
CMTime m_currentOutputPresentationTimeStamp { kCMTimeInvalid };
// Encoder priming duration still to be trimmed from leading output buffers.
CMTime m_remainingPrimeDuration { kCMTimeInvalid };
// Scratch buffers for converter input/output.
Vector<char> m_sourceBuffer;
Vector<char> m_destinationBuffer;
// Current input block buffer being consumed, plus read cursor state.
RetainPtr<CMBlockBufferRef> m_sampleBlockBuffer;
size_t m_sampleBlockBufferSize { 0 };
size_t m_currentOffsetInSampleBlockBuffer { 0 };
// Output codec; AAC unless overridden.
AudioFormatID m_outputCodecType { kAudioFormatMPEG4AAC };
// Caller-requested bit rate; nullopt means use defaultOutputBitRate().
Optional<unsigned> m_outputBitRate;
};
}
#endif // ENABLE(MEDIA_STREAM) && USE(AVFOUNDATION)