// RealtimeOutgoingAudioSource.h
#pragma once
#if USE(LIBWEBRTC)
#include "LibWebRTCMacros.h"
#include "MediaStreamTrackPrivate.h"
#include "Timer.h"
ALLOW_UNUSED_PARAMETERS_BEGIN
#include <webrtc/api/media_stream_interface.h>
ALLOW_UNUSED_PARAMETERS_END
#include <wtf/LoggerHelper.h>
#include <wtf/ThreadSafeRefCounted.h>
// Forward declarations for webrtc types used only by pointer/reference below;
// the full definitions come from media_stream_interface.h, included above.
namespace webrtc {
class AudioTrackInterface;
class AudioTrackSinkInterface;
}
namespace WebCore {
// Bridges a WebCore MediaStreamTrackPrivate audio track to libwebrtc's
// AudioSourceInterface: observes the track for mute/enable changes and
// incoming audio samples, and fans frames out to registered webrtc sinks.
// Ref-counted on the main thread; webrtc's AddRef/Release are forwarded to
// the WTF ThreadSafeRefCounted implementation.
class RealtimeOutgoingAudioSource
    : public ThreadSafeRefCounted<RealtimeOutgoingAudioSource, WTF::DestructionThread::Main>
    , public webrtc::AudioSourceInterface
    , private MediaStreamTrackPrivate::Observer
#if !RELEASE_LOG_DISABLED
    , private LoggerHelper
#endif
{
public:
    static Ref<RealtimeOutgoingAudioSource> create(Ref<MediaStreamTrackPrivate>&& audioSource);
    ~RealtimeOutgoingAudioSource();

    // Begin/stop observing the backing track for samples and state changes.
    void start() { observeSource(); }
    void stop() { unobserveSource(); }

    // Swaps the backing track (e.g. after a sender's track is replaced).
    void setSource(Ref<MediaStreamTrackPrivate>&&);
    MediaStreamTrackPrivate& source() const { return m_audioSource.get(); }

protected:
    explicit RealtimeOutgoingAudioSource(Ref<MediaStreamTrackPrivate>&&);

    // A track is silenced when muted or disabled; subclasses consult this
    // to decide whether to emit real or zeroed audio.
    bool isSilenced() const { return m_muted || !m_enabled; }

    void sendAudioFrames(const void* audioData, int bitsPerSample, int sampleRate, size_t numberOfChannels, size_t numberOfFrames);

#if !RELEASE_LOG_DISABLED
    // LoggerHelper: delegate to the backing track so log output correlates.
    const Logger& logger() const final { return m_audioSource->logger(); }
    const void* logIdentifier() const final { return m_audioSource->logIdentifier(); }
    const char* logClassName() const final { return "RealtimeOutgoingAudioSource"; }
    WTFLogChannel& logChannel() const final;
#endif

private:
    // webrtc::AudioSourceInterface.
    void AddSink(webrtc::AudioTrackSinkInterface*) final;
    void RemoveSink(webrtc::AudioTrackSinkInterface*) final;

    // Map webrtc's ref-counting onto WTF's.
    void AddRef() const final { ref(); }
    rtc::RefCountReleaseStatus Release() const final
    {
        // Sample the count before deref(): once deref() drops the last
        // reference, |this| may already be destroyed.
        auto result = refCount() - 1;
        deref();
        return result ? rtc::RefCountReleaseStatus::kOtherRefsRemained : rtc::RefCountReleaseStatus::kDroppedLastRef;
    }

    // This source is always live and local; observers never get state changes,
    // so observer registration is a no-op.
    SourceState state() const final { return kLive; }
    bool remote() const final { return false; }
    void RegisterObserver(webrtc::ObserverInterface*) final { }
    void UnregisterObserver(webrtc::ObserverInterface*) final { }

    void observeSource();
    void unobserveSource();

    void sourceMutedChanged();
    void sourceEnabledChanged();

    // Hooks for platform subclasses; default implementations do nothing.
    // (Fixed: removed stray semicolons after the inline bodies.)
    virtual void audioSamplesAvailable(const MediaTime&, const PlatformAudioData&, const AudioStreamDescription&, size_t) { }
    virtual bool isReachingBufferedAudioDataHighLimit() { return false; }
    virtual bool isReachingBufferedAudioDataLowLimit() { return false; }
    virtual bool hasBufferedEnoughData() { return false; }
    virtual void sourceUpdated() { }

    // MediaStreamTrackPrivate::Observer.
    void trackMutedChanged(MediaStreamTrackPrivate&) final { sourceMutedChanged(); }
    void trackEnabledChanged(MediaStreamTrackPrivate&) final { sourceEnabledChanged(); }
    // Fixed: marked final for consistency with the sibling Observer overrides.
    void audioSamplesAvailable(MediaStreamTrackPrivate&, const MediaTime& mediaTime, const PlatformAudioData& data, const AudioStreamDescription& description, size_t sampleCount) final { audioSamplesAvailable(mediaTime, data, description, sampleCount); }
    void trackEnded(MediaStreamTrackPrivate&) final { }
    void trackSettingsChanged(MediaStreamTrackPrivate&) final { }

    void initializeConverter();

    Ref<MediaStreamTrackPrivate> m_audioSource;
    bool m_muted { false };
    bool m_enabled { true };

    // Guards m_sinks: AddSink/RemoveSink may race with frame delivery.
    // NOTE(review): a recursive lock suggests re-entry from sink callbacks
    // is possible — confirm against the .cpp before changing to a plain Lock.
    mutable RecursiveLock m_sinksLock;
    HashSet<webrtc::AudioTrackSinkInterface*> m_sinks;

#if !RELEASE_LOG_DISABLED
    size_t m_chunksSent { 0 };
#endif
};
}
#endif // USE(LIBWEBRTC)