// AudioSourceProviderGStreamer.cpp
#include "config.h"
#include "AudioSourceProviderGStreamer.h"
#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)
#include "AudioBus.h"
#include "AudioSourceProviderClient.h"
#include <gst/app/gstappsink.h>
#include <gst/audio/audio-info.h>
#include <gst/base/gstadapter.h>
#include <wtf/glib/GMutexLocker.h>
namespace WebCore {
// Number of channels delivered to the WebAudio client (stereo).
static const int gNumberOfChannels = 2;
// Sample rate in Hz requested from GStreamer and reported to the client.
// NOTE(review): despite the name this is a sample rate, not a bit rate.
static const float gSampleBitRate = 44100;
// C trampoline: forwards appsink's new-sample callback to the provider.
static GstFlowReturn onAppsinkNewBufferCallback(GstAppSink* sink, gpointer userData)
{
    AudioSourceProviderGStreamer* provider = static_cast<AudioSourceProviderGStreamer*>(userData);
    return provider->handleAudioBuffer(sink);
}
static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider)
{
provider->handleNewDeinterleavePad(pad);
}
// C trampoline: deinterleave "no-more-pads" signal -> provider.
static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioSourceProviderGStreamer* provider)
{
    provider->deinterleavePadsConfigured();
}
static void onGStreamerDeinterleavePadRemovedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider)
{
provider->handleRemovedDeinterleavePad(pad);
}
// Pad probe installed on the appsink sink pads: when a FLUSH_STOP event goes
// by, discard any audio buffered in the provider's adapters so stale samples
// are not played after a seek/flush.
static GstPadProbeReturn onAppsinkFlushCallback(GstPad*, GstPadProbeInfo* info, gpointer userData)
{
    if (!(GST_PAD_PROBE_INFO_TYPE(info) & (GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH)))
        return GST_PAD_PROBE_OK;

    if (GST_EVENT_TYPE(GST_PAD_PROBE_INFO_EVENT(info)) == GST_EVENT_FLUSH_STOP)
        reinterpret_cast<AudioSourceProviderGStreamer*>(userData)->clearAdapters();

    return GST_PAD_PROBE_OK;
}
// Copies framesToProcess float samples from |adapter| into channel
// |channelNumber| of |bus|, consuming them from the adapter.
//
// Fix: the original only zeroed the bus when the adapter was completely
// empty; on a partial underrun (some data buffered, but less than a full
// render quantum) it left the channel's previous samples untouched and
// flushed nothing, replaying stale audio. Any underrun now outputs silence.
static void copyGStreamerBuffersToAudioChannel(GstAdapter* adapter, AudioBus* bus, int channelNumber, size_t framesToProcess)
{
    size_t bytes = framesToProcess * sizeof(float);
    if (gst_adapter_available(adapter) < bytes) {
        // Underrun (including an empty adapter): silence the whole bus,
        // matching the original empty-adapter behavior.
        bus->zero();
        return;
    }
    gst_adapter_copy(adapter, bus->channel(channelNumber)->mutableData(), 0, bytes);
    gst_adapter_flush(adapter, bytes);
}
// Creates the provider with no client attached yet. The two adapters buffer
// deinterleaved mono samples (front-left/mono and front-right) until the
// WebAudio rendering thread pulls them via provideInput().
AudioSourceProviderGStreamer::AudioSourceProviderGStreamer()
: m_client(0)
, m_deinterleaveSourcePads(0)
, m_deinterleavePadAddedHandlerId(0)
, m_deinterleaveNoMorePadsHandlerId(0)
, m_deinterleavePadRemovedHandlerId(0)
{
// m_adapterMutex guards the adapters shared between the GStreamer streaming
// thread (handleAudioBuffer) and the audio rendering thread (provideInput).
g_mutex_init(&m_adapterMutex);
m_frontLeftAdapter = gst_adapter_new();
m_frontRightAdapter = gst_adapter_new();
}
AudioSourceProviderGStreamer::~AudioSourceProviderGStreamer()
{
// Disconnect the deinterleave signal handlers (connected in setClient())
// before this object goes away, so no callback fires on a dangling `this`.
// The element is looked up by the name given to it in setClient().
GRefPtr<GstElement> deinterleave = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "deinterleave"));
if (deinterleave) {
g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadAddedHandlerId);
g_signal_handler_disconnect(deinterleave.get(), m_deinterleaveNoMorePadsHandlerId);
g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadRemovedHandlerId);
}
// NOTE(review): the appsink callbacks installed in handleNewDeinterleavePad()
// are not explicitly cleared here; this assumes the sink bin is torn down
// before/with the provider — confirm against the owner's shutdown order.
g_object_unref(m_frontLeftAdapter);
g_object_unref(m_frontRightAdapter);
g_mutex_clear(&m_adapterMutex);
}
// Builds the regular playback branch inside |audioBin|:
//
//   [teePredecessor ->] tee -> queue -> audioconvert -> audioresample
//       -> volume -> audioconvert -> audioresample -> autoaudiosink
//
// The tee ("audioTee") is the attachment point later used by setClient() to
// add the WebAudio extraction branch. If no predecessor is given, the tee's
// sink pad is ghosted as the bin's "sink" pad instead.
// All links use GST_PAD_LINK_CHECK_NOTHING: the caps are known compatible,
// so the (expensive) link checks are skipped.
void AudioSourceProviderGStreamer::configureAudioBin(GstElement* audioBin, GstElement* teePredecessor)
{
m_audioSinkBin = audioBin;
GstElement* audioTee = gst_element_factory_make("tee", "audioTee");
GstElement* audioQueue = gst_element_factory_make("queue", 0);
GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
GstElement* audioConvert2 = gst_element_factory_make("audioconvert", 0);
GstElement* audioResample = gst_element_factory_make("audioresample", 0);
GstElement* audioResample2 = gst_element_factory_make("audioresample", 0);
GstElement* volumeElement = gst_element_factory_make("volume", "volume");
GstElement* audioSink = gst_element_factory_make("autoaudiosink", 0);
// The bin takes ownership of the floating element refs.
gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioTee, audioQueue, audioConvert, audioResample, volumeElement, audioConvert2, audioResample2, audioSink, nullptr);
if (teePredecessor)
gst_element_link_pads_full(teePredecessor, "src", audioTee, "sink", GST_PAD_LINK_CHECK_NOTHING);
else {
// No predecessor: expose the tee's sink pad as the bin's external sink.
GRefPtr<GstPad> audioTeeSinkPad = adoptGRef(gst_element_get_static_pad(audioTee, "sink"));
gst_element_add_pad(m_audioSinkBin.get(), gst_ghost_pad_new("sink", audioTeeSinkPad.get()));
}
// "src_%u" requests a new tee source pad for this branch.
gst_element_link_pads_full(audioTee, "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioResample, "src", volumeElement, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(volumeElement, "src", audioConvert2, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioConvert2, "src", audioResample2, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioResample2, "src", audioSink, "sink", GST_PAD_LINK_CHECK_NOTHING);
}
// AudioSourceProvider interface: called on the WebAudio rendering thread to
// fill |bus| with framesToProcess frames. Channel 0 is fed from the
// front-left/mono adapter, channel 1 from the front-right adapter.
void AudioSourceProviderGStreamer::provideInput(AudioBus* bus, size_t framesToProcess)
{
    WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
    GstAdapter* adapters[2] = { m_frontLeftAdapter, m_frontRightAdapter };
    for (int channel = 0; channel < 2; ++channel)
        copyGStreamerBuffersToAudioChannel(adapters[channel], bus, channel, framesToProcess);
}
// Called on the GStreamer streaming thread for each sample reaching one of
// the per-channel appsinks. Buffers the sample's data into the adapter that
// matches its channel position, for later consumption by provideInput().
//
// Fix: the return value of gst_audio_info_from_caps() was ignored; on
// unparsable caps GST_AUDIO_INFO_POSITION would then read an uninitialized
// GstAudioInfo. Fail with GST_FLOW_ERROR instead.
GstFlowReturn AudioSourceProviderGStreamer::handleAudioBuffer(GstAppSink* sink)
{
    if (!m_client)
        return GST_FLOW_OK;

    // A null sample at EOS is expected; otherwise it is an error.
    GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink));
    if (!sample)
        return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample.get());
    if (!buffer)
        return GST_FLOW_ERROR;

    GstCaps* caps = gst_sample_get_caps(sample.get());
    if (!caps)
        return GST_FLOW_ERROR;

    GstAudioInfo info;
    if (!gst_audio_info_from_caps(&info, caps))
        return GST_FLOW_ERROR;

    // gst_buffer_ref() because gst_adapter_push() takes ownership of a ref
    // while the sample still owns |buffer|.
    WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
    switch (GST_AUDIO_INFO_POSITION(&info, 0)) {
    case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT:
    case GST_AUDIO_CHANNEL_POSITION_MONO:
        gst_adapter_push(m_frontLeftAdapter, gst_buffer_ref(buffer));
        break;
    case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT:
        gst_adapter_push(m_frontRightAdapter, gst_buffer_ref(buffer));
        break;
    default:
        // Unsupported channel position: drop the buffer.
        break;
    }
    return GST_FLOW_OK;
}
// Attaches the WebAudio client and grows the extraction branch off the tee
// created by configureAudioBin():
//
//   audioTee -> queue -> audioconvert -> audioresample -> capsfilter
//       -> deinterleave (one mono src pad per channel, handled in
//          handleNewDeinterleavePad())
//
// The capsfilter forces interleaved stereo float at gSampleBitRate so
// deinterleave emits exactly gNumberOfChannels pads.
// NOTE(review): no guard against being called twice; presumably callers
// invoke this at most once — confirm.
void AudioSourceProviderGStreamer::setClient(AudioSourceProviderClient* client)
{
ASSERT(client);
m_client = client;
// Mute the regular playback branch; from now on audio is presumably
// rendered by the WebAudio graph instead — TODO confirm with callers.
GRefPtr<GstElement> volumeElement = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "volume"));
g_object_set(volumeElement.get(), "mute", TRUE, nullptr);
GstElement* audioQueue = gst_element_factory_make("queue", 0);
GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
GstElement* audioResample = gst_element_factory_make("audioresample", 0);
GstElement* capsFilter = gst_element_factory_make("capsfilter", 0);
GstElement* deInterleave = gst_element_factory_make("deinterleave", "deinterleave");
// keep-positions preserves channel-position metadata on the mono src pads,
// which handleAudioBuffer() relies on to route buffers to the adapters.
g_object_set(deInterleave, "keep-positions", TRUE, nullptr);
// Handler ids are kept so the destructor can disconnect them.
m_deinterleavePadAddedHandlerId = g_signal_connect(deInterleave, "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this);
m_deinterleaveNoMorePadsHandlerId = g_signal_connect(deInterleave, "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this);
m_deinterleavePadRemovedHandlerId = g_signal_connect(deInterleave, "pad-removed", G_CALLBACK(onGStreamerDeinterleavePadRemovedCallback), this);
GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
"channels", G_TYPE_INT, gNumberOfChannels,
"format", G_TYPE_STRING, GST_AUDIO_NE(F32),
"layout", G_TYPE_STRING, "interleaved", nullptr);
g_object_set(capsFilter, "caps", caps, nullptr);
gst_caps_unref(caps);
gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioQueue, audioConvert, audioResample, capsFilter, deInterleave, nullptr);
GRefPtr<GstElement> audioTee = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "audioTee"));
gst_element_link_pads_full(audioTee.get(), "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_link_pads_full(capsFilter, "src", deInterleave, "sink", GST_PAD_LINK_CHECK_NOTHING);
// The pipeline may already be running: bring the new elements to its state.
gst_element_sync_state_with_parent(audioQueue);
gst_element_sync_state_with_parent(audioConvert);
gst_element_sync_state_with_parent(audioResample);
gst_element_sync_state_with_parent(capsFilter);
gst_element_sync_state_with_parent(deInterleave);
}
// "pad-added" handler for the deinterleave element: each new mono source pad
// gets a queue -> appsink branch whose samples feed handleAudioBuffer().
// Channels beyond the first two are routed to a fakesink and silenced.
//
// Fix: GstAppSinkCallbacks was only partially initialized (eos, new_preroll,
// new_sample); the remaining fields — including _gst_reserved — were passed
// uninitialized to gst_app_sink_set_callbacks(). The struct is now
// zero-initialized first.
void AudioSourceProviderGStreamer::handleNewDeinterleavePad(GstPad* pad)
{
    m_deinterleaveSourcePads++;

    if (m_deinterleaveSourcePads > 2) {
        // Only mono/stereo are supported: terminate the extra channel with a
        // fakesink so the pipeline keeps flowing.
        g_warning("The AudioSourceProvider supports only mono and stereo audio. Silencing out this new channel.");
        GstElement* queue = gst_element_factory_make("queue", 0);
        GstElement* sink = gst_element_factory_make("fakesink", 0);
        g_object_set(sink, "async", FALSE, nullptr);
        gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);

        GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
        gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

        // Remember the peer pad so handleRemovedDeinterleavePad() can tear
        // this branch down. NOTE(review): stores a borrowed pointer; assumes
        // the queue pad outlives the qdata usage — confirm.
        GQuark quark = g_quark_from_static_string("peer");
        g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
        gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);
        gst_element_sync_state_with_parent(queue);
        gst_element_sync_state_with_parent(sink);
        return;
    }

    GstElement* queue = gst_element_factory_make("queue", 0);
    GstElement* sink = gst_element_factory_make("appsink", 0);

    // Zero-initialize every callback field; only new_sample is used.
    GstAppSinkCallbacks callbacks = { };
    callbacks.new_sample = onAppsinkNewBufferCallback;
    gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0);
    g_object_set(sink, "async", FALSE, nullptr);

    // Each deinterleaved branch carries mono float samples at the provider's
    // nominal rate.
    GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate),
        "channels", G_TYPE_INT, 1,
        "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr));
    gst_app_sink_set_caps(GST_APP_SINK(sink), caps.get());

    gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr);

    GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink"));
    gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING);

    // See NOTE(review) above about the borrowed "peer" qdata pointer.
    GQuark quark = g_quark_from_static_string("peer");
    g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get());
    gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING);

    // Watch the appsink pad for FLUSH_STOP so the adapters get cleared.
    sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink"));
    gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, onAppsinkFlushCallback, this, nullptr);

    gst_element_sync_state_with_parent(queue);
    gst_element_sync_state_with_parent(sink);
}
// "pad-removed" handler: tears down the queue -> (app|fake)sink branch that
// handleNewDeinterleavePad() attached to |pad|, located via the "peer" qdata
// stored there.
void AudioSourceProviderGStreamer::handleRemovedDeinterleavePad(GstPad* pad)
{
m_deinterleaveSourcePads--;
// NOTE(review): assumes the "peer" qdata was set for this pad and that the
// stored (borrowed) pad pointer is still valid — confirm lifetime.
GQuark quark = g_quark_from_static_string("peer");
GstPad* sinkPad = reinterpret_cast<GstPad*>(g_object_get_qdata(G_OBJECT(pad), quark));
GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(sinkPad));
GRefPtr<GstPad> queueSrcPad = adoptGRef(gst_element_get_static_pad(queue.get(), "src"));
GRefPtr<GstPad> appsinkSinkPad = adoptGRef(gst_pad_get_peer(queueSrcPad.get()));
GRefPtr<GstElement> sink = adoptGRef(gst_pad_get_parent_element(appsinkSinkPad.get()));
// Stop both elements before unlinking/removing them from the bin.
gst_element_set_state(sink.get(), GST_STATE_NULL);
gst_element_set_state(queue.get(), GST_STATE_NULL);
gst_element_unlink(queue.get(), sink.get());
gst_bin_remove_many(GST_BIN(m_audioSinkBin.get()), queue.get(), sink.get(), nullptr);
}
// "no-more-pads" handler: all deinterleave source pads exist, so report the
// channel count and sample rate to the WebAudio client.
void AudioSourceProviderGStreamer::deinterleavePadsConfigured()
{
ASSERT(m_client);
ASSERT(m_deinterleaveSourcePads == gNumberOfChannels);
m_client->setFormat(m_deinterleaveSourcePads, gSampleBitRate);
}
void AudioSourceProviderGStreamer::clearAdapters()
{
WTF::GMutexLocker<GMutex> lock(m_adapterMutex);
gst_adapter_clear(m_frontLeftAdapter);
gst_adapter_clear(m_frontRightAdapter);
}
}
#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER)