/* AudioDestinationGStreamer.cpp */
#include "config.h"
#if ENABLE(WEB_AUDIO)
#include "AudioDestinationGStreamer.h"
#include "AudioChannel.h"
#include "AudioSourceProvider.h"
#include <wtf/gobject/GOwnPtr.h>
#include "GRefPtrGStreamer.h"
#include "WebKitWebAudioSourceGStreamer.h"
#include <gst/gst.h>
#include <gst/pbutils/pbutils.h>
namespace WebCore {
// Number of audio frames requested from the provider per pull; also sets the
// size of m_renderBus. 128 matches the Web Audio render quantum.
const unsigned framesToPull = 128;
// Factory entry point shared by all ports; on this platform the GStreamer
// backend is the only implementation, so always build that one.
PassOwnPtr<AudioDestination> AudioDestination::create(AudioSourceProvider& provider, float sampleRate)
{
    AudioDestinationGStreamer* destination = new AudioDestinationGStreamer(provider, sampleRate);
    return adoptPtr(destination);
}
// FIXME: this should query the audio system for its preferred rate instead of
// assuming CD-quality output.
float AudioDestination::hardwareSampleRate()
{
    static const float kDefaultSampleRate = 44100;
    return kDefaultSampleRate;
}
static void onGStreamerWavparsePadAddedCallback(GstElement* element, GstPad* pad, AudioDestinationGStreamer* destination)
{
destination->finishBuildingPipelineAfterWavParserPadReady(pad);
}
// Builds the fixed half of the playback pipeline:
//   webkitwebaudiosrc ! wavparse
// wavparse only creates its source pad once it has parsed the WAV header, so
// the downstream elements (audioconvert ! autoaudiosink) are attached later,
// from finishBuildingPipelineAfterWavParserPadReady(), via "pad-added".
AudioDestinationGStreamer::AudioDestinationGStreamer(AudioSourceProvider& provider, float sampleRate)
: m_provider(provider)
, m_renderBus(2, framesToPull, true)
, m_sampleRate(sampleRate)
, m_isPlaying(false)
{
m_pipeline = gst_pipeline_new("play");
// Custom source element that pulls rendered audio from the provider through
// m_renderBus. Properties are consumed by webkitwebaudiosrc; note "bus" and
// "provider" are raw pointers to members, so the source must not outlive us.
GstElement* webkitAudioSrc = reinterpret_cast<GstElement*>(g_object_new(WEBKIT_TYPE_WEB_AUDIO_SRC,
"rate", sampleRate,
"bus", &m_renderBus,
"provider", &m_provider,
"frames", framesToPull, NULL));
GstElement* wavParser = gst_element_factory_make("wavparse", 0);
m_wavParserAvailable = wavParser;
ASSERT_WITH_MESSAGE(m_wavParserAvailable, "Failed to create GStreamer wavparse element");
if (!m_wavParserAvailable)
return;
// Finish building the pipeline once wavparse exposes its source pad.
g_signal_connect(wavParser, "pad-added", G_CALLBACK(onGStreamerWavparsePadAddedCallback), this);
// The bin takes ownership of both (floating) element references.
gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, wavParser, NULL);
// CHECK_NOTHING: the pads are known compatible, so skip link-time caps checks.
gst_element_link_pads_full(webkitAudioSrc, "src", wavParser, "sink", GST_PAD_LINK_CHECK_NOTHING);
}
AudioDestinationGStreamer::~AudioDestinationGStreamer()
{
// Drive the pipeline to NULL before dropping our reference so every element
// releases its resources; unreffing the pipeline then disposes of the
// elements the bin owns.
gst_element_set_state(m_pipeline, GST_STATE_NULL);
gst_object_unref(m_pipeline);
}
// Called from wavparse's "pad-added" signal: completes the pipeline with
// audioconvert ! autoaudiosink and links the new wavparse source pad into it.
// On any failure, m_audioSinkAvailable is left false and stop() will refuse to run.
void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(GstPad* pad)
{
ASSERT(m_wavParserAvailable);
GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", 0);
m_audioSinkAvailable = audioSink;
if (!audioSink) {
LOG_ERROR("Failed to create GStreamer autoaudiosink element");
return;
}
// autoaudiosink probes the platform audio sinks during its NULL->READY
// transition, so roll it to READY now to detect a broken backend early.
GstStateChangeReturn stateChangeReturn = gst_element_set_state(audioSink.get(), GST_STATE_READY);
if (stateChangeReturn == GST_STATE_CHANGE_FAILURE) {
LOG_ERROR("Failed to change autoaudiosink element state");
gst_element_set_state(audioSink.get(), GST_STATE_NULL);
m_audioSinkAvailable = false;
return;
}
GstElement* audioConvert = gst_element_factory_make("audioconvert", 0);
gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioSink.get(), NULL);
// Link wavparse's new src pad to audioconvert's sink pad.
// NOTE(review): the gst_pad_link return value is ignored — a failed link
// would leave the pipeline silently broken; consider logging it.
GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink"));
gst_pad_link(pad, sinkPad.get());
gst_element_link_pads_full(audioConvert, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING);
gst_element_sync_state_with_parent(audioConvert);
// NOTE(review): leakRef() relinquishes the GRefPtr's reference here. If the
// GRefPtr took a full (non-floating) reference at creation, this leaks one
// reference on the sink (the bin already owns it after gst_bin_add_many);
// confirm against GRefPtr's GstElement ref/sink semantics before changing.
gst_element_sync_state_with_parent(audioSink.leakRef());
}
void AudioDestinationGStreamer::start()
{
ASSERT(m_wavParserAvailable);
if (!m_wavParserAvailable)
return;
gst_element_set_state(m_pipeline, GST_STATE_PLAYING);
m_isPlaying = true;
}
// Pauses rendering. Requires the pipeline to have been fully built (wavparse
// created and the sink attached by finishBuildingPipelineAfterWavParserPadReady).
void AudioDestinationGStreamer::stop()
{
    ASSERT(m_wavParserAvailable && m_audioSinkAvailable);
    // BUG FIX: the guard previously read
    //     if (!m_wavParserAvailable || m_audioSinkAvailable)
    // i.e. it bailed out precisely when the sink WAS available (the normal,
    // fully-built state), so the pipeline was never paused — and attempted to
    // pause when the sink was missing. The negation now matches the ASSERT.
    if (!m_wavParserAvailable || !m_audioSinkAvailable)
        return;

    gst_element_set_state(m_pipeline, GST_STATE_PAUSED);
    m_isPlaying = false;
}
}
#endif // ENABLE(WEB_AUDIO)