author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7  /Source/WebCore/platform/audio/gstreamer
parent     32761a6cee1d0dee366b885b7b9c777e67885688
Diffstat (limited to 'Source/WebCore/platform/audio/gstreamer')
6 files changed, 726 insertions, 308 deletions
diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp index 25ddcb9fa..758389ced 100644 --- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp +++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.cpp @@ -1,5 +1,6 @@ /* * Copyright (C) 2011, 2012 Igalia S.L + * Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -27,9 +28,9 @@ #include "GRefPtrGStreamer.h" #include "Logging.h" #include "WebKitWebAudioSourceGStreamer.h" +#include <gst/audio/gstaudiobasesink.h> #include <gst/gst.h> -#include <gst/pbutils/pbutils.h> -#include <wtf/gobject/GUniquePtr.h> +#include <wtf/glib/GUniquePtr.h> namespace WebCore { @@ -42,6 +43,12 @@ gboolean messageCallback(GstBus*, GstMessage* message, AudioDestinationGStreamer return destination->handleMessage(message); } +static void autoAudioSinkChildAddedCallback(GstChildProxy*, GObject* object, gchar*, gpointer) +{ + if (GST_IS_AUDIO_BASE_SINK(object)) + g_object_set(GST_AUDIO_BASE_SINK(object), "buffer-time", static_cast<gint64>(100000), nullptr); +} + std::unique_ptr<AudioDestination> AudioDestination::create(AudioIOCallback& callback, const String&, unsigned numberOfInputChannels, unsigned numberOfOutputChannels, float sampleRate) { // FIXME: make use of inputDeviceId as appropriate. @@ -85,45 +92,17 @@ AudioDestinationGStreamer::AudioDestinationGStreamer(AudioIOCallback& callback, "rate", sampleRate, "bus", m_renderBus.get(), "provider", &m_callback, - "frames", framesToPull, NULL)); - - GstElement* wavParser = gst_element_factory_make("wavparse", 0); - - m_wavParserAvailable = wavParser; - ASSERT_WITH_MESSAGE(m_wavParserAvailable, "Failed to create GStreamer wavparse element"); - if (!m_wavParserAvailable) - return; - - gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, wavParser, NULL); - gst_element_link_pads_full(webkitAudioSrc, "src", wavParser, "sink", GST_PAD_LINK_CHECK_NOTHING); - - GRefPtr<GstPad> srcPad = adoptGRef(gst_element_get_static_pad(wavParser, "src")); - finishBuildingPipelineAfterWavParserPadReady(srcPad.get()); -} - -AudioDestinationGStreamer::~AudioDestinationGStreamer() -{ - GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline))); - ASSERT(bus); - g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this); - gst_bus_remove_signal_watch(bus.get()); - - gst_element_set_state(m_pipeline, GST_STATE_NULL); - gst_object_unref(m_pipeline); -} - -void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(GstPad* pad) -{ - ASSERT(m_wavParserAvailable); + "frames", framesToPull, nullptr)); - GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", 0); + GRefPtr<GstElement> audioSink = gst_element_factory_make("autoaudiosink", nullptr); m_audioSinkAvailable = audioSink; - if (!audioSink) { LOG_ERROR("Failed to create GStreamer autoaudiosink element"); return; } + g_signal_connect(audioSink.get(), "child-added", G_CALLBACK(autoAudioSinkChildAddedCallback), nullptr); + // Autoaudiosink does the real sink detection in the GST_STATE_NULL->READY transition // so it's best to roll it to READY as soon as possible to ensure the underlying platform // audiosink was loaded correctly. 
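A minimal standalone sketch (not WebKit code) of the autoaudiosink behaviour the new child-added callback relies on: autoaudiosink only creates its real platform sink during the NULL->READY transition, and once that child appears its buffer-time can be shortened, here to the same 100 ms value the patch configures.

#include <gst/gst.h>
#include <gst/audio/gstaudiobasesink.h>

static void onChildAdded(GstChildProxy*, GObject* object, gchar*, gpointer)
{
    // Only the real platform sink (pulsesink, alsasink, ...) derives from
    // GstAudioBaseSink; 100000 us mirrors the buffer-time set in the patch.
    if (GST_IS_AUDIO_BASE_SINK(object))
        g_object_set(GST_AUDIO_BASE_SINK(object), "buffer-time", static_cast<gint64>(100000), nullptr);
}

int main(int argc, char** argv)
{
    gst_init(&argc, &argv);
    GstElement* sink = gst_element_factory_make("autoaudiosink", nullptr);
    if (!sink)
        return 1;
    g_signal_connect(sink, "child-added", G_CALLBACK(onChildAdded), nullptr);

    // The child sink only exists after NULL->READY, which is why the
    // constructor above rolls autoaudiosink to READY as early as possible.
    gst_element_set_state(sink, GST_STATE_READY);
    gst_element_set_state(sink, GST_STATE_NULL);
    gst_object_unref(sink);
    return 0;
}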
@@ -135,17 +114,25 @@ void AudioDestinationGStreamer::finishBuildingPipelineAfterWavParserPadReady(Gst return; } - GstElement* audioConvert = gst_element_factory_make("audioconvert", 0); - gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioSink.get(), NULL); + GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr); + GstElement* audioResample = gst_element_factory_make("audioresample", nullptr); + gst_bin_add_many(GST_BIN(m_pipeline), webkitAudioSrc, audioConvert, audioResample, audioSink.get(), nullptr); - // Link wavparse's src pad to audioconvert sink pad. - GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink")); - gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING); + // Link src pads from webkitAudioSrc to audioConvert ! audioResample ! autoaudiosink. + gst_element_link_pads_full(webkitAudioSrc, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioResample, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING); +} - // Link audioconvert to audiosink and roll states. - gst_element_link_pads_full(audioConvert, "src", audioSink.get(), "sink", GST_PAD_LINK_CHECK_NOTHING); - gst_element_sync_state_with_parent(audioConvert); - gst_element_sync_state_with_parent(audioSink.leakRef()); +AudioDestinationGStreamer::~AudioDestinationGStreamer() +{ + GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline))); + ASSERT(bus); + g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this); + gst_bus_remove_signal_watch(bus.get()); + + gst_element_set_state(m_pipeline, GST_STATE_NULL); + gst_object_unref(m_pipeline); } gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message) @@ -172,18 +159,23 @@ gboolean AudioDestinationGStreamer::handleMessage(GstMessage* message) void AudioDestinationGStreamer::start() { - ASSERT(m_wavParserAvailable); - if (!m_wavParserAvailable) + ASSERT(m_audioSinkAvailable); + if (!m_audioSinkAvailable) return; - gst_element_set_state(m_pipeline, GST_STATE_PLAYING); + if (gst_element_set_state(m_pipeline, GST_STATE_PLAYING) == GST_STATE_CHANGE_FAILURE) { + g_warning("Error: Failed to set pipeline to playing"); + m_isPlaying = false; + return; + } + m_isPlaying = true; } void AudioDestinationGStreamer::stop() { - ASSERT(m_wavParserAvailable && m_audioSinkAvailable); - if (!m_wavParserAvailable || !m_audioSinkAvailable) + ASSERT(m_audioSinkAvailable); + if (!m_audioSinkAvailable) return; gst_element_set_state(m_pipeline, GST_STATE_PAUSED); diff --git a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h index 9dc9a9bea..3b89febc6 100644 --- a/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h +++ b/Source/WebCore/platform/audio/gstreamer/AudioDestinationGStreamer.h @@ -34,14 +34,13 @@ public: AudioDestinationGStreamer(AudioIOCallback&, float sampleRate); virtual ~AudioDestinationGStreamer(); - virtual void start(); - virtual void stop(); + void start() override; + void stop() override; - bool isPlaying() { return m_isPlaying; } - float sampleRate() const { return m_sampleRate; } + bool isPlaying() override { return m_isPlaying; } + float sampleRate() const override { return m_sampleRate; } AudioIOCallback& callback() const { return m_callback; } - void 
finishBuildingPipelineAfterWavParserPadReady(GstPad*); gboolean handleMessage(GstMessage*); private: @@ -50,7 +49,6 @@ private: float m_sampleRate; bool m_isPlaying; - bool m_wavParserAvailable; bool m_audioSinkAvailable; GstElement* m_pipeline; }; diff --git a/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp index e687e572a..6cd8bd7f8 100644 --- a/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp +++ b/Source/WebCore/platform/audio/gstreamer/AudioFileReaderGStreamer.cpp @@ -22,18 +22,19 @@ #if ENABLE(WEB_AUDIO) #include "AudioFileReader.h" - #include "AudioBus.h" - +#include "GRefPtrGStreamer.h" #include <gio/gio.h> #include <gst/app/gstappsink.h> +#include <gst/audio/audio-info.h> #include <gst/gst.h> -#include <gst/pbutils/pbutils.h> +#include <wtf/MainThread.h> #include <wtf/Noncopyable.h> -#include <wtf/gobject/GRefPtr.h> -#include <wtf/gobject/GUniquePtr.h> - -#include <gst/audio/audio.h> +#include <wtf/RunLoop.h> +#include <wtf/Threading.h> +#include <wtf/WeakPtr.h> +#include <wtf/glib/GRefPtr.h> +#include <wtf/glib/GUniquePtr.h> namespace WebCore { @@ -46,28 +47,36 @@ public: PassRefPtr<AudioBus> createBus(float sampleRate, bool mixToMono); - GstFlowReturn handleSample(GstAppSink*); - gboolean handleMessage(GstMessage*); +private: + WeakPtr<AudioFileReader> createWeakPtr() { return m_weakPtrFactory.createWeakPtr(); } + + static void deinterleavePadAddedCallback(AudioFileReader*, GstPad*); + static void deinterleaveReadyCallback(AudioFileReader*); + static void decodebinPadAddedCallback(AudioFileReader*, GstPad*); + + void handleMessage(GstMessage*); void handleNewDeinterleavePad(GstPad*); void deinterleavePadsConfigured(); void plugDeinterleave(GstPad*); void decodeAudioForBusCreation(); + GstFlowReturn handleSample(GstAppSink*); -private: - const void* m_data; - size_t m_dataSize; - const char* m_filePath; + WeakPtrFactory<AudioFileReader> m_weakPtrFactory; + RunLoop& m_runLoop; + const void* m_data { nullptr }; + size_t m_dataSize { 0 }; + const char* m_filePath { nullptr }; - float m_sampleRate; - GstBufferList* m_frontLeftBuffers; - GstBufferList* m_frontRightBuffers; + float m_sampleRate { 0 }; + int m_channels { 0 }; + GRefPtr<GstBufferList> m_frontLeftBuffers; + GRefPtr<GstBufferList> m_frontRightBuffers; - GstElement* m_pipeline; - unsigned m_channelSize; + GRefPtr<GstElement> m_pipeline; + unsigned m_channelSize { 0 }; GRefPtr<GstElement> m_decodebin; GRefPtr<GstElement> m_deInterleave; - GRefPtr<GMainLoop> m_loop; - bool m_errorOccurred; + bool m_errorOccurred { false }; }; static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChannel* audioChannel) @@ -83,132 +92,104 @@ static void copyGstreamerBuffersToAudioChannel(GstBufferList* buffers, AudioChan } } -static GstFlowReturn onAppsinkPullRequiredCallback(GstAppSink* sink, gpointer userData) -{ - return static_cast<AudioFileReader*>(userData)->handleSample(sink); -} - -gboolean messageCallback(GstBus*, GstMessage* message, AudioFileReader* reader) -{ - return reader->handleMessage(message); -} - -static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader) +void AudioFileReader::deinterleavePadAddedCallback(AudioFileReader* reader, GstPad* pad) { reader->handleNewDeinterleavePad(pad); } -static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioFileReader* reader) +void AudioFileReader::deinterleaveReadyCallback(AudioFileReader* reader) { 
reader->deinterleavePadsConfigured(); } -static void onGStreamerDecodebinPadAddedCallback(GstElement*, GstPad* pad, AudioFileReader* reader) +void AudioFileReader::decodebinPadAddedCallback(AudioFileReader* reader, GstPad* pad) { reader->plugDeinterleave(pad); } -gboolean enteredMainLoopCallback(gpointer userData) -{ - AudioFileReader* reader = reinterpret_cast<AudioFileReader*>(userData); - reader->decodeAudioForBusCreation(); - return FALSE; -} - AudioFileReader::AudioFileReader(const char* filePath) - : m_data(0) - , m_dataSize(0) + : m_weakPtrFactory(this) + , m_runLoop(RunLoop::current()) , m_filePath(filePath) - , m_channelSize(0) - , m_errorOccurred(false) { } AudioFileReader::AudioFileReader(const void* data, size_t dataSize) - : m_data(data) + : m_weakPtrFactory(this) + , m_runLoop(RunLoop::current()) + , m_data(data) , m_dataSize(dataSize) - , m_filePath(0) - , m_channelSize(0) - , m_errorOccurred(false) { } AudioFileReader::~AudioFileReader() { if (m_pipeline) { - GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline))); + GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get()))); ASSERT(bus); - g_signal_handlers_disconnect_by_func(bus.get(), reinterpret_cast<gpointer>(messageCallback), this); - gst_bus_remove_signal_watch(bus.get()); + gst_bus_set_sync_handler(bus.get(), nullptr, nullptr, nullptr); - gst_element_set_state(m_pipeline, GST_STATE_NULL); - gst_object_unref(GST_OBJECT(m_pipeline)); + gst_element_set_state(m_pipeline.get(), GST_STATE_NULL); + m_pipeline = nullptr; } if (m_decodebin) { - g_signal_handlers_disconnect_by_func(m_decodebin.get(), reinterpret_cast<gpointer>(onGStreamerDecodebinPadAddedCallback), this); - m_decodebin.clear(); + g_signal_handlers_disconnect_matched(m_decodebin.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this); + m_decodebin = nullptr; } if (m_deInterleave) { - g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleavePadAddedCallback), this); - g_signal_handlers_disconnect_by_func(m_deInterleave.get(), reinterpret_cast<gpointer>(onGStreamerDeinterleaveReadyCallback), this); - m_deInterleave.clear(); + g_signal_handlers_disconnect_matched(m_deInterleave.get(), G_SIGNAL_MATCH_DATA, 0, 0, nullptr, nullptr, this); + m_deInterleave = nullptr; } - - gst_buffer_list_unref(m_frontLeftBuffers); - gst_buffer_list_unref(m_frontRightBuffers); } GstFlowReturn AudioFileReader::handleSample(GstAppSink* sink) { - GstSample* sample = gst_app_sink_pull_sample(sink); + GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink)); if (!sample) return GST_FLOW_ERROR; - GstBuffer* buffer = gst_sample_get_buffer(sample); - if (!buffer) { - gst_sample_unref(sample); + GstBuffer* buffer = gst_sample_get_buffer(sample.get()); + if (!buffer) return GST_FLOW_ERROR; - } - GstCaps* caps = gst_sample_get_caps(sample); - if (!caps) { - gst_sample_unref(sample); + GstCaps* caps = gst_sample_get_caps(sample.get()); + if (!caps) return GST_FLOW_ERROR; - } GstAudioInfo info; gst_audio_info_from_caps(&info, caps); - int frames = GST_CLOCK_TIME_TO_FRAMES(GST_BUFFER_DURATION(buffer), GST_AUDIO_INFO_RATE(&info)); + int frames = gst_buffer_get_size(buffer) / info.bpf; // Check the first audio channel. The buffer is supposed to store // data of a single channel anyway. 
switch (GST_AUDIO_INFO_POSITION(&info, 0)) { case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT: - gst_buffer_list_add(m_frontLeftBuffers, gst_buffer_ref(buffer)); + case GST_AUDIO_CHANNEL_POSITION_MONO: + gst_buffer_list_add(m_frontLeftBuffers.get(), gst_buffer_ref(buffer)); m_channelSize += frames; break; case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT: - gst_buffer_list_add(m_frontRightBuffers, gst_buffer_ref(buffer)); + gst_buffer_list_add(m_frontRightBuffers.get(), gst_buffer_ref(buffer)); break; default: break; } - gst_sample_unref(sample); return GST_FLOW_OK; - } -gboolean AudioFileReader::handleMessage(GstMessage* message) +void AudioFileReader::handleMessage(GstMessage* message) { + ASSERT(&m_runLoop == &RunLoop::current()); + GUniqueOutPtr<GError> error; GUniqueOutPtr<gchar> debug; switch (GST_MESSAGE_TYPE(message)) { case GST_MESSAGE_EOS: - g_main_loop_quit(m_loop.get()); + m_runLoop.stop(); break; case GST_MESSAGE_WARNING: gst_message_parse_warning(message, &error.outPtr(), &debug.outPtr()); @@ -218,12 +199,12 @@ gboolean AudioFileReader::handleMessage(GstMessage* message) gst_message_parse_error(message, &error.outPtr(), &debug.outPtr()); g_warning("Error: %d, %s. Debug output: %s", error->code, error->message, debug.get()); m_errorOccurred = true; - g_main_loop_quit(m_loop.get()); + gst_element_set_state(m_pipeline.get(), GST_STATE_NULL); + m_runLoop.stop(); break; default: break; } - return TRUE; } void AudioFileReader::handleNewDeinterleavePad(GstPad* pad) @@ -232,62 +213,69 @@ void AudioFileReader::handleNewDeinterleavePad(GstPad* pad) // in an appsink so we can pull the data from each // channel. Pipeline looks like: // ... deinterleave ! queue ! appsink. - GstElement* queue = gst_element_factory_make("queue", 0); - GstElement* sink = gst_element_factory_make("appsink", 0); + GstElement* queue = gst_element_factory_make("queue", nullptr); + GstElement* sink = gst_element_factory_make("appsink", nullptr); - GstAppSinkCallbacks callbacks; - callbacks.eos = 0; - callbacks.new_preroll = 0; - callbacks.new_sample = onAppsinkPullRequiredCallback; - gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, 0); + static GstAppSinkCallbacks callbacks = { + nullptr, // eos + nullptr, // new_preroll + // new_sample + [](GstAppSink* sink, gpointer userData) -> GstFlowReturn { + return static_cast<AudioFileReader*>(userData)->handleSample(sink); + }, + { nullptr } + }; + gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr); - g_object_set(sink, "sync", FALSE, NULL); + g_object_set(sink, "sync", FALSE, nullptr); - gst_bin_add_many(GST_BIN(m_pipeline), queue, sink, NULL); + gst_bin_add_many(GST_BIN(m_pipeline.get()), queue, sink, nullptr); - GstPad* sinkPad = gst_element_get_static_pad(queue, "sink"); - gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING); - gst_object_unref(GST_OBJECT(sinkPad)); + GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink")); + gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING); gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING); - gst_element_set_state(queue, GST_STATE_READY); - gst_element_set_state(sink, GST_STATE_READY); + gst_element_sync_state_with_parent(queue); + gst_element_sync_state_with_parent(sink); } void AudioFileReader::deinterleavePadsConfigured() { // All deinterleave src pads are now available, let's roll to // PLAYING so data flows towards the sinks and it can be retrieved. 
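handleNewDeinterleavePad() above routes every planar channel into its own queue ! appsink branch and pulls data from a new-sample callback. A minimal sketch of that appsink pull pattern, with element creation and linking omitted (attachAppSink is an illustrative helper name, not WebKit API):

#include <gst/gst.h>
#include <gst/app/gstappsink.h>

static GstFlowReturn onNewSample(GstAppSink* sink, gpointer /*userData*/)
{
    // Pull the sample the appsink just received and inspect its buffer.
    GstSample* sample = gst_app_sink_pull_sample(sink);
    if (!sample)
        return GST_FLOW_ERROR;

    GstBuffer* buffer = gst_sample_get_buffer(sample);
    g_print("pulled %" G_GSIZE_FORMAT " bytes of planar audio\n", gst_buffer_get_size(buffer));
    gst_sample_unref(sample);
    return GST_FLOW_OK;
}

void attachAppSink(GstElement* appsink, gpointer userData)
{
    GstAppSinkCallbacks callbacks = { };
    callbacks.new_sample = onNewSample;
    gst_app_sink_set_callbacks(GST_APP_SINK(appsink), &callbacks, userData, nullptr);

    // sync=FALSE: consume buffers as fast as they arrive instead of pacing
    // them against the pipeline clock, since only the raw data is wanted.
    g_object_set(appsink, "sync", FALSE, nullptr);
}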
- gst_element_set_state(m_pipeline, GST_STATE_PLAYING); + gst_element_set_state(m_pipeline.get(), GST_STATE_PLAYING); } void AudioFileReader::plugDeinterleave(GstPad* pad) { + // Ignore any additional source pads just in case. + if (m_deInterleave) + return; + // A decodebin pad was added, plug in a deinterleave element to // separate each planar channel. Sub pipeline looks like // ... decodebin2 ! audioconvert ! audioresample ! capsfilter ! deinterleave. - GstElement* audioConvert = gst_element_factory_make("audioconvert", 0); - GstElement* audioResample = gst_element_factory_make("audioresample", 0); - GstElement* capsFilter = gst_element_factory_make("capsfilter", 0); + GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr); + GstElement* audioResample = gst_element_factory_make("audioresample", nullptr); + GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr); m_deInterleave = gst_element_factory_make("deinterleave", "deinterleave"); - g_object_set(m_deInterleave.get(), "keep-positions", TRUE, NULL); - g_signal_connect(m_deInterleave.get(), "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this); - g_signal_connect(m_deInterleave.get(), "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this); + g_object_set(m_deInterleave.get(), "keep-positions", TRUE, nullptr); + g_signal_connect_swapped(m_deInterleave.get(), "pad-added", G_CALLBACK(deinterleavePadAddedCallback), this); + g_signal_connect_swapped(m_deInterleave.get(), "no-more-pads", G_CALLBACK(deinterleaveReadyCallback), this); - GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(m_sampleRate), - "channels", G_TYPE_INT, 2, - "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32), - "layout", G_TYPE_STRING, "interleaved", nullptr); - g_object_set(capsFilter, "caps", caps, NULL); - gst_caps_unref(caps); + GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", + "rate", G_TYPE_INT, static_cast<int>(m_sampleRate), + "channels", G_TYPE_INT, m_channels, + "format", G_TYPE_STRING, GST_AUDIO_NE(F32), + "layout", G_TYPE_STRING, "interleaved", nullptr)); + g_object_set(capsFilter, "caps", caps.get(), nullptr); - gst_bin_add_many(GST_BIN(m_pipeline), audioConvert, audioResample, capsFilter, m_deInterleave.get(), NULL); + gst_bin_add_many(GST_BIN(m_pipeline.get()), audioConvert, audioResample, capsFilter, m_deInterleave.get(), nullptr); - GstPad* sinkPad = gst_element_get_static_pad(audioConvert, "sink"); - gst_pad_link_full(pad, sinkPad, GST_PAD_LINK_CHECK_NOTHING); - gst_object_unref(GST_OBJECT(sinkPad)); + GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(audioConvert, "sink")); + gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING); gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING); gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING); @@ -301,75 +289,102 @@ void AudioFileReader::plugDeinterleave(GstPad* pad) void AudioFileReader::decodeAudioForBusCreation() { + ASSERT(&m_runLoop == &RunLoop::current()); + // Build the pipeline (giostreamsrc | filesrc) ! decodebin2 // A deinterleave element is added once a src pad becomes available in decodebin. 
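As the comment above notes, decodebin only exposes a source pad once the stream has been typefound, which is why plugDeinterleave() runs from a "pad-added" handler. The generic pattern is sketched below, with an audioconvert standing in for the deinterleave front-end (a real handler would also check the new pad's caps):

#include <gst/gst.h>

static void onPadAdded(GstElement* /*decodebin*/, GstPad* pad, gpointer userData)
{
    // Link the freshly exposed decodebin source pad to an already-added,
    // still unlinked downstream element.
    GstElement* downstream = GST_ELEMENT(userData);
    GstPad* sinkPad = gst_element_get_static_pad(downstream, "sink");
    if (!gst_pad_is_linked(sinkPad))
        gst_pad_link(pad, sinkPad);
    gst_object_unref(sinkPad);
}

void plugDecodebin(GstElement* pipeline, GstElement* source, GstElement* downstream)
{
    GstElement* decodebin = gst_element_factory_make("decodebin", nullptr);
    gst_bin_add(GST_BIN(pipeline), decodebin);
    gst_element_link(source, decodebin);
    // decodebin's audio source pad appears asynchronously, once typefinding
    // has run, so the actual linking happens in the callback.
    g_signal_connect(decodebin, "pad-added", G_CALLBACK(onPadAdded), downstream);
}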
- m_pipeline = gst_pipeline_new(0); + m_pipeline = gst_pipeline_new(nullptr); - GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline))); + GRefPtr<GstBus> bus = adoptGRef(gst_pipeline_get_bus(GST_PIPELINE(m_pipeline.get()))); ASSERT(bus); - gst_bus_add_signal_watch(bus.get()); - g_signal_connect(bus.get(), "message", G_CALLBACK(messageCallback), this); + gst_bus_set_sync_handler(bus.get(), [](GstBus*, GstMessage* message, gpointer userData) { + auto& reader = *static_cast<AudioFileReader*>(userData); + if (&reader.m_runLoop == &RunLoop::current()) + reader.handleMessage(message); + else { + GRefPtr<GstMessage> protectMessage(message); + auto weakThis = reader.createWeakPtr(); + reader.m_runLoop.dispatch([weakThis, protectMessage] { + if (weakThis) + weakThis->handleMessage(protectMessage.get()); + }); + } + gst_message_unref(message); + return GST_BUS_DROP; + }, this, nullptr); GstElement* source; if (m_data) { ASSERT(m_dataSize); - source = gst_element_factory_make("giostreamsrc", 0); - GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, 0)); - g_object_set(source, "stream", memoryStream.get(), NULL); + source = gst_element_factory_make("giostreamsrc", nullptr); + GRefPtr<GInputStream> memoryStream = adoptGRef(g_memory_input_stream_new_from_data(m_data, m_dataSize, nullptr)); + g_object_set(source, "stream", memoryStream.get(), nullptr); } else { - source = gst_element_factory_make("filesrc", 0); - g_object_set(source, "location", m_filePath, NULL); + source = gst_element_factory_make("filesrc", nullptr); + g_object_set(source, "location", m_filePath, nullptr); } m_decodebin = gst_element_factory_make("decodebin", "decodebin"); - g_signal_connect(m_decodebin.get(), "pad-added", G_CALLBACK(onGStreamerDecodebinPadAddedCallback), this); + g_signal_connect_swapped(m_decodebin.get(), "pad-added", G_CALLBACK(decodebinPadAddedCallback), this); - gst_bin_add_many(GST_BIN(m_pipeline), source, m_decodebin.get(), NULL); + gst_bin_add_many(GST_BIN(m_pipeline.get()), source, m_decodebin.get(), nullptr); gst_element_link_pads_full(source, "src", m_decodebin.get(), "sink", GST_PAD_LINK_CHECK_NOTHING); - gst_element_set_state(m_pipeline, GST_STATE_PAUSED); + + // Catch errors here immediately, there might not be an error message if we're unlucky. + if (gst_element_set_state(m_pipeline.get(), GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) { + g_warning("Error: Failed to set pipeline to PAUSED"); + m_errorOccurred = true; + m_runLoop.stop(); + } } PassRefPtr<AudioBus> AudioFileReader::createBus(float sampleRate, bool mixToMono) { m_sampleRate = sampleRate; + m_channels = mixToMono ? 1 : 2; - m_frontLeftBuffers = gst_buffer_list_new(); - m_frontRightBuffers = gst_buffer_list_new(); - - GRefPtr<GMainContext> context = adoptGRef(g_main_context_new()); - g_main_context_push_thread_default(context.get()); - m_loop = adoptGRef(g_main_loop_new(context.get(), FALSE)); + m_frontLeftBuffers = adoptGRef(gst_buffer_list_new()); + m_frontRightBuffers = adoptGRef(gst_buffer_list_new()); // Start the pipeline processing just after the loop is started. 
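Bus messages are posted from GStreamer streaming threads, so the patch replaces the signal watch with a sync handler that bounces each message over to the reader's WTF::RunLoop. The same idea with plain GLib is sketched below, using the default GMainContext as the target; the WebKit-specific RunLoop and WeakPtr plumbing is omitted.

#include <gst/gst.h>

static gboolean handleMessageOnMainContext(gpointer data)
{
    GstMessage* message = GST_MESSAGE(data);
    g_print("handling %s on the main context\n", GST_MESSAGE_TYPE_NAME(message));
    gst_message_unref(message);
    return G_SOURCE_REMOVE;
}

static GstBusSyncReply busSyncHandler(GstBus*, GstMessage* message, gpointer)
{
    // This runs on whichever thread posted the message. Returning GST_BUS_DROP
    // makes the handler responsible for the message reference, so it is handed
    // over to the default main context and unreffed by the idle callback.
    g_main_context_invoke(nullptr, handleMessageOnMainContext, message);
    return GST_BUS_DROP;
}

void watchPipelineBus(GstElement* pipeline)
{
    GstBus* bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    gst_bus_set_sync_handler(bus, busSyncHandler, nullptr, nullptr);
    gst_object_unref(bus);
}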
- GRefPtr<GSource> timeoutSource = adoptGRef(g_timeout_source_new(0)); - g_source_attach(timeoutSource.get(), context.get()); - g_source_set_callback(timeoutSource.get(), reinterpret_cast<GSourceFunc>(enteredMainLoopCallback), this, 0); + m_runLoop.dispatch([this] { decodeAudioForBusCreation(); }); + m_runLoop.run(); - g_main_loop_run(m_loop.get()); - g_main_context_pop_thread_default(context.get()); + // Set pipeline to GST_STATE_NULL state here already ASAP to + // release any resources that might still be used. + gst_element_set_state(m_pipeline.get(), GST_STATE_NULL); if (m_errorOccurred) - return 0; + return nullptr; - unsigned channels = mixToMono ? 1 : 2; - RefPtr<AudioBus> audioBus = AudioBus::create(channels, m_channelSize, true); + RefPtr<AudioBus> audioBus = AudioBus::create(m_channels, m_channelSize, true); audioBus->setSampleRate(m_sampleRate); - copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers, audioBus->channel(0)); + copyGstreamerBuffersToAudioChannel(m_frontLeftBuffers.get(), audioBus->channel(0)); if (!mixToMono) - copyGstreamerBuffersToAudioChannel(m_frontRightBuffers, audioBus->channel(1)); + copyGstreamerBuffersToAudioChannel(m_frontRightBuffers.get(), audioBus->channel(1)); return audioBus; } PassRefPtr<AudioBus> createBusFromAudioFile(const char* filePath, bool mixToMono, float sampleRate) { - return AudioFileReader(filePath).createBus(sampleRate, mixToMono); + RefPtr<AudioBus> returnValue; + auto threadID = createThread("AudioFileReader", [&returnValue, filePath, mixToMono, sampleRate] { + returnValue = AudioFileReader(filePath).createBus(sampleRate, mixToMono); + }); + waitForThreadCompletion(threadID); + return returnValue; } PassRefPtr<AudioBus> createBusFromInMemoryAudioFile(const void* data, size_t dataSize, bool mixToMono, float sampleRate) { - return AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono); + RefPtr<AudioBus> returnValue; + auto threadID = createThread("AudioFileReader", [&returnValue, data, dataSize, mixToMono, sampleRate] { + returnValue = AudioFileReader(data, dataSize).createBus(sampleRate, mixToMono); + }); + waitForThreadCompletion(threadID); + return returnValue; } } // WebCore diff --git a/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp new file mode 100644 index 000000000..4d7f4154d --- /dev/null +++ b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.cpp @@ -0,0 +1,349 @@ +/* + * Copyright (C) 2014 Igalia S.L + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "config.h" +#include "AudioSourceProviderGStreamer.h" + +#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER) + +#include "AudioBus.h" +#include "AudioSourceProviderClient.h" +#include <gst/app/gstappsink.h> +#include <gst/audio/audio-info.h> +#include <gst/base/gstadapter.h> +#include <wtf/glib/GMutexLocker.h> + + +namespace WebCore { + +// For now the provider supports only stereo files at a fixed sample +// bitrate. +static const int gNumberOfChannels = 2; +static const float gSampleBitRate = 44100; + +static GstFlowReturn onAppsinkNewBufferCallback(GstAppSink* sink, gpointer userData) +{ + return static_cast<AudioSourceProviderGStreamer*>(userData)->handleAudioBuffer(sink); +} + +static void onGStreamerDeinterleavePadAddedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider) +{ + provider->handleNewDeinterleavePad(pad); +} + +static void onGStreamerDeinterleaveReadyCallback(GstElement*, AudioSourceProviderGStreamer* provider) +{ + provider->deinterleavePadsConfigured(); +} + +static void onGStreamerDeinterleavePadRemovedCallback(GstElement*, GstPad* pad, AudioSourceProviderGStreamer* provider) +{ + provider->handleRemovedDeinterleavePad(pad); +} + +static GstPadProbeReturn onAppsinkFlushCallback(GstPad*, GstPadProbeInfo* info, gpointer userData) +{ + if (GST_PAD_PROBE_INFO_TYPE(info) & (GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM | GST_PAD_PROBE_TYPE_EVENT_FLUSH)) { + GstEvent* event = GST_PAD_PROBE_INFO_EVENT(info); + if (GST_EVENT_TYPE(event) == GST_EVENT_FLUSH_STOP) { + AudioSourceProviderGStreamer* provider = reinterpret_cast<AudioSourceProviderGStreamer*>(userData); + provider->clearAdapters(); + } + } + return GST_PAD_PROBE_OK; +} + +static void copyGStreamerBuffersToAudioChannel(GstAdapter* adapter, AudioBus* bus , int channelNumber, size_t framesToProcess) +{ + if (!gst_adapter_available(adapter)) { + bus->zero(); + return; + } + + size_t bytes = framesToProcess * sizeof(float); + if (gst_adapter_available(adapter) >= bytes) { + gst_adapter_copy(adapter, bus->channel(channelNumber)->mutableData(), 0, bytes); + gst_adapter_flush(adapter, bytes); + } +} + +AudioSourceProviderGStreamer::AudioSourceProviderGStreamer() + : m_client(nullptr) + , m_deinterleaveSourcePads(0) + , m_deinterleavePadAddedHandlerId(0) + , m_deinterleaveNoMorePadsHandlerId(0) + , m_deinterleavePadRemovedHandlerId(0) +{ + g_mutex_init(&m_adapterMutex); + m_frontLeftAdapter = gst_adapter_new(); + m_frontRightAdapter = gst_adapter_new(); +} + +AudioSourceProviderGStreamer::~AudioSourceProviderGStreamer() +{ + GRefPtr<GstElement> deinterleave = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "deinterleave")); + if (deinterleave) { + g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadAddedHandlerId); + g_signal_handler_disconnect(deinterleave.get(), m_deinterleaveNoMorePadsHandlerId); + g_signal_handler_disconnect(deinterleave.get(), m_deinterleavePadRemovedHandlerId); + } + + g_object_unref(m_frontLeftAdapter); + g_object_unref(m_frontRightAdapter); + g_mutex_clear(&m_adapterMutex); +} + +void AudioSourceProviderGStreamer::configureAudioBin(GstElement* audioBin, GstElement* teePredecessor) +{ + m_audioSinkBin = audioBin; + + GstElement* audioTee = gst_element_factory_make("tee", "audioTee"); + GstElement* audioQueue = 
gst_element_factory_make("queue", nullptr); + GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr); + GstElement* audioConvert2 = gst_element_factory_make("audioconvert", nullptr); + GstElement* audioResample = gst_element_factory_make("audioresample", nullptr); + GstElement* audioResample2 = gst_element_factory_make("audioresample", nullptr); + GstElement* volumeElement = gst_element_factory_make("volume", "volume"); + GstElement* audioSink = gst_element_factory_make("autoaudiosink", nullptr); + + gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioTee, audioQueue, audioConvert, audioResample, volumeElement, audioConvert2, audioResample2, audioSink, nullptr); + + // In cases where the audio-sink needs elements before tee (such + // as scaletempo) they need to be linked to tee which in this case + // doesn't need a ghost pad. It is assumed that the teePredecessor + // chain already configured a ghost pad. + if (teePredecessor) + gst_element_link_pads_full(teePredecessor, "src", audioTee, "sink", GST_PAD_LINK_CHECK_NOTHING); + else { + // Add a ghostpad to the bin so it can proxy to tee. + GRefPtr<GstPad> audioTeeSinkPad = adoptGRef(gst_element_get_static_pad(audioTee, "sink")); + gst_element_add_pad(m_audioSinkBin.get(), gst_ghost_pad_new("sink", audioTeeSinkPad.get())); + } + + // Link a new src pad from tee to queue ! audioconvert ! + // audioresample ! volume ! audioconvert ! audioresample ! + // autoaudiosink. The audioresample and audioconvert are needed to + // ensure the audio sink receives buffers in the correct format. + gst_element_link_pads_full(audioTee, "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioResample, "src", volumeElement, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(volumeElement, "src", audioConvert2, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioConvert2, "src", audioResample2, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioResample2, "src", audioSink, "sink", GST_PAD_LINK_CHECK_NOTHING); +} + +void AudioSourceProviderGStreamer::provideInput(AudioBus* bus, size_t framesToProcess) +{ + WTF::GMutexLocker<GMutex> lock(m_adapterMutex); + copyGStreamerBuffersToAudioChannel(m_frontLeftAdapter, bus, 0, framesToProcess); + copyGStreamerBuffersToAudioChannel(m_frontRightAdapter, bus, 1, framesToProcess); +} + +GstFlowReturn AudioSourceProviderGStreamer::handleAudioBuffer(GstAppSink* sink) +{ + if (!m_client) + return GST_FLOW_OK; + + // Pull a buffer from appsink and store it the appropriate buffer + // list for the audio channel it represents. + GRefPtr<GstSample> sample = adoptGRef(gst_app_sink_pull_sample(sink)); + if (!sample) + return gst_app_sink_is_eos(sink) ? GST_FLOW_EOS : GST_FLOW_ERROR; + + GstBuffer* buffer = gst_sample_get_buffer(sample.get()); + if (!buffer) + return GST_FLOW_ERROR; + + GstCaps* caps = gst_sample_get_caps(sample.get()); + if (!caps) + return GST_FLOW_ERROR; + + GstAudioInfo info; + gst_audio_info_from_caps(&info, caps); + + WTF::GMutexLocker<GMutex> lock(m_adapterMutex); + + // Check the first audio channel. The buffer is supposed to store + // data of a single channel anyway. 
+ switch (GST_AUDIO_INFO_POSITION(&info, 0)) { + case GST_AUDIO_CHANNEL_POSITION_FRONT_LEFT: + case GST_AUDIO_CHANNEL_POSITION_MONO: + gst_adapter_push(m_frontLeftAdapter, gst_buffer_ref(buffer)); + break; + case GST_AUDIO_CHANNEL_POSITION_FRONT_RIGHT: + gst_adapter_push(m_frontRightAdapter, gst_buffer_ref(buffer)); + break; + default: + break; + } + + return GST_FLOW_OK; +} + +void AudioSourceProviderGStreamer::setClient(AudioSourceProviderClient* client) +{ + ASSERT(client); + m_client = client; + + // The volume element is used to mute audio playback towards the + // autoaudiosink. This is needed to avoid double playback of audio + // from our audio sink and from the WebAudio AudioDestination node + // supposedly configured already by application side. + GRefPtr<GstElement> volumeElement = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "volume")); + g_object_set(volumeElement.get(), "mute", TRUE, nullptr); + + // The audioconvert and audioresample elements are needed to + // ensure deinterleave and the sinks downstream receive buffers in + // the format specified by the capsfilter. + GstElement* audioQueue = gst_element_factory_make("queue", nullptr); + GstElement* audioConvert = gst_element_factory_make("audioconvert", nullptr); + GstElement* audioResample = gst_element_factory_make("audioresample", nullptr); + GstElement* capsFilter = gst_element_factory_make("capsfilter", nullptr); + GstElement* deInterleave = gst_element_factory_make("deinterleave", "deinterleave"); + + g_object_set(deInterleave, "keep-positions", TRUE, nullptr); + m_deinterleavePadAddedHandlerId = g_signal_connect(deInterleave, "pad-added", G_CALLBACK(onGStreamerDeinterleavePadAddedCallback), this); + m_deinterleaveNoMorePadsHandlerId = g_signal_connect(deInterleave, "no-more-pads", G_CALLBACK(onGStreamerDeinterleaveReadyCallback), this); + m_deinterleavePadRemovedHandlerId = g_signal_connect(deInterleave, "pad-removed", G_CALLBACK(onGStreamerDeinterleavePadRemovedCallback), this); + + GstCaps* caps = gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate), + "channels", G_TYPE_INT, gNumberOfChannels, + "format", G_TYPE_STRING, GST_AUDIO_NE(F32), + "layout", G_TYPE_STRING, "interleaved", nullptr); + + g_object_set(capsFilter, "caps", caps, nullptr); + gst_caps_unref(caps); + + gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), audioQueue, audioConvert, audioResample, capsFilter, deInterleave, nullptr); + + GRefPtr<GstElement> audioTee = adoptGRef(gst_bin_get_by_name(GST_BIN(m_audioSinkBin.get()), "audioTee")); + + // Link a new src pad from tee to queue ! audioconvert ! + // audioresample ! capsfilter ! deinterleave. Later + // on each deinterleaved planar audio channel will be routed to an + // appsink for data extraction and processing. 
+ gst_element_link_pads_full(audioTee.get(), "src_%u", audioQueue, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioQueue, "src", audioConvert, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioConvert, "src", audioResample, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(audioResample, "src", capsFilter, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_link_pads_full(capsFilter, "src", deInterleave, "sink", GST_PAD_LINK_CHECK_NOTHING); + + gst_element_sync_state_with_parent(audioQueue); + gst_element_sync_state_with_parent(audioConvert); + gst_element_sync_state_with_parent(audioResample); + gst_element_sync_state_with_parent(capsFilter); + gst_element_sync_state_with_parent(deInterleave); +} + +void AudioSourceProviderGStreamer::handleNewDeinterleavePad(GstPad* pad) +{ + m_deinterleaveSourcePads++; + + if (m_deinterleaveSourcePads > 2) { + g_warning("The AudioSourceProvider supports only mono and stereo audio. Silencing out this new channel."); + GstElement* queue = gst_element_factory_make("queue", nullptr); + GstElement* sink = gst_element_factory_make("fakesink", nullptr); + g_object_set(sink, "async", FALSE, nullptr); + gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr); + + GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink")); + gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING); + + GQuark quark = g_quark_from_static_string("peer"); + g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get()); + gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_element_sync_state_with_parent(queue); + gst_element_sync_state_with_parent(sink); + return; + } + + // A new pad for a planar channel was added in deinterleave. Plug + // in an appsink so we can pull the data from each + // channel. Pipeline looks like: + // ... deinterleave ! queue ! appsink. + GstElement* queue = gst_element_factory_make("queue", nullptr); + GstElement* sink = gst_element_factory_make("appsink", nullptr); + + GstAppSinkCallbacks callbacks; + callbacks.eos = nullptr; + callbacks.new_preroll = nullptr; + callbacks.new_sample = onAppsinkNewBufferCallback; + gst_app_sink_set_callbacks(GST_APP_SINK(sink), &callbacks, this, nullptr); + + g_object_set(sink, "async", FALSE, nullptr); + + GRefPtr<GstCaps> caps = adoptGRef(gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(gSampleBitRate), + "channels", G_TYPE_INT, 1, + "format", G_TYPE_STRING, GST_AUDIO_NE(F32), + "layout", G_TYPE_STRING, "interleaved", nullptr)); + + gst_app_sink_set_caps(GST_APP_SINK(sink), caps.get()); + + gst_bin_add_many(GST_BIN(m_audioSinkBin.get()), queue, sink, nullptr); + + GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue, "sink")); + gst_pad_link_full(pad, sinkPad.get(), GST_PAD_LINK_CHECK_NOTHING); + + GQuark quark = g_quark_from_static_string("peer"); + g_object_set_qdata(G_OBJECT(pad), quark, sinkPad.get()); + + gst_element_link_pads_full(queue, "src", sink, "sink", GST_PAD_LINK_CHECK_NOTHING); + + sinkPad = adoptGRef(gst_element_get_static_pad(sink, "sink")); + gst_pad_add_probe(sinkPad.get(), GST_PAD_PROBE_TYPE_EVENT_FLUSH, onAppsinkFlushCallback, this, nullptr); + + gst_element_sync_state_with_parent(queue); + gst_element_sync_state_with_parent(sink); +} + +void AudioSourceProviderGStreamer::handleRemovedDeinterleavePad(GstPad* pad) +{ + m_deinterleaveSourcePads--; + + // Remove the queue ! appsink chain downstream of deinterleave. 
+ GQuark quark = g_quark_from_static_string("peer"); + GstPad* sinkPad = reinterpret_cast<GstPad*>(g_object_get_qdata(G_OBJECT(pad), quark)); + GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(sinkPad)); + GRefPtr<GstPad> queueSrcPad = adoptGRef(gst_element_get_static_pad(queue.get(), "src")); + GRefPtr<GstPad> appsinkSinkPad = adoptGRef(gst_pad_get_peer(queueSrcPad.get())); + GRefPtr<GstElement> sink = adoptGRef(gst_pad_get_parent_element(appsinkSinkPad.get())); + gst_element_set_state(sink.get(), GST_STATE_NULL); + gst_element_set_state(queue.get(), GST_STATE_NULL); + gst_element_unlink(queue.get(), sink.get()); + gst_bin_remove_many(GST_BIN(m_audioSinkBin.get()), queue.get(), sink.get(), nullptr); +} + +void AudioSourceProviderGStreamer::deinterleavePadsConfigured() +{ + ASSERT(m_client); + ASSERT(m_deinterleaveSourcePads == gNumberOfChannels); + + m_client->setFormat(m_deinterleaveSourcePads, gSampleBitRate); +} + +void AudioSourceProviderGStreamer::clearAdapters() +{ + WTF::GMutexLocker<GMutex> lock(m_adapterMutex); + gst_adapter_clear(m_frontLeftAdapter); + gst_adapter_clear(m_frontRightAdapter); +} + +} // WebCore + +#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER) diff --git a/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h new file mode 100644 index 000000000..5b6480f3a --- /dev/null +++ b/Source/WebCore/platform/audio/gstreamer/AudioSourceProviderGStreamer.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2014 Igalia S.L + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AudioSourceProviderGStreamer_h +#define AudioSourceProviderGStreamer_h + +#if ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER) + +#include "AudioSourceProvider.h" +#include "GRefPtrGStreamer.h" +#include <gst/gst.h> +#include <wtf/Forward.h> +#include <wtf/Noncopyable.h> + +typedef struct _GstAdapter GstAdapter; +typedef struct _GstAppSink GstAppSink; + +namespace WebCore { + +class AudioSourceProviderGStreamer : public AudioSourceProvider { + WTF_MAKE_NONCOPYABLE(AudioSourceProviderGStreamer); +public: + AudioSourceProviderGStreamer(); + ~AudioSourceProviderGStreamer(); + + void configureAudioBin(GstElement* audioBin, GstElement* teePredecessor); + + void provideInput(AudioBus*, size_t framesToProcess) override; + void setClient(AudioSourceProviderClient*) override; + const AudioSourceProviderClient* client() const { return m_client; } + + void handleNewDeinterleavePad(GstPad*); + void deinterleavePadsConfigured(); + void handleRemovedDeinterleavePad(GstPad*); + + GstFlowReturn handleAudioBuffer(GstAppSink*); + GstElement* getAudioBin() const { return m_audioSinkBin.get(); } + void clearAdapters(); + +private: + GRefPtr<GstElement> m_audioSinkBin; + AudioSourceProviderClient* m_client; + int m_deinterleaveSourcePads; + GstAdapter* m_frontLeftAdapter; + GstAdapter* m_frontRightAdapter; + unsigned long m_deinterleavePadAddedHandlerId; + unsigned long m_deinterleaveNoMorePadsHandlerId; + unsigned long m_deinterleavePadRemovedHandlerId; + GMutex m_adapterMutex; +}; + +} +#endif // ENABLE(WEB_AUDIO) && ENABLE(VIDEO) && USE(GSTREAMER) + +#endif // AudioSourceProviderGStreamer_h diff --git a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp index ff672f371..445c9793c 100644 --- a/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp +++ b/Source/WebCore/platform/audio/gstreamer/WebKitWebAudioSourceGStreamer.cpp @@ -1,5 +1,6 @@ /* * Copyright (C) 2011, 2012 Igalia S.L + * Copyright (C) 2014 Sebastian Dröge <sebastian@centricular.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -26,9 +27,10 @@ #include "AudioIOCallback.h" #include "GRefPtrGStreamer.h" #include "GStreamerUtilities.h" -#include <gst/audio/audio.h> -#include <gst/pbutils/pbutils.h> -#include <wtf/gobject/GUniquePtr.h> +#include <gst/app/gstappsrc.h> +#include <gst/audio/audio-info.h> +#include <gst/pbutils/missing-plugins.h> +#include <wtf/glib/GUniquePtr.h> using namespace WebCore; @@ -51,18 +53,22 @@ struct _WebKitWebAudioSourcePrivate { AudioBus* bus; AudioIOCallback* provider; guint framesToPull; + guint bufferSize; GRefPtr<GstElement> interleave; - GRefPtr<GstElement> wavEncoder; GRefPtr<GstTask> task; GRecMutex mutex; - GSList* pads; // List of queue sink pads. One queue for each planar audio channel. - GstPad* sourcePad; // src pad of the element, interleaved wav data is pushed to it. + // List of appsrc. One appsrc for each planar audio channel. + Vector<GRefPtr<GstElement>> sources; - bool newStreamEventPending; - GstSegment segment; + // src pad of the element, interleaved wav data is pushed to it. 
+ GstPad* sourcePad; + + guint64 numberOfSamples; + + GRefPtr<GstBufferPool> pool; }; enum { @@ -73,9 +79,9 @@ enum { }; static GstStaticPadTemplate srcTemplate = GST_STATIC_PAD_TEMPLATE("src", - GST_PAD_SRC, - GST_PAD_ALWAYS, - GST_STATIC_CAPS("audio/x-wav")); + GST_PAD_SRC, + GST_PAD_ALWAYS, + GST_STATIC_CAPS(GST_AUDIO_CAPS_MAKE(GST_AUDIO_NE(F32)))); GST_DEBUG_CATEGORY_STATIC(webkit_web_audio_src_debug); #define GST_CAT_DEFAULT webkit_web_audio_src_debug @@ -91,8 +97,8 @@ static GstCaps* getGStreamerMonoAudioCaps(float sampleRate) { return gst_caps_new_simple("audio/x-raw", "rate", G_TYPE_INT, static_cast<int>(sampleRate), "channels", G_TYPE_INT, 1, - "format", G_TYPE_STRING, gst_audio_format_to_string(GST_AUDIO_FORMAT_F32), - "layout", G_TYPE_STRING, "non-interleaved", NULL); + "format", G_TYPE_STRING, GST_AUDIO_NE(F32), + "layout", G_TYPE_STRING, "interleaved", nullptr); } static GstAudioChannelPosition webKitWebAudioGStreamerChannelPosition(int channelIndex) @@ -178,17 +184,14 @@ static void webkit_web_audio_src_init(WebKitWebAudioSrc* src) src->priv = priv; new (priv) WebKitWebAudioSourcePrivate(); - priv->sourcePad = webkitGstGhostPadFromStaticTemplate(&srcTemplate, "src", 0); + priv->sourcePad = webkitGstGhostPadFromStaticTemplate(&srcTemplate, "src", nullptr); gst_element_add_pad(GST_ELEMENT(src), priv->sourcePad); - priv->provider = 0; - priv->bus = 0; - - priv->newStreamEventPending = true; - gst_segment_init(&priv->segment, GST_FORMAT_TIME); + priv->provider = nullptr; + priv->bus = nullptr; g_rec_mutex_init(&priv->mutex); - priv->task = gst_task_new(reinterpret_cast<GstTaskFunction>(webKitWebAudioSrcLoop), src, 0); + priv->task = adoptGRef(gst_task_new(reinterpret_cast<GstTaskFunction>(webKitWebAudioSrcLoop), src, nullptr)); gst_task_set_lock(priv->task.get(), &priv->mutex); } @@ -202,54 +205,40 @@ static void webKitWebAudioSrcConstructed(GObject* object) ASSERT(priv->provider); ASSERT(priv->sampleRate); - priv->interleave = gst_element_factory_make("interleave", 0); - priv->wavEncoder = gst_element_factory_make("wavenc", 0); + priv->interleave = gst_element_factory_make("interleave", nullptr); if (!priv->interleave) { GST_ERROR_OBJECT(src, "Failed to create interleave"); return; } - if (!priv->wavEncoder) { - GST_ERROR_OBJECT(src, "Failed to create wavenc"); - return; - } - - gst_bin_add_many(GST_BIN(src), priv->interleave.get(), priv->wavEncoder.get(), NULL); - gst_element_link_pads_full(priv->interleave.get(), "src", priv->wavEncoder.get(), "sink", GST_PAD_LINK_CHECK_NOTHING); + gst_bin_add(GST_BIN(src), priv->interleave.get()); // For each channel of the bus create a new upstream branch for interleave, like: - // queue ! capsfilter ! audioconvert. which is plugged to a new interleave request sinkpad. + // appsrc ! . which is plugged to a new interleave request sinkpad. 
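Each appsrc mentioned above carries a single planar channel, so its caps are derived from GstAudioInfo with an explicit channel position (front-left, front-right, ...) rather than bare channels=1 caps; that is what getGStreamerMonoAudioCaps() plus GST_AUDIO_INFO_POSITION achieve in the loop that follows. A standalone sketch of that caps construction (makeMonoCaps is an illustrative helper name):

#include <gst/gst.h>
#include <gst/audio/audio.h>

// Build caps for a single F32 channel at the given rate, stamped with an
// explicit channel position.
GstCaps* makeMonoCaps(int sampleRate, GstAudioChannelPosition position)
{
    GstCaps* baseCaps = gst_caps_new_simple("audio/x-raw",
        "rate", G_TYPE_INT, sampleRate,
        "channels", G_TYPE_INT, 1,
        "format", G_TYPE_STRING, GST_AUDIO_NE(F32),
        "layout", G_TYPE_STRING, "interleaved", nullptr);

    GstAudioInfo info;
    gst_audio_info_from_caps(&info, baseCaps);
    GST_AUDIO_INFO_POSITION(&info, 0) = position;
    gst_caps_unref(baseCaps);

    // Returns new caps carrying the channel-mask for the requested position.
    return gst_audio_info_to_caps(&info);
}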
for (unsigned channelIndex = 0; channelIndex < priv->bus->numberOfChannels(); channelIndex++) { - GUniquePtr<gchar> queueName(g_strdup_printf("webaudioQueue%u", channelIndex)); - GstElement* queue = gst_element_factory_make("queue", queueName.get()); - GstElement* capsfilter = gst_element_factory_make("capsfilter", 0); - GstElement* audioconvert = gst_element_factory_make("audioconvert", 0); - + GUniquePtr<gchar> appsrcName(g_strdup_printf("webaudioSrc%u", channelIndex)); + GRefPtr<GstElement> appsrc = gst_element_factory_make("appsrc", appsrcName.get()); GRefPtr<GstCaps> monoCaps = adoptGRef(getGStreamerMonoAudioCaps(priv->sampleRate)); GstAudioInfo info; gst_audio_info_from_caps(&info, monoCaps.get()); GST_AUDIO_INFO_POSITION(&info, 0) = webKitWebAudioGStreamerChannelPosition(channelIndex); GRefPtr<GstCaps> caps = adoptGRef(gst_audio_info_to_caps(&info)); - g_object_set(capsfilter, "caps", caps.get(), NULL); - - // Configure the queue for minimal latency. - g_object_set(queue, "max-size-buffers", static_cast<guint>(1), NULL); - GstPad* pad = gst_element_get_static_pad(queue, "sink"); - priv->pads = g_slist_prepend(priv->pads, pad); + // Configure the appsrc for minimal latency. + g_object_set(appsrc.get(), "max-bytes", static_cast<guint64>(2 * priv->bufferSize), "block", TRUE, + "blocksize", priv->bufferSize, + "format", GST_FORMAT_TIME, "caps", caps.get(), nullptr); - gst_bin_add_many(GST_BIN(src), queue, capsfilter, audioconvert, NULL); - gst_element_link_pads_full(queue, "src", capsfilter, "sink", GST_PAD_LINK_CHECK_NOTHING); - gst_element_link_pads_full(capsfilter, "src", audioconvert, "sink", GST_PAD_LINK_CHECK_NOTHING); - gst_element_link_pads_full(audioconvert, "src", priv->interleave.get(), 0, GST_PAD_LINK_CHECK_NOTHING); + priv->sources.append(appsrc); + gst_bin_add(GST_BIN(src), appsrc.get()); + gst_element_link_pads_full(appsrc.get(), "src", priv->interleave.get(), "sink_%u", GST_PAD_LINK_CHECK_NOTHING); } - priv->pads = g_slist_reverse(priv->pads); - // wavenc's src pad is the only visible pad of our element. - GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->wavEncoder.get(), "src")); + // interleave's src pad is the only visible pad of our element. 
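The appsrcs replace the old queue/capsfilter/audioconvert branches and are tuned for low latency: at most roughly two render quanta may be queued ("max-bytes"), pushes block when that cap is reached, and buffers carry timestamps ("format" is GST_FORMAT_TIME). A sketch of that configuration and of pushing one timestamped buffer (the helper names are illustrative, not WebKit API):

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <cstring>

// Configure an appsrc the way the constructor above does: block the producer
// when about two render quanta are queued, and timestamp buffers in time format.
void configureAppSrc(GstElement* appsrc, GstCaps* caps, guint bufferSize)
{
    g_object_set(appsrc,
        "max-bytes", static_cast<guint64>(2 * bufferSize),
        "block", TRUE,
        "blocksize", bufferSize,
        "format", GST_FORMAT_TIME,
        "caps", caps, nullptr);
}

// Push one quantum of silence; gst_app_src_push_buffer takes ownership of the buffer.
GstFlowReturn pushSilence(GstElement* appsrc, guint bufferSize, GstClockTime timestamp, GstClockTime duration)
{
    GstBuffer* buffer = gst_buffer_new_and_alloc(bufferSize);
    GstMapInfo map;
    gst_buffer_map(buffer, &map, GST_MAP_WRITE);
    std::memset(map.data, 0, map.size);
    gst_buffer_unmap(buffer, &map);

    GST_BUFFER_TIMESTAMP(buffer) = timestamp;
    GST_BUFFER_DURATION(buffer) = duration;
    return gst_app_src_push_buffer(GST_APP_SRC(appsrc), buffer);
}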
+ GRefPtr<GstPad> targetPad = adoptGRef(gst_element_get_static_pad(priv->interleave.get(), "src")); gst_ghost_pad_set_target(GST_GHOST_PAD(priv->sourcePad), targetPad.get()); } @@ -260,8 +249,6 @@ static void webKitWebAudioSrcFinalize(GObject* object) g_rec_mutex_clear(&priv->mutex); - g_slist_free_full(priv->pads, reinterpret_cast<GDestroyNotify>(gst_object_unref)); - priv->~WebKitWebAudioSourcePrivate(); GST_CALL_PARENT(G_OBJECT_CLASS, finalize, ((GObject* )(src))); } @@ -283,6 +270,7 @@ static void webKitWebAudioSrcSetProperty(GObject* object, guint propertyId, cons break; case PROP_FRAMES: priv->framesToPull = g_value_get_uint(value); + priv->bufferSize = sizeof(float) * priv->framesToPull; break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, propertyId, pspec); @@ -320,68 +308,65 @@ static void webKitWebAudioSrcLoop(WebKitWebAudioSrc* src) ASSERT(priv->bus); ASSERT(priv->provider); - if (!priv->provider || !priv->bus) + if (!priv->provider || !priv->bus) { + GST_ELEMENT_ERROR(src, CORE, FAILED, ("Internal WebAudioSrc error"), ("Can't start without provider or bus")); + gst_task_stop(src->priv->task.get()); return; - - GSList* channelBufferList = 0; - register int i; - unsigned bufferSize = priv->framesToPull * sizeof(float); - for (i = g_slist_length(priv->pads) - 1; i >= 0; i--) { - GstBuffer* channelBuffer = gst_buffer_new_and_alloc(bufferSize); - ASSERT(channelBuffer); - channelBufferList = g_slist_prepend(channelBufferList, channelBuffer); - GstMapInfo info; - gst_buffer_map(channelBuffer, &info, GST_MAP_READ); - priv->bus->setChannelMemory(i, reinterpret_cast<float*>(info.data), priv->framesToPull); - gst_buffer_unmap(channelBuffer, &info); } - // FIXME: Add support for local/live audio input. - priv->provider->render(0, priv->bus, priv->framesToPull); - - GSList* padsIt = priv->pads; - GSList* buffersIt = channelBufferList; - -#if GST_CHECK_VERSION(1, 2, 0) - guint groupId = 0; - if (priv->newStreamEventPending) - groupId = gst_util_group_id_next(); -#endif - - for (i = 0; padsIt && buffersIt; padsIt = g_slist_next(padsIt), buffersIt = g_slist_next(buffersIt), ++i) { - GstPad* pad = static_cast<GstPad*>(padsIt->data); - GstBuffer* channelBuffer = static_cast<GstBuffer*>(buffersIt->data); - - // Send stream-start, segment and caps events downstream, along with the first buffer. 
- if (priv->newStreamEventPending) { - GRefPtr<GstElement> queue = adoptGRef(gst_pad_get_parent_element(pad)); - GRefPtr<GstPad> sinkPad = adoptGRef(gst_element_get_static_pad(queue.get(), "sink")); - GUniquePtr<gchar> queueName(gst_element_get_name(queue.get())); - GUniquePtr<gchar> streamId(g_strdup_printf("webaudio/%s", queueName.get())); - GstEvent* streamStartEvent = gst_event_new_stream_start(streamId.get()); -#if GST_CHECK_VERSION(1, 2, 0) - gst_event_set_group_id(streamStartEvent, groupId); -#endif - gst_pad_send_event(sinkPad.get(), streamStartEvent); - - GRefPtr<GstCaps> monoCaps = adoptGRef(getGStreamerMonoAudioCaps(priv->sampleRate)); - GstAudioInfo info; - gst_audio_info_from_caps(&info, monoCaps.get()); - GST_AUDIO_INFO_POSITION(&info, 0) = webKitWebAudioGStreamerChannelPosition(i); - GRefPtr<GstCaps> capsWithChannelPosition = adoptGRef(gst_audio_info_to_caps(&info)); - gst_pad_send_event(sinkPad.get(), gst_event_new_caps(capsWithChannelPosition.get())); - - gst_pad_send_event(sinkPad.get(), gst_event_new_segment(&priv->segment)); + ASSERT(priv->pool); + GstClockTime timestamp = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate); + priv->numberOfSamples += priv->framesToPull; + GstClockTime duration = gst_util_uint64_scale(priv->numberOfSamples, GST_SECOND, priv->sampleRate) - timestamp; + + Vector<GRefPtr<GstBuffer>> channelBufferList; + channelBufferList.reserveInitialCapacity(priv->sources.size()); + for (unsigned i = 0; i < priv->sources.size(); ++i) { + GRefPtr<GstBuffer> buffer; + GstFlowReturn ret = gst_buffer_pool_acquire_buffer(priv->pool.get(), &buffer.outPtr(), nullptr); + if (ret != GST_FLOW_OK) { + for (auto& buffer : channelBufferList) + unmapGstBuffer(buffer.get()); + + // FLUSHING and EOS are not errors. + if (ret < GST_FLOW_EOS || ret == GST_FLOW_NOT_LINKED) + GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to allocate buffer for flow: %s", gst_flow_get_name(ret))); + gst_task_stop(src->priv->task.get()); + return; } - GstFlowReturn ret = gst_pad_chain(pad, channelBuffer); - if (ret != GST_FLOW_OK) - GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to push buffer on %s:%s flow: %s", GST_DEBUG_PAD_NAME(pad), gst_flow_get_name(ret))); + ASSERT(buffer); + GST_BUFFER_TIMESTAMP(buffer.get()) = timestamp; + GST_BUFFER_DURATION(buffer.get()) = duration; + mapGstBuffer(buffer.get(), GST_MAP_READWRITE); + priv->bus->setChannelMemory(i, reinterpret_cast<float*>(getGstBufferDataPointer(buffer.get())), priv->framesToPull); + channelBufferList.uncheckedAppend(WTFMove(buffer)); } - priv->newStreamEventPending = false; - - g_slist_free(channelBufferList); + // FIXME: Add support for local/live audio input. + priv->provider->render(nullptr, priv->bus, priv->framesToPull); + + ASSERT(channelBufferList.size() == priv->sources.size()); + bool failed = false; + for (unsigned i = 0; i < priv->sources.size(); ++i) { + // Unmap before passing on the buffer. + auto& buffer = channelBufferList[i]; + unmapGstBuffer(buffer.get()); + + if (failed) + continue; + + auto& appsrc = priv->sources[i]; + // Leak the buffer ref, because gst_app_src_push_buffer steals it. + GstFlowReturn ret = gst_app_src_push_buffer(GST_APP_SRC(appsrc.get()), buffer.leakRef()); + if (ret != GST_FLOW_OK) { + // FLUSHING and EOS are not errors. 
+ if (ret < GST_FLOW_EOS || ret == GST_FLOW_NOT_LINKED) + GST_ELEMENT_ERROR(src, CORE, PAD, ("Internal WebAudioSrc error"), ("Failed to push buffer on %s flow: %s", GST_OBJECT_NAME(appsrc.get()), gst_flow_get_name(ret))); + gst_task_stop(src->priv->task.get()); + failed = true; + } + } } static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, GstStateChange transition) @@ -393,14 +378,10 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs case GST_STATE_CHANGE_NULL_TO_READY: if (!src->priv->interleave) { gst_element_post_message(element, gst_missing_element_message_new(element, "interleave")); - GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no interleave")); - return GST_STATE_CHANGE_FAILURE; - } - if (!src->priv->wavEncoder) { - gst_element_post_message(element, gst_missing_element_message_new(element, "wavenc")); - GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (0), ("no wavenc")); + GST_ELEMENT_ERROR(src, CORE, MISSING_PLUGIN, (nullptr), ("no interleave")); return GST_STATE_CHANGE_FAILURE; } + src->priv->numberOfSamples = 0; break; default: break; @@ -413,16 +394,29 @@ static GstStateChangeReturn webKitWebAudioSrcChangeState(GstElement* element, Gs } switch (transition) { - case GST_STATE_CHANGE_READY_TO_PAUSED: + case GST_STATE_CHANGE_READY_TO_PAUSED: { GST_DEBUG_OBJECT(src, "READY->PAUSED"); - if (!gst_task_start(src->priv->task.get())) + + src->priv->pool = gst_buffer_pool_new(); + GstStructure* config = gst_buffer_pool_get_config(src->priv->pool.get()); + gst_buffer_pool_config_set_params(config, nullptr, src->priv->bufferSize, 0, 0); + gst_buffer_pool_set_config(src->priv->pool.get(), config); + if (!gst_buffer_pool_set_active(src->priv->pool.get(), TRUE)) + returnValue = GST_STATE_CHANGE_FAILURE; + else if (!gst_task_start(src->priv->task.get())) returnValue = GST_STATE_CHANGE_FAILURE; break; + } case GST_STATE_CHANGE_PAUSED_TO_READY: - src->priv->newStreamEventPending = true; GST_DEBUG_OBJECT(src, "PAUSED->READY"); + +#if GST_CHECK_VERSION(1, 4, 0) + gst_buffer_pool_set_flushing(src->priv->pool.get(), TRUE); +#endif if (!gst_task_join(src->priv->task.get())) returnValue = GST_STATE_CHANGE_FAILURE; + gst_buffer_pool_set_active(src->priv->pool.get(), FALSE); + src->priv->pool = nullptr; break; default: break; |
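webKitWebAudioSrcLoop() now draws its channel buffers from a GstBufferPool that the READY->PAUSED transition creates and activates, and stamps each buffer from a running sample counter so downstream elements see a continuous timeline. A sketch of that pool setup and timestamp arithmetic, assuming a fixed buffer size (helper names are illustrative):

#include <gst/gst.h>

// Create and activate a pool of fixed-size buffers, as the READY->PAUSED
// branch above does. The caller owns the returned pool.
GstBufferPool* createPool(guint bufferSize)
{
    GstBufferPool* pool = gst_buffer_pool_new();
    GstStructure* config = gst_buffer_pool_get_config(pool);
    gst_buffer_pool_config_set_params(config, nullptr, bufferSize, 0, 0);
    gst_buffer_pool_set_config(pool, config);
    gst_buffer_pool_set_active(pool, TRUE);
    return pool;
}

// Acquire one buffer and stamp it from the running sample counter, mirroring
// the timestamp/duration computation in webKitWebAudioSrcLoop().
GstBuffer* acquireTimestampedBuffer(GstBufferPool* pool, guint64& numberOfSamples, guint framesToPull, guint sampleRate)
{
    GstBuffer* buffer = nullptr;
    if (gst_buffer_pool_acquire_buffer(pool, &buffer, nullptr) != GST_FLOW_OK)
        return nullptr;

    GstClockTime timestamp = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate);
    numberOfSamples += framesToPull;
    GstClockTime duration = gst_util_uint64_scale(numberOfSamples, GST_SECOND, sampleRate) - timestamp;

    GST_BUFFER_TIMESTAMP(buffer) = timestamp;
    GST_BUFFER_DURATION(buffer) = duration;
    return buffer;
}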