From: Ben Limmer
Subject: [Gnash-commit] /srv/bzr/gnash/trunk r11300: made base implementation of AudioInputGst and a sample testcase
Date: Wed, 22 Jul 2009 10:59:56 -0600
User-agent: Bazaar (1.13.1)

------------------------------------------------------------
revno: 11300
committer: Ben Limmer <address@hidden>
branch nick: trunk
timestamp: Wed 2009-07-22 10:59:56 -0600
message:
  made base implementation of AudioInputGst and a sample testcase
added:
  testsuite/libmedia.all/test_audioinput.cpp
modified:
  libmedia/gst/AudioInputGst.cpp
  libmedia/gst/AudioInputGst.h
  testsuite/libmedia.all/Makefile.am
    ------------------------------------------------------------
    revno: 11290.1.1
    committer: Ben Limmer <address@hidden>
    branch nick: cam_mic_impl_2
    timestamp: Wed 2009-07-22 10:56:41 -0600
    message:
      made base implementation of AudioInputGst and sample testcase
    added:
      testsuite/libmedia.all/test_audioinput.cpp
    modified:
      libmedia/gst/AudioInputGst.cpp
      libmedia/gst/AudioInputGst.h
      testsuite/libmedia.all/Makefile.am
=== modified file 'libmedia/gst/AudioInputGst.cpp'
--- a/libmedia/gst/AudioInputGst.cpp    2009-07-13 21:12:51 +0000
+++ b/libmedia/gst/AudioInputGst.cpp    2009-07-22 16:56:41 +0000
@@ -21,19 +21,560 @@
 #include "gnashconfig.h"
 #endif
 
+#include "gst/gst.h"
 #include "AudioInputGst.h"
+#include <gst/interfaces/propertyprobe.h>
 #include "log.h"
+#include "rc.h"
+
+namespace {
+    //get rc file for default mic selection
+    gnash::RcInitFile& rcfile = gnash::RcInitFile::getDefaultInstance();
+}
 
 namespace gnash {
 namespace media {
 namespace gst {
        AudioInputGst::AudioInputGst() {
-               log_unimpl("Audio Input constructor");
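+               //gst_init must be called before any other GStreamer function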
+               gst_init(NULL,NULL);
+        _numdevs = 0;
+        log_unimpl("Audio Input constructor");
        }
        
        AudioInputGst::~AudioInputGst() {
                log_unimpl("Audio Input destructor");
        }
+    
+    void
+    AudioInputGst::findAudioDevs() {
+        _numdevs = 0;
+        
+        //enumerate audio test sources
+        GstElement *element;
+        element = gst_element_factory_make ("audiotestsrc", "audtestsrc");
+        
+        if (element == NULL) {
+            log_error("%s: Could not create audio test source.\n", 
__FUNCTION__);
+            _audioVect.push_back(NULL);
+            _numdevs += 1;
+        } else {
+            _audioVect.push_back(new GnashAudio);
+            _audioVect[_numdevs]->setElementPtr(element);
+            _audioVect[_numdevs]->setGstreamerSrc(g_strdup_printf("audiotestsrc"));
+            _audioVect[_numdevs]->setProductName(g_strdup_printf("audiotest"));
+            _numdevs += 1;
+        }
+        
+        
+        //detect pulse audio sources
+        GstPropertyProbe *probe;
+        GValueArray *devarr;
+        element = NULL;
+        gint i;
+        
+        element = gst_element_factory_make ("pulsesrc", "pulsesrc");
+        probe = GST_PROPERTY_PROBE (element);
+        devarr = gst_property_probe_probe_and_get_values_name (probe, "device");
+        for (i = 0; devarr != NULL && i < devarr->n_values; ++i) {
+            GValue *val;
+            gchar *dev_name = NULL;
+            
+            val = g_value_array_get_nth (devarr, i);
+            g_object_set (element, "device", g_value_get_string (val), NULL);
+            gst_element_set_state (element, GST_STATE_PLAYING);
+            g_object_get (element, "device-name", &dev_name, NULL);
+            gst_element_set_state (element, GST_STATE_NULL);
+            if (g_strcmp0(dev_name, "null") == 0 || (strstr(dev_name, "Monitor") != NULL)) {
+                log_trace("No pulse audio input devices.");
+            }
+            else { 
+                _audioVect.push_back(new GnashAudio);
+                _audioVect[_numdevs]->setElementPtr(element);
+                _audioVect[_numdevs]->setGstreamerSrc(g_strdup_printf("pulsesrc"));
+                _audioVect[_numdevs]->setProductName(dev_name);
+                
+                gchar *location;
+                g_object_get (element, "device", &location , NULL);
+                _audioVect[_numdevs]->setDevLocation(location);
+                _numdevs += 1;
+            }
+        }
+        if (devarr) {
+            g_value_array_free (devarr);
+        }
+    }
+    
+    bool
+    AudioInputGst::checkSupportedFormats(GnashAudio *aud, GstCaps *caps) {
+        gint i;
+        gint num_structs;
+        
+        num_structs = gst_caps_get_size (caps);
+        bool ok = false;
+        
+        for (i=0; i < num_structs; i++) {
+            GstStructure *structure;
+            const GValue *width, *height;
+            
+            //this structure is used to probe the source for information
+            structure = gst_caps_get_structure (caps, i);
+            
+            //check to see if x-raw-int and/or x-raw-float are available to
+            //use with the selected microphone
+            if (!gst_structure_has_name (structure, "audio/x-raw-int") &&
+                !gst_structure_has_name (structure, "audio/x-raw-float")) 
+            {
+              continue;
+            } else {
+                ok = true;
+            }
+        }
+        return ok;
+    }
+    
+    void
+    AudioInputGst::getSelectedCaps(int devselect) {
+        GstElement *pipeline;
+        gchar *command;
+        GError *error = NULL;
+        GstStateChangeReturn return_val;
+        GstBus *bus;
+        GstMessage *message;
+        
+        GnashAudio *data_struct = _audioVect[devselect];
+        GstElement *element;
+        element = data_struct->getElementPtr();
+        
+        //create tester pipeline to enumerate properties
+        command = g_strdup_printf ("%s name=src device=%s ! fakesink",
+            data_struct->getGstreamerSrc(), data_struct->getDevLocation());
+        pipeline = gst_parse_launch(command, &error);
+        if ((pipeline != NULL) && (error == NULL)) {
+            //Wait at most 5 seconds for the pipeline to start playing
+            gst_element_set_state (pipeline, GST_STATE_PLAYING);
+            return_val = 
+                gst_element_get_state (pipeline, NULL, NULL, 5 * GST_SECOND);
+            
+            //errors on bus?
+            bus = gst_element_get_bus (pipeline);
+            message = gst_bus_poll (bus, GST_MESSAGE_ERROR, 0);
+            
+            if (GST_IS_OBJECT(bus)){
+                gst_object_unref (bus);
+            } else {
+                log_error("%s: Pipeline bus isn't an object for some reason",
+                    __FUNCTION__);
+            }
+            //if everything above worked properly, begin probing for values
+            if ((return_val == GST_STATE_CHANGE_SUCCESS) && (message == NULL)) {
+                GstElement *src;
+                GstPad *pad;
+                gchar *name;
+                GstCaps *caps;
+                
+                gst_element_set_state(pipeline, GST_STATE_PAUSED);
+                
+                src = gst_bin_get_by_name(GST_BIN(pipeline), "src");
+                
+                //get the pad, find the capabilities for probing in supported formats
+                pad  = gst_element_get_pad (src, "src");
+                caps = gst_pad_get_caps (pad);
+                if (GST_IS_OBJECT(pad)) {
+                    gst_object_unref (pad);
+                } else {
+                    log_error("%s: Template pad isn't an object for some 
reason",
+                        __FUNCTION__);
+                }
+                bool ok;
+                ok = checkSupportedFormats(data_struct, caps);
+                if (ok != true) {
+                    log_error("The input device you selected isn't supported (yet)");
+                }
+                gst_caps_unref(caps);
+            }
+            gst_element_set_state (pipeline, GST_STATE_NULL);
+            if (GST_IS_OBJECT(pipeline)){
+                gst_object_unref (pipeline);
+            } else {
+                log_error("%s: pipeline isn't an object for some reason",
+                    __FUNCTION__);
+            }
+        }
+       
+        if (error) {
+          g_error_free (error);
+        }
+        g_free (command);
+    }
+    
+    GnashAudioPrivate*
+    AudioInputGst::transferToPrivate(int devselect) {
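+        //wrap the selected GnashAudio device in a GnashAudioPrivate structure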
+        GnashAudioPrivate *audio = new GnashAudioPrivate;
+        if (audio != NULL) {
+            audio->setAudioDevice(_audioVect[devselect]);
+            audio->setDeviceName(_audioVect[devselect]->getProductName());
+            _globalAudio = audio;
+        } else {
+            log_error("%s: was passed a NULL pointer", __FUNCTION__);
+        }
+        return audio;
+    }
+    
+    gboolean
+    AudioInputGst::audioCreateSourceBin(GnashAudioPrivate *audio) {
+        GError *error = NULL;
+        gchar *command = NULL;
+        if(g_strcmp0(audio->_deviceName, "audiotest") == 0) {
+            log_trace("%s: You don't have any webcams chosen, using 
audiotestsrc",
+                __FUNCTION__);
+            audio->_audioSourceBin = gst_parse_bin_from_description (
+                "audiotestsrc name=audioSource",
+                TRUE, &error);
+            log_debug("Command: audiotestsrc name=audioSource");
+            audio->audioSource = gst_bin_get_by_name (GST_BIN (audio->_audioSourceBin),
+                        "audioSource");
+            return true;
+        } else {
+        command = g_strdup_printf ("%s name=audioSource device=%s ! capsfilter name=capsfilter caps=audio/x-raw-int,signed=true,channels=2,rate=44100;audio/x-raw-float,channels=2,rate=44100",
+            audio->_audioDevice->getGstreamerSrc(),
+            audio->_audioDevice->getDevLocation());
+        
+        log_debug ("GstPipeline command is: %s\n", command);
+        
+        audio->_audioSourceBin = gst_parse_bin_from_description(command, TRUE,
+                                    &error);
+        if (audio->_audioSourceBin == NULL) {
+            log_error ("%s: Creation of the audioSourceBin failed",
+                __FUNCTION__);
+            log_error ("the error was %s\n", error->message);
+            g_free(command);
+            return false;
+        }
+        g_free(command);
+        audio->audioSource = gst_bin_get_by_name (GST_BIN (audio->_audioSourceBin),
+                    "audioSource");
+        return true;
+        }
+    }
+    
+    gboolean
+    AudioInputGst::audioCreateMainBin(GnashAudioPrivate *audio) {
+        GstElement *tee, *audioPlaybackQueue, *saveQueue;
+        gboolean ok;
+        GstPad  *pad;
+        
+        //initialize a new GST pipeline
+        audio->_pipeline = gst_pipeline_new("pipeline");
+        
+        audio->_audioMainBin = gst_bin_new ("audioMainBin");
+        
+        ok = audioCreateSourceBin(audio);
+        if (ok != true) {
+            log_error("%s: audioCreateSourceBin failed!", __FUNCTION__);
+            return false;
+        }
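+        //the tee element splits the captured stream so it can feed both the
+        //save queue and the playback queue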
+        if ((tee = gst_element_factory_make ("tee", "tee")) == NULL) {
+            log_error("%s: problem creating tee element", __FUNCTION__);
+            return false;
+        }
+        if ((saveQueue = gst_element_factory_make("queue", "saveQueue")) == NULL) {
+            log_error("%s: problem creating save_queue element", __FUNCTION__);
+            return false;
+        }
+        if ((audioPlaybackQueue = 
+            gst_element_factory_make("queue", "audioPlaybackQueue")) == NULL) {
+            log_error("%s: problem creating audioPlaybackQueue element", 
__FUNCTION__);
+            return false;
+        }
+        gst_bin_add_many (GST_BIN (audio->_audioMainBin), audio->_audioSourceBin,
+                        tee, saveQueue, audioPlaybackQueue, NULL);
+        ok = gst_element_link(audio->_audioSourceBin, tee);
+        if (ok != true) {
+            log_error("%s: couldn't link audioSourceBin and tee", 
__FUNCTION__);
+            return false;
+        }
+        ok &= gst_element_link_many (tee, saveQueue, NULL);
+        if (ok != true) {
+            log_error("%s: couldn't link tee and saveQueue", __FUNCTION__);
+            return false;
+        }
+        ok &= gst_element_link_many (tee, audioPlaybackQueue, NULL);
+        if (ok != true) {
+            log_error("%s: couldn't link tee and audioPlaybackQueue", 
__FUNCTION__);
+            return false;
+        }
+        
+        gst_bin_add (GST_BIN(audio->_pipeline), audio->_audioMainBin);
+       
+        //add ghostpad to saveQueue (allows connections between bins)
+        pad = gst_element_get_pad (saveQueue, "src");
+        if (pad == NULL) {
+            log_error("%s: couldn't get saveQueueSrcPad", __FUNCTION__);
+            return false;
+        }
+        gst_element_add_pad (audio->_audioMainBin,
+            gst_ghost_pad_new ("saveQueueSrc", pad));
+        gst_object_unref (GST_OBJECT (pad));
+        
+        //add ghostpad to video_display_queue
+        pad = gst_element_get_pad (audioPlaybackQueue, "src");
+        if (pad == NULL) {
+            log_error("%s: couldn't get audioPlaybackQueue", __FUNCTION__);
+            return false;
+        }
+        gst_element_add_pad (audio->_audioMainBin,
+            gst_ghost_pad_new ("audioPlaybackQueueSrc", pad));
+        gst_object_unref (GST_OBJECT (pad));
+
+
+        if (!ok) {
+            log_error("%s: Unable to create main pipeline", __FUNCTION__);
+            return false;
+        }
+        return true;
+    }
+    
+    gboolean
+    AudioInputGst::audioCreatePlaybackBin(GnashAudioPrivate *audio) {
+        GstElement* autosink;
+        GstPad* pad;
+        gboolean ok;
+        
+        audio->_audioPlaybackBin = gst_bin_new("playbackBin");
+        
+        if ((autosink = gst_element_factory_make ("autoaudiosink", "audiosink")) == NULL) {
+             log_error("%s: There was a problem making the audiosink!", __FUNCTION__);
+             return false;
+        }
+        
+        ok = gst_bin_add(GST_BIN(audio->_audioPlaybackBin), autosink);
+        
+        ok &= gst_bin_add(GST_BIN(audio->_pipeline), audio->_audioPlaybackBin);
+        
+        //create ghostpad which can be used to connect this bin to the
+        //video_display_queue src ghostpad
+        pad = gst_element_get_pad (autosink, "sink");
+        gst_element_add_pad (audio->_audioPlaybackBin, gst_ghost_pad_new ("sink", pad));
+        gst_object_unref (GST_OBJECT (pad));
+        
+        return ok;
+    }
+    
+    gboolean
+    AudioInputGst::makeAudioSourcePlaybackLink(GnashAudioPrivate *audio) {
+        gboolean ok;
+        GstPad *audioPlaybackQueueSrc, *audioPlaybackBinSink;
+        GstPadLinkReturn padreturn;
+        
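+        //grab the ghost pads created in audioCreateMainBin and
+        //audioCreatePlaybackBin so the two bins can be linked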
+        audioPlaybackQueueSrc = gst_element_get_pad(audio->_audioMainBin,
+            "audioPlaybackQueueSrc");
+        audioPlaybackBinSink = gst_element_get_pad(audio->_audioPlaybackBin,
+            "sink");
+        
+        padreturn = gst_pad_link(audioPlaybackQueueSrc, audioPlaybackBinSink);
+        
+        if (padreturn == GST_PAD_LINK_OK) {
+            return true;
+        } else {
+            log_error("something went wrong in the makeSourcePlaybackLink 
function");
+            return false;
+        }
+    }
+    
+    //to handle messages while the main capture loop is running
+    gboolean
+    audio_bus_call (GstBus     *bus,
+              GstMessage *msg,
+              gpointer data)
+    {
+      switch (GST_MESSAGE_TYPE (msg)) {
+
+        case GST_MESSAGE_EOS:
+            log_trace ("End of stream\n");
+            g_main_loop_quit (((class GnashAudioPrivate *)data)->_loop);
+            break;
+        
+        case GST_MESSAGE_ERROR: {
+            gchar  *debug;
+            GError *error;
+
+            gst_message_parse_error (msg, &error, &debug);
+            g_free (debug);
+
+            log_error ("Error: %s\n", error->message);
+            g_error_free (error);
+            
+            g_main_loop_quit (((class GnashAudioPrivate *)data)->_loop);
+            break;
+        }
+        default:
+            break;
+      }
+
+      return TRUE;
+    }
+    
+    gboolean
+    AudioInputGst::audioCreateSaveBin(GnashAudioPrivate* audio) {
+        GstElement *audioConvert, *audioEnc, *filesink;
+        GstPad* pad;
+        gboolean ok;
+        
+        audio->_audioSaveBin = gst_bin_new ("audioSaveBin");
+        
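+        //build the save chain: audioconvert ! vorbisenc ! oggmux ! filesink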
+        if ((audioConvert = gst_element_factory_make("audioconvert", "audio_convert")) == NULL) {
+            log_error("%s: Couldn't make audioconvert element", __FUNCTION__);
+            return false;
+        }
+        if ((audioEnc = gst_element_factory_make("vorbisenc", "audio_enc")) == NULL) {
+            log_error("%s: Couldn't make vorbisenc element", __FUNCTION__);
+            return false;
+        }
+        if ((audio->_mux = gst_element_factory_make("oggmux", "mux")) == NULL) {
+            log_error("%s: Couldn't make oggmux element", __FUNCTION__);
+            return false;
+        }
+        if ((filesink = gst_element_factory_make("filesink", "filesink")) == NULL) {
+            log_error("%s: Couldn't make filesink element", __FUNCTION__);
+            return false;
+        } else {
+            g_object_set(filesink, "location", "audioOut.ogg", NULL);
+        }
+        
+        gst_bin_add_many(GST_BIN(audio->_audioSaveBin), audioConvert, audioEnc,
+            audio->_mux, filesink, NULL);
+        
+        pad = gst_element_get_pad(audioConvert, "sink");
+        gst_element_add_pad(audio->_audioSaveBin, gst_ghost_pad_new ("sink", pad));
+        gst_object_unref (GST_OBJECT (pad));
+        
+        //gst_bin_add (GST_BIN(audio->_pipeline), audio->_audioSaveBin);
+        
+        ok = gst_element_link_many(audioConvert, audioEnc, audio->_mux,
+                filesink, NULL);
+        if (ok != true) {
+            log_error("%s: Something went wrong in linking", __FUNCTION__);
+            return false;
+        } else {
+            return true;
+        }
+    }
+    
+    gboolean
+    AudioInputGst::makeAudioSourceSaveLink (GnashAudioPrivate* audio) {
+        GstPad *audioSaveQueueSrc, *audioSaveBinSink;
+        GstPadLinkReturn padreturn;
+        
+        gst_bin_add(GST_BIN(audio->_pipeline), audio->_audioSaveBin);
+        
+        audioSaveQueueSrc = gst_element_get_pad(audio->_audioMainBin,
+            "saveQueueSrc");
+        audioSaveBinSink = gst_element_get_pad(audio->_audioSaveBin,
+            "sink");
+        
+        padreturn = gst_pad_link(audioSaveQueueSrc, audioSaveBinSink);
+        
+        if (padreturn == GST_PAD_LINK_OK) {
+            return true;
+        } else {
+            log_error("something went wrong in the makeAudioSourceSaveLink 
function");
+            return false;
+        }
+    }
+    
+    void
+    AudioInputGst::audioPlay(GnashAudioPrivate *audio) {
+        GstStateChangeReturn state;
+        GstBus *bus;
+        GMainLoop *loop;
+        gint ret;
+        
+        //setup bus to watch pipeline for messages
+        bus = gst_pipeline_get_bus (GST_PIPELINE (audio->_pipeline));
+        ret = gst_bus_add_watch (bus, audio_bus_call, audio);
+        gst_object_unref (bus);
+
+        //declare clock variables to record time (mainly useful in debug)
+        GstClockTime tfthen, tfnow;
+        GstClockTimeDiff diff;
+        
+        tfthen = gst_util_get_timestamp ();
+        state = gst_element_set_state (audio->_pipeline, GST_STATE_PLAYING);
+        
+        if (state == GST_STATE_CHANGE_SUCCESS) {
+            audio->_pipelineIsPlaying = true;
+        }
+        
+        loop = audio->_loop;
+        g_print("running (ctrl-c in terminal to quit).....\n");
+        g_main_loop_run(loop);
+        g_print("main loop done...\n");
+        tfnow = gst_util_get_timestamp ();
+        diff = GST_CLOCK_DIFF (tfthen, tfnow);
+        g_print(("Execution ended after %" G_GUINT64_FORMAT " ns.\n"), diff);
+    }
+    
+    void
+    AudioInputGst::makeAudioDevSelection() {
+        int devselect = -1;
+        devselect = rcfile.getAudioInputDevice();
+        if (devselect == -1) {
+            log_trace("No default audio input device specified, setting to 
testsrc\n");
+            rcfile.setAudioInputDevice(0);
+            devselect = rcfile.getAudioInputDevice();
+        } else {
+            log_trace("You've specified audio input %d in gnashrc, using that 
one\n",
+                devselect);
+        }
+        
+        getSelectedCaps(devselect);
+        
+        GnashAudioPrivate *audio = NULL;
+        audio = transferToPrivate(devselect);
+        if (audio == NULL) {
+            log_error("%s: Couldn't transferToPrivate structure", 
__FUNCTION__);
+        }
+        
+        bool ok = audioCreateMainBin(audio);
+        if (ok != true) {
+            log_error("%s: Couldn't make the audioMainBin", __FUNCTION__);
+        }
+        
+        ok = audioCreatePlaybackBin(audio);
+        if (ok != true) {
+            log_error("%s: Couldn't make the audioPlaybackBin", __FUNCTION__);
+        }
+        
+        ok = audioCreateSaveBin(audio);
+        if (ok != true) {
+            log_error("%s: Couldn't make the audioSaveBin", __FUNCTION__);
+        }
+        
+        ok = makeAudioSourcePlaybackLink(audio);
+        if (ok != true) {
+            log_error("%s: Couldn't make the Source-Playback link", __FUNCTION__);
+        }
+        
+        ok = makeAudioSourceSaveLink(audio);
+        if (ok != true) {
+            log_error("%s: Couldn't make the Source-Save link", __FUNCTION__);
+        }
+        
+        //debug
+        //g_print("starting pipeline....\n");
+        audioPlay(audio);
+        
+    }
 
 } //gst namespace
 } //media namespace

=== modified file 'libmedia/gst/AudioInputGst.h'
--- a/libmedia/gst/AudioInputGst.h      2009-07-13 21:12:51 +0000
+++ b/libmedia/gst/AudioInputGst.h      2009-07-22 16:56:41 +0000
@@ -20,20 +20,384 @@
 #define GNASH_AUDIOINPUTGST_H
 
 #include <boost/cstdint.hpp> // for C99 int types
+#include "gst/gst.h"
 #include "AudioInput.h"
+#include <vector>
 
 namespace gnash {
 namespace media {
 namespace gst {
 
-
-class AudioInputGst : public AudioInput {
+/// \class GnashAudio
+///
+/// \brief Contains information about audio input devices while enumerating
+///  information about attached hardware. This class is also referred back to
+///  by GnashAudioPrivate to re-access enumerated information.
+class GnashAudio {
+    public:
+        /// \brief Accessor to retrieve the private _element variable
+        ///       from the GnashAudio class which contains a pointer
+        ///       to the audio source element.
+        ///
+        /// @return GstElement* to the audio source element
+        GstElement* getElementPtr() {return _element;};
+        
+        /// \brief Accessor to set the private _element variable from
+        ///       the GnashAudio class.
+        ///
+        /// @param element The GstElement pointer to the audio source element.
+        void setElementPtr(GstElement* element) {_element = element;};
+        
+        /// \brief Accessor to get the private _devLocation variable from
+        ///       the GnashAudio class.
+        ///
+        /// @return The _devLocation private variable from GnashAudio class.
+        gchar* getDevLocation() {return _devLocation;};
+        
+        /// \brief Accessor to set the private _devLocation variable from
+        ///       the GnashAudio class.
+        ///
+        /// @param l A gchar* containing the physical location of the audio
+        ///       input hardware device.
+        void setDevLocation(gchar *l) {_devLocation = l;};
+        
+        /// \brief Accessor to return the private _gstreamerSrc variable
+        ///       from the GnashAudio class.
+        ///
+        /// @return The _gstreamerSrc variable from the GnashAudio class,
+        ///        which should contain the type of the Gstreamer audio source
+        ///        element (e.g. pulsesrc).
+        gchar* getGstreamerSrc() {return _gstreamerSrc;};
+        
+        /// \brief Accessor to set the private _gstreamerSrc variable
+        ///       from the GnashAudio class.
+        ///
+        /// @param s A gchar* containing the type of the Gstreamer source
+        ///         element type (e.g. pulsesrc)
+        void setGstreamerSrc(gchar *s) {_gstreamerSrc = s;};
+        
+        /// \brief Accessor to get the private _productName variable
+        ///       from the GnashAudio class.
+        ///
+        /// @return A gchar* containing the audio input's hardware name
+        ///       (e.g. HDA Intel).
+        gchar* getProductName() {return _productName;};
+        
+        /// \brief Accessor to set the private _productName variable
+        ///       from the GnashAudio class.
+        ///
+        /// @param n A gchar* to the hardware input device's hardware name
+        ///         (e.g. HDA Intel).
+        void setProductName(gchar *n) {_productName = n;};
+
+        /// Constructor for the GnashAudio class.
+        GnashAudio();
+        
+    private:
+        /// \var GnashAudio::_element
+        /// \brief GstElement* which points to the audio source
+        ///       element.
+        GstElement* _element;
+        
+        /// \var GnashAudio::_devLocation
+        /// \brief Contains the physical location of the audio input device
+        gchar* _devLocation;
+        
+        /// \var GnashAudio::_gstreamerSrc
+        /// \brief Contains a gchar* which describes the gstreamer source
+        ///       type (e.g. pulsesrc or jackaudiosrc).
+        gchar* _gstreamerSrc;
+        
+        /// \var GnashAudio::_productName
+        /// \brief Contains a gchar* which describes the name of the hardware
+        ///      device (e.g. Built-In Microphone or HDA Intel).
+        gchar* _productName;
+};
+
+GnashAudio::GnashAudio() {
+    _element = NULL;
+    _devLocation = NULL;
+    _gstreamerSrc = NULL;
+    _productName = NULL;
+}
+
+/// \class GnashAudioPrivate
+///
+/// \brief This class is initialized once a hardware input device is chosen.
+///   It is more robust than GnashAudio because it has additional room to store
+///   important Gstreamer information (pipelines, references to elements, etc.)
+class GnashAudioPrivate {
+    public:
+        /// \var audioSource
+        /// \brief A pointer to the Gstreamer element corresponding to the 
+        ///   audio source (e.g. a built-in or usb microphone).
+        GstElement *audioSource;
+        
+        /// \var audioEnc
+        /// \brief A pointer to the audio encoder element of the Gstreamer
+        ///   pipeline. The only currently supported format is vorbis.
+        GstElement *audioEnc;
+        
+        /// Constructor for the GnashAudioPrivate class.
+        GnashAudioPrivate();
+        
+        /// \brief This function sets the private _audioDevice element in
+        ///   the GnashAudioPrivate class
+        /// @param d A pointer to the GnashAudio class that you want to
+        ///   use for audio input.
+        void setAudioDevice(GnashAudio* d) {_audioDevice = d;}
+        
+        /// \brief This function returns the private _audioDevice element pointer
+        ///   from the GnashAudioPrivate class.
+        /// @return The GnashAudio* stored in the _audioDevice variable
+        GnashAudio* getAudioDevice() {return _audioDevice;}
+        
+        /// \brief This function sets the private _deviceName element in the
+        ///   GnashAudioPrivate class.
+        /// @param n A gchar* describing the input hardware (e.g. HDA Intel)
+        void setDeviceName(gchar* n) {_deviceName = n;}
+        
+        /// \brief This function returns the private _deviceName variable from the
+        ///   GnashAudioPrivate class.
+        /// @return The gchar* describing the physical device's name (e.g. HDA Intel)
+        gchar* getDeviceName() {return _deviceName;}
+    
+    //FIXME: I can't figure out why this isn't working right. Since I made 
+    // AudioInputGst inherit from GnashAudioPrivate it should be able to access
+    // protected variables, but I can't get it to work!    
+    //protected:
+        /// \var _audioDevice
+        /// \brief A pointer to the GnashAudio class of the selected hardware device
+        ///   This info should be stored to the GnashAudioPrivate class in the
+        ///   transferToPrivate function.
+        GnashAudio* _audioDevice;
+        
+        /// \var _deviceName
+        /// \brief A gchar* describing the physical input device's name
+        ///   (e.g. HDA Intel or Built-In Microphone)
+        gchar* _deviceName;
+        
+        /// \var _pipeline
+        /// \brief A pointer to the main Gstreamer pipeline that all
+        ///      created elements and bins will be dropped into.
+        GstElement* _pipeline;
+        
+        /// \var _audioMainBin
+        /// The main bin is set up to handle any number of connections to be made
+        /// later in the program. The basic pipeline design is as follows:
+        /// tee ! audioPlaybackQueue (audioPlaybackQueueSrc ghostpad)
+        /// tee ! saveQueue (saveQueueSrc ghostpad)
+        /// The source bin is dropped into the main bin and will eventually be
+        /// fed into the tee element
+        GstElement* _audioMainBin;
+        
+        /// \var _audioSourceBin
+        /// The audio source bin contains the source device and a restriction on
+        /// its capabilities. Currently a lot of stuff in here is hardcoded and
+        /// will probably eventually be made into options that can be changed
+        /// using setters. The basic pipeline design is as follows:
+        /// <selected audio source> ! capsfilter
+        /// The source bin is dropped into the _audioMainBin.
+        GstElement* _audioSourceBin;
+        
+        /// \var _audioPlaybackBin
+        /// The audio playback bin contains the elements necessary to playback
+        /// the audio being captured by the selected device. Note that if you
+        /// create the playback bin it will not automatically link up to the
+        /// playback queue. To do that you need to call the makeAudioSourcePlaybackLink()
+        /// function. The basic pipeline design is as follows:
+        /// autoaudiosink ! NULL
+        GstElement* _audioPlaybackBin;
+        
+        /// \var _audioSaveBin
+        /// The audio save bin contains the elements necessary to save the audio
+        /// being captured to a file (currently just to an ogg file). Note that if
+        /// you create the save bin it will not automatically link up to the
+        /// save queue in the main bin. To do that you need to call the
+        /// makeAudioSourceSaveLink() function. The basic pipeline structure is
+        /// as follows:
+        /// audioconvert ! vorbisenc ! oggmux ! filesink
+        GstElement* _audioSaveBin;
+        
+        /// \var _mux
+        /// \brief A direct link to the oggmux element in the _audioSaveBin for
+        /// use with linking up to a video muxer so that audio and video are both
+        /// muxed out to the same file.
+        GstElement* _mux;
+        
+        /// \var _pipelineIsPlaying
+        /// \brief A boolean value which stores whether or not the _pipeline
+        /// element is currently in its 'playing' state.
+        gboolean _pipelineIsPlaying;
+        
+        /// \var _loop
+        /// \brief A GMainLoop to keep recording input from the device.
+        GMainLoop* _loop;
+};
+
+GnashAudioPrivate::GnashAudioPrivate() {
+    audioSource = NULL;
+    audioEnc = NULL;
+    _audioDevice = NULL;
+    _deviceName = NULL;
+    _pipeline = NULL;
+    _audioMainBin = NULL;
+    _audioSourceBin = NULL;
+    _audioPlaybackBin = NULL;
+    _audioSaveBin = NULL;
+    _pipelineIsPlaying = false;
+    _loop = g_main_loop_new(NULL, false);
+    _mux = NULL;
+}
+
+/// \class AudioInputGst
+/// \brief The main AudioInputGst class, which actually doesn't store too
+/// much important information (most of that is stored in the GnashAudio
+/// and GnashAudioPrivate classes)
+class AudioInputGst : public AudioInput, public GnashAudioPrivate {
        
 public:
-
+    /// \brief AudioInputGst class constructor
        AudioInputGst();
 
+    /// \brief AudioInputGst class destructor
        ~AudioInputGst();
+    
+    /// \brief This function enumerates information about the audio input devices
+    /// attached to the machine and stores them in the _audioVect vector.
+    /// @return Nothing. All important information is stored in the _audioVect
+    ///   element.
+    void findAudioDevs();
+    
+    /// \brief This function is currently the workhorse of this class. It
+    /// looks in the gnashrc file and checks for a default microphone input device.
+    /// If one is not selected, the audiotestsrc is used by default. This function
+    /// also currently calls the functions to make the GstBins, pipelines and
+    /// element connections.
+    void makeAudioDevSelection();
+    
+    /// \brief This function grabs information about the selected audio input
+    /// device. It also calls checkSupportedFormats to make sure that Gnash
+    /// can handle the input formats supported by the source device.
+    /// @param devselect The integer value describing the selected microphone.
+    ///  This should probably be changed eventually to a more robust selection
+    ///  method (rather than a seemingly random integer value)
+    void getSelectedCaps(int devselect);
+    
+    /// \brief This function checks the format information enumerated in
+    ///  getSelectedCaps and makes sure that Gnash can handle said input.
+    /// @param aud A pointer to a GnashAudio class that represents the
+    ///  selected device.
+    /// @param caps A pointer to the capabilities of the device as enumerated
+    ///  in the getSelectedCaps function
+    /// @return A boolean value (true means that the device has at least one 
+    ///  supported format, false means that the device has no supported formats)
+    bool checkSupportedFormats(GnashAudio *aud, GstCaps *caps);
+    
+    /// \brief This function transfers the selected audio device from a GnashAudio
+    ///  class to a GnashAudioPrivate class. This function is called once the
+    ///  device selection has been made.
+    /// @param devselect The integer value describing the selected microphone.
+    ///   This should probably be changed eventually to a more robust selection
+    ///   method (rather than a seemingly random integer value)
+    GnashAudioPrivate* transferToPrivate(int devselect);
+    
+    /// This function creates the main audio bin. A reference to this bin is
+    /// stored in a GnashAudioPrivate class structure under the _audioMainBin
+    /// variable. See the description of _audioMainBin for a pipeline description.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///  selected audio input device.
+    /// @return True if the bin was created successfully, false otherwise.
+    gboolean audioCreateMainBin (GnashAudioPrivate *audio);
+    
+    /// This function creates the audio source bin. A reference to this bin is
+    /// stored in a GnashAudioPrivate class structure under the _audioSourceBin
+    /// variable. See the description of _audioSourceBin for a pipeline description.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///   selected audio input device.
+    /// @return True if the bin was created successfully, false otherwise.
+    gboolean audioCreateSourceBin (GnashAudioPrivate *audio);
+    
+    /// This function creates the audio playback bin. A reference to this bin is
+    /// stored in a GnashAudioPrivate class structure under the _audioPlaybackBin
+    /// variable. See the description of _audioPlaybackBin for a pipeline description.
+    /// IMPORTANT: If you make the playback bin, it's not automatically linked up
+    ///  and activated. You must call the makeAudioSourcePlaybackLink() function.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///   selected audio input device.
+    /// @return True if the bin was created successfully, false otherwise.
+    gboolean audioCreatePlaybackBin (GnashAudioPrivate *audio);
+    
+    /// This function makes the link between the audio playback queue (which
+    /// receives an audio stream from the source device) and the playback element.
+    /// It's important to note that if you create the playback bin, you must
+    /// make sure to call makeAudioSourcePlaybackLink() so that the links are
+    /// all made properly.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///   selected audio input device.
+    /// @return True if the link was made successfully, false otherwise.
+    gboolean makeAudioSourcePlaybackLink (GnashAudioPrivate *audio);
+    
+    /// This function makes the link between the audio save queue (which receives
+    /// an audio stream from the source device) and the respective save elements.
+    /// It's important to note that if you create the save bin you must make sure
+    /// to call makeAudioSourceSaveLink() so that the links are all made properly
+    /// and it can successfully activate when the pipeline starts.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///   selected audio input device.
+    /// @return True if the link was made successfully, false otherwise.
+    gboolean makeAudioSourceSaveLink (GnashAudioPrivate *audio);
+    
+    /// This function creates the audio save bin. A reference to this bin is
+    /// stored in a GnashAudioPrivate class structure under the _audioSaveBin
+    /// variable. See the description of _audioSaveBin for a pipeline description.
+    /// IMPORTANT: If you make the save bin, it's not automatically linked up
+    ///  and activated. You must call the makeAudioSourceSaveLink() function.
+    /// @param audio A pointer to the GnashAudioPrivate class structure of the
+    ///   selected audio input device.
+    /// @return True if the bin was created successfully, false otherwise.
+    gboolean audioCreateSaveBin (GnashAudioPrivate *audio);
+    
+    /// This function should be called when the desired pipeline is created. This
+    /// will do the proper cleanup and activate the pipeline described in the audio
+    /// parameter.
+    /// @param audio A pointer to the GnashAudioPrivate class structure containing
+    ///   the pipeline to start up.
+    void audioPlay(GnashAudioPrivate *audio);
+    
+    /// \brief Function returns the total number of devices detected (useful in
+    ///  iterating through the _audioVect vector).
+    /// @return The _numdevs private variable from the AudioInputGst class.
+    int getNumdevs() {return _numdevs;}
+    
+    /// \brief Function sets the private _numdevs variable in the AudioInputGst
+    /// class.
+    /// @param n The integer value representing the desired value to set _numdevs
+    /// to.
+    void setNumdevs(int n) {_numdevs = n;}
+    
+    /// \brief Function returns a pointer to the private _audioVect element from
+    ///  the AudioInputGst class.
+    std::vector<GnashAudio*>* getAudioVect() {return &_audioVect;}
+    
+    
+private:
+    /// \var _numdevs
+    /// \brief Contains an integer value representing the number of devices found
+    /// on the machine.
+    int _numdevs;
+    
+    /// \var _audioVect
+    /// \brief A vector of GnashAudio pointers. This is used when storing information
+    ///  about attached devices. This vector is accessed using the integer value
+    ///  from the gnashrc file (e.g. set microphoneDevice 2 means you're accessing
+    ///  _audioVect[2])
+    std::vector<GnashAudio*> _audioVect;
+    
+    /// \var _globalAudio
+    /// \brief A global pointer to the GnashAudioPrivate class for the selected
+    ///  device. This is useful so you don't have to keep relying on the _audioVect
+    ///  vector.
+    GnashAudioPrivate* _globalAudio;
 
 };
 

=== modified file 'testsuite/libmedia.all/Makefile.am'
--- a/testsuite/libmedia.all/Makefile.am        2009-07-16 21:21:10 +0000
+++ b/testsuite/libmedia.all/Makefile.am        2009-07-22 16:56:41 +0000
@@ -56,11 +56,16 @@
 if USE_GST_ENGINE
 
  check_PROGRAMS += \
-       test_videoinput
+       test_videoinput \
+       test_audioinput
 
  test_videoinput_SOURCES = test_videoinput.cpp
  test_videoinput_LDADD = $(AM_LDFLAGS) 
  test_videoinput_DEPENDENCIES = site-update
+ 
+ test_audioinput_SOURCES = test_audioinput.cpp
+ test_audioinput_LDADD = $(AM_LDFLAGS)
+ test_audioinput_DEPENDENCIES = site-update
 
 endif
 

=== added file 'testsuite/libmedia.all/test_audioinput.cpp'
--- a/testsuite/libmedia.all/test_audioinput.cpp        1970-01-01 00:00:00 +0000
+++ b/testsuite/libmedia.all/test_audioinput.cpp        2009-07-22 16:56:41 +0000
@@ -0,0 +1,91 @@
+// 
+//   Copyright (C) 2009 Free Software Foundation, Inc.
+// 
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; either version 3 of the License, or
+// (at your option) any later version.
+// 
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+// 
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+#ifdef HAVE_CONFIG_H
+#include "gnashconfig.h"
+#endif
+
+#ifdef HAVE_DEJAGNU_H
+
+#include <boost/shared_ptr.hpp>
+#include <string>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <iostream>
+#include <cstdio>
+
+#include "dejagnu.h"
+#include "log.h"
+
+#include "gst/AudioInputGst.h"
+#include <vector>
+
+
+using namespace gnash;
+using namespace media;
+using namespace gst;
+using namespace std;
+
+static void usage (void);
+
+static TestState runtest;
+
+static string infile;
+
+static void test_client();
+
+LogFile& dbglogfile = LogFile::getDefaultInstance();
+
+int
+main(int argc, char *argv[])
+{   
+    test_client();
+    return 0;
+}
+
+static void test_client()
+{
+       //create a test class, call constructor
+       gst::AudioInputGst aud;
+       aud.findAudioDevs();
+       
+       std::vector<GnashAudio*> *audioVect = aud.getAudioVect();
+       
+       //basic sanity check: the vector should hold one entry per detected device
+       if (audioVect->size() == static_cast<size_t>(aud.getNumdevs())) {
+               runtest.pass("device vector size matches getNumdevs()");
+       } else {
+               runtest.fail("device vector size doesn't match getNumdevs()");
+       }
+       
+       //aud.makeAudioDevSelection();
+}
+
+
+#else
+
+#include <iostream>
+
+int
+main(int /*argc*/, char** /*argv*/)
+{
+    // nop
+    std::cerr << "This program needs to have DejaGnu installed!" << std::endl;
+    return 0;
+}
+
+#endif

