[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Gnash-commit] /srv/bzr/gnash/trunk r11446: Implement part of an interfa
From: |
Benjamin Wolsey |
Subject: |
[Gnash-commit] /srv/bzr/gnash/trunk r11446: Implement part of an interface for VideoInput, add notes on what's required |
Date: |
Fri, 28 Aug 2009 15:28:01 +0200 |
User-agent: |
Bazaar (1.16.1) |
------------------------------------------------------------
revno: 11446 [merge]
committer: Benjamin Wolsey <address@hidden>
branch nick: trunk
timestamp: Fri 2009-08-28 15:28:01 +0200
message:
Implement part of an interface for VideoInput, add notes on what's required
for the rest of it.
The advantages of this are that it is possible to implement Camera as a Relay,
to assign the MediaHandler dynamically, to drop ifdefs, to make it possible
to implement an ffmpeg VideoInput class, and generally to make the code
into a half-way useful design.
The disadvantage is that many unit tests had to be dropped. Because the
design has changed (and needs to change much more) they weren't useful.
removed:
libmedia/VideoInput.cpp
added:
libmedia/ffmpeg/VideoInputFfmpeg.cpp
libmedia/ffmpeg/VideoInputFfmpeg.h
modified:
libcore/asobj/flash/media/Camera_as.cpp
libcore/asobj/flash/media/Microphone_as.cpp
libmedia/AudioInput.h
libmedia/Makefile.am
libmedia/MediaHandler.h
libmedia/VideoInput.h
libmedia/ffmpeg/MediaHandlerFfmpeg.cpp
libmedia/ffmpeg/MediaHandlerFfmpeg.h
libmedia/gst/MediaHandlerGst.cpp
libmedia/gst/MediaHandlerGst.h
libmedia/gst/VideoInputGst.cpp
libmedia/gst/VideoInputGst.h
testsuite/actionscript.all/Microphone.as
testsuite/libmedia.all/test_videoinput.cpp
=== modified file 'libcore/asobj/flash/media/Camera_as.cpp'
--- a/libcore/asobj/flash/media/Camera_as.cpp 2009-08-26 12:15:53 +0000
+++ b/libcore/asobj/flash/media/Camera_as.cpp 2009-08-28 11:03:35 +0000
@@ -31,15 +31,11 @@
#include "NativeFunction.h"
#include "Object.h"
#include "Array_as.h"
+#include "MediaHandler.h"
+#include "VideoInput.h"
+
#include <sstream>
-#ifdef USE_GST
-#include "gst/VideoInputGst.h"
-#endif
-
-#ifdef USE_FFMPEG
-#include "VideoInput.h"
-#endif
namespace gnash {
@@ -54,8 +50,7 @@
as_value camera_activitylevel(const fn_call& fn);
as_value camera_bandwidth(const fn_call& fn);
-as_value camera_currentFPS(const fn_call& fn); //as3
-as_value camera_currentFps(const fn_call& fn); //as2
+as_value camera_currentFps(const fn_call& fn);
as_value camera_fps(const fn_call& fn);
as_value camera_height(const fn_call& fn);
as_value camera_index(const fn_call& fn);
@@ -151,28 +146,92 @@
return o.get();
}
-#ifdef USE_GST
-class camera_as_object: public as_object, public media::gst::VideoInputGst {
-
-public:
-
- camera_as_object()
- :
- as_object(getCameraInterface())
- {}
-};
-#endif
-
-#ifdef USE_FFMPEG
-class camera_as_object: public as_object, public media::VideoInput {
-public:
-
- camera_as_object()
- :
- as_object(getCameraInterface())
- {}
-};
-#endif
+class Camera_as: public as_object
+{
+public:
+
+ Camera_as(media::VideoInput* input)
+ :
+ as_object(getCameraInterface()),
+ _input(input),
+ _loopback(false)
+ {
+ assert(input);
+ }
+
+ bool muted() const {
+ return _input->muted();
+ }
+
+ size_t width() const {
+ return _input->width();
+ }
+
+ size_t height() const {
+ return _input->height();
+ }
+
+ double fps() const {
+ return _input->fps();
+ }
+
+ double currentFPS() const {
+ return _input->currentFPS();
+ }
+
+ double activityLevel() const {
+ return _input->activityLevel();
+ }
+
+ double bandwidth() const {
+ return _input->bandwidth();
+ }
+
+ size_t index() const {
+ return _input->index();
+ }
+
+ void setMode(size_t width, size_t height, double fps, bool favorArea) {
+ _input->requestMode(width, height, fps, favorArea);
+ }
+
+ void setMotionLevel(size_t level, double timeout) {
+ _input->setMotionLevel(level);
+ _input->setMotionTimeout(timeout);
+ }
+
+ double motionLevel() const {
+ return _input->motionLevel();
+ }
+
+ double motionTimeout() const {
+ return _input->motionTimeout();
+ }
+
+ const std::string& name() const {
+ return _input->name();
+ }
+
+ size_t quality() const {
+ return _input->quality();
+ }
+
+ void setQuality(double bandwidth, size_t quality) {
+ _input->setBandwidth(bandwidth);
+ _input->setQuality(quality);
+ }
+
+ void setLoopback(bool b) {
+ _loopback = b;
+ }
+
+private:
+
+ media::VideoInput* _input;
+
+ // TODO: see whether this should be handled in the VideoInput class
+ bool _loopback;
+};
// AS2 static accessor.
as_value
@@ -189,22 +248,36 @@
// meant, not a new object each time. It will be necessary to query
// the MediaHandler for this, and possibly to store the as_objects
// somewhere.
- boost::intrusive_ptr<as_object> obj = new camera_as_object;
-
-
- int numargs = fn.nargs;
- if (numargs > 0) {
+ //
+ media::MediaHandler* handler = media::MediaHandler::get();
+ if (!handler) {
+ log_error(_("No MediaHandler exists! Cannot create a Camera object"));
+ return as_value();
+ }
+ media::VideoInput* input = handler->getVideoInput(0);
+
+ if (!input) {
+ // TODO: what should happen if the index is not available?
+ return as_value();
+ }
+
+ boost::intrusive_ptr<as_object> obj = new Camera_as(input);
+
+ const size_t nargs = fn.nargs;
+ if (nargs > 0) {
log_debug("%s: the camera is automatically chosen from gnashrc",
- __FUNCTION__);
+ "Camera.get()");
}
- return as_value(obj.get()); // will keep alive
+ return as_value(obj.get());
}
// AS3 static accessor.
as_value
camera_getCamera(const fn_call& fn)
{
- boost::intrusive_ptr<as_object> obj = new camera_as_object;
+ media::VideoInput* input = media::MediaHandler::get()->getVideoInput(0);
+
+ boost::intrusive_ptr<as_object> obj = new Camera_as(input);
int numargs = fn.nargs;
if (numargs > 0) {
@@ -216,66 +289,21 @@
as_value
camera_setmode(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
-
- int numargs = fn.nargs;
- switch (numargs) {
- case 4:
- ptr->set_width(fn.arg(0).to_int());
- ptr->set_height(fn.arg(1).to_int());
- ptr->set_fps(fn.arg(2).to_int());
- log_unimpl("Camera_as::setmode argument 4 (favorArea)");
-#ifdef USE_GST
- ptr->webcamChangeSourceBin(ptr->getGlobalWebcam());
-#endif
- break;
- case 3:
- ptr->set_width(fn.arg(0).to_int());
- ptr->set_height(fn.arg(1).to_int());
- ptr->set_fps(fn.arg(2).to_int());
-#ifdef USE_GST
- ptr->webcamChangeSourceBin(ptr->getGlobalWebcam());
-#endif
- break;
- case 2:
- ptr->set_width(fn.arg(0).to_int());
- ptr->set_height(fn.arg(1).to_int());
- if (ptr->get_fps() != 15) {
- ptr->set_fps(15);
- }
-#ifdef USE_GST
- ptr->webcamChangeSourceBin(ptr->getGlobalWebcam());
-#endif
- break;
- case 1:
- ptr->set_width(fn.arg(0).to_int()); //set to the specified width
argument
- if (ptr->get_height() != 120) {
- ptr->set_height(120);
- }
- if (ptr->get_fps() != 15) {
- ptr->set_fps(15);
- }
-#ifdef USE_GST
- ptr->webcamChangeSourceBin(ptr->getGlobalWebcam());
-#endif
- break;
- case 0:
- log_debug("%s: no arguments passed, using default values",
__FUNCTION__);
- if (ptr->get_width() != 160) {
- ptr->set_width(160);
- }
- if (ptr->get_height() != 120) {
- ptr->set_height(120);
- }
- if (ptr->get_fps() != 15) {
- ptr->set_fps(15);
- }
-#ifdef USE_GST
- ptr->webcamChangeSourceBin(ptr->getGlobalWebcam());
-#endif
- break;
- }
-
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
+
+ const size_t nargs = fn.nargs;
+
+ const double width = nargs ? fn.arg(0).to_number() : 160;
+ const double height = nargs > 1 ? fn.arg(1).to_number() : 120;
+ const double fps = nargs > 2? fn.arg(2).to_number() : 15;
+ const bool favorArea = nargs > 3 ? fn.arg(3).to_bool() : true;
+
+ // TODO: handle overflow
+ const size_t reqWidth = std::max<double>(width, 0);
+ const size_t reqHeight = std::max<double>(height, 0);
+
+ ptr->setMode(reqWidth, reqHeight, fps, favorArea);
+
return as_value();
}
@@ -283,51 +311,18 @@
camera_setmotionlevel(const fn_call& fn)
{
log_unimpl ("Camera::motionLevel can be set, but it's not implemented");
- boost::intrusive_ptr<camera_as_object> ptr = ensureType<camera_as_object>
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>
(fn.this_ptr);
- int numargs = fn.nargs;
- if (numargs > 2) {
- log_error("%s: Too many arguments", __FUNCTION__);
- } else {
- switch (numargs) {
- case 0:
- log_debug("%s: no args passed, using defaults", __FUNCTION__);
- if (ptr->get_motionLevel() != 50) {
- ptr->set_motionLevel(50);
- }
- if (ptr->get_motionTimeout() != 2000) {
- ptr->set_motionTimeout(2000);
- }
- break;
- case 1:
- {
- double argument = fn.arg(0).to_number();
- if ((argument >= 0) && (argument <= 100)) {
- ptr->set_motionLevel(argument);
- } else {
- log_error("%s: bad value passed for first argument",
__FUNCTION__);
- ptr->set_motionLevel(100);
- }
- if (ptr->get_motionTimeout() != 2000) {
- ptr->set_motionTimeout(2000);
- }
- break;
- }
- case 2:
- {
- double argument1 = fn.arg(0).to_number();
- if ((argument1 >= 0) && (argument1 <= 100)) {
- ptr->set_motionLevel(argument1);
- } else {
- log_error("%s: bad value passed for first argument",
__FUNCTION__);
- ptr->set_motionLevel(100);
- }
- ptr->set_motionTimeout(fn.arg(1).to_number());
- break;
- }
- }
- }
+ const size_t nargs = fn.nargs;
+
+ const double ml = nargs > 0 ? fn.arg(0).to_number() : 50;
+ const double mt = nargs > 1 ? fn.arg(1).to_number() : 2000;
+
+ const size_t motionLevel = (ml >= 0 && ml <= 100) ? ml : 100;
+
+ ptr->setMotionLevel(motionLevel, mt);
+
return as_value();
}
@@ -336,43 +331,18 @@
camera_setquality(const fn_call& fn)
{
log_unimpl ("Camera::quality can be set, but it's not implemented");
- boost::intrusive_ptr<camera_as_object> ptr = ensureType<camera_as_object>
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>
(fn.this_ptr);
-
- int numargs = fn.nargs;
- if (numargs > 2) {
- log_error("%s: Too many arguments", __FUNCTION__);
- } else {
- switch (numargs) {
- case 0:
- log_debug("%s: No arguments passed, using defaults",
__FUNCTION__);
- if (ptr->get_bandwidth() != 16384) {
- ptr->set_bandwidth(16384);
- }
- if (ptr->get_quality() != 0) {
- ptr->set_quality(0);
- }
- break;
- case 1:
- ptr->set_bandwidth(fn.arg(0).to_number());
- if (ptr->get_quality() != 0) {
- ptr->set_quality(0);
- }
- break;
- case 2:
- {
- double argument2 = fn.arg(1).to_number();
- ptr->set_bandwidth(fn.arg(0).to_number());
- if ((argument2 >= 0) && (argument2 <= 100)) {
- ptr->set_quality(fn.arg(1).to_number());
- } else {
- log_error("%s: Second argument not in range 0-100",
__FUNCTION__);
- ptr->set_quality(100);
- }
- break;
- }
- }
- }
+
+ const size_t nargs = fn.nargs;
+
+ const double b = nargs > 0 ? fn.arg(0).to_number() : 16384;
+ const double q = nargs > 1 ? fn.arg(1).to_number() : 0;
+
+ size_t quality = (q < 0 || q > 100) ? 100 : q;
+
+ ptr->setQuality(b, quality);
+
return as_value();
}
@@ -380,19 +350,16 @@
as_value
camera_activitylevel(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
+ if (!fn.nargs) {
log_unimpl("Camera::activityLevel only has default value");
- return as_value(ptr->get_activityLevel());
+ return as_value(ptr->activityLevel());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set activity property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -400,59 +367,32 @@
as_value
camera_bandwidth(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
+ if (!fn.nargs) {
log_unimpl("Camera::bandwidth only has default value");
- return as_value(ptr->get_bandwidth());
+ return as_value(ptr->bandwidth());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set bandwidth property of Camera"));
- );
- }
-
- return as_value();
-}
-
-//as3 capitalization
-as_value
-camera_currentFPS(const fn_call& fn)
-{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
-
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_currentFPS());
- }
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
- log_aserror(_("Attempt to set currentFPS property of Camera"));
- );
- }
-
- return as_value();
-}
-
-//as3 capitalization
+ );
+
+ return as_value();
+}
+
as_value
camera_currentFps(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_currentFPS());
+ if (!fn.nargs) {
+ return as_value(ptr->currentFPS());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set currentFPS property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -460,18 +400,15 @@
as_value
camera_fps(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_fps());
+ if (!fn.nargs) {
+ return as_value(ptr->fps());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set fps property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -479,18 +416,15 @@
as_value
camera_height(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_height());
+ if (!fn.nargs) {
+ return as_value(ptr->height());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set height property of Camera, use
setMode"));
- );
- }
+ );
return as_value();
}
@@ -498,29 +432,22 @@
as_value
camera_index(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
+ if (!fn.nargs)
{
- //livedocs say that this function should return an integer, but in
testing
- //the pp appears to, in practice, return the value as a string
- int value = ptr->get_index();
- char val = value + '0';
+ // livedocs say that this function should return an integer,
+ // but in testing the pp returns the value as a string
+ int value = ptr->index();
- std::stringstream ss;
- std::string str;
- ss << val;
- ss >> str;
- as_value name(str);
- name.convert_to_string();
- return (name);
+ std::ostringstream ss;
+ ss << value;
+ return as_value(ss.str());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set index property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -528,19 +455,16 @@
as_value
camera_motionLevel(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
+ if (!fn.nargs) {
log_unimpl("Camera::motionLevel only has default value");
- return as_value(ptr->get_motionLevel());
+ return as_value(ptr->motionLevel());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set motionLevel property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -548,39 +472,33 @@
as_value
camera_motionTimeout(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr = ensureType<camera_as_object>
- (fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
+ if (!fn.nargs) {
log_unimpl("Camera::motionTimeout");
- return as_value(ptr->get_motionTimeout());
+ return as_value(ptr->motionTimeout());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set motionTimeout property of Camera"));
- );
- }
+ );
return as_value();
}
as_value
-camera_muted(const fn_call& fn) {
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+camera_muted(const fn_call& fn)
+{
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- log_unimpl("Camera::muted");
- return as_value(ptr->get_muted());
+ if (!fn.nargs) {
+ log_unimpl("Camera.muted");
+ return as_value(ptr->muted());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set muted property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -588,18 +506,15 @@
as_value
camera_name(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_name());
+ if (!fn.nargs) {
+ return as_value(ptr->name());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set name property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -614,23 +529,15 @@
return as_value();
}
- // TODO: this is a static function, not a member function. Because there
- // is no this pointer, it cannot use camera_as_object to get the
- // names. It will have to query the MediaHandler directly (much of the
- // rest of the code should do this too).
- boost::intrusive_ptr<camera_as_object> ptr =
- ensureType<camera_as_object>(fn.this_ptr);
-
- //transfer from vector to an array
- std::vector<std::string> vect;
- vect = ptr->get_names();
-
- const size_t size = vect.size();
+ std::vector<std::string> names;
+ media::MediaHandler::get()->cameraNames(names);
+
+ const size_t size = names.size();
boost::intrusive_ptr<Array_as> data = new Array_as;
for (size_t i = 0; i < size; ++i) {
- data->push(vect[i]);
+ data->push(names[i]);
}
return as_value(data.get());
@@ -640,20 +547,16 @@
as_value
camera_quality(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
- ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
+ if (!fn.nargs) {
log_unimpl("Camera::quality has only default values");
- return as_value(ptr->get_quality());
+ return as_value(ptr->quality());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set quality property of Camera"));
- );
- }
+ );
return as_value();
}
@@ -667,15 +570,18 @@
as_value
camera_setLoopback(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr = ensureType<camera_as_object>
- (fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr = ensureType<Camera_as>(fn.this_ptr);
- int numargs = fn.nargs;
- if (numargs > 1) {
- log_error("%s: Too many arguments", __FUNCTION__);
- } else {
- ptr->set_loopback(fn.arg(0).to_bool());
- }
+ if (!fn.nargs) {
+ // TODO: log AS error.
+ return as_value();
+ }
+
+ if (fn.nargs > 1) {
+ log_aserror("%s: Too many arguments", "Camera.setLoopback");
+ }
+
+ ptr->setLoopback(fn.arg(0).to_bool());
return as_value();
}
@@ -688,37 +594,25 @@
}
as_value
-camera_setKeyFrameInterval(const fn_call& fn)
+camera_setKeyFrameInterval(const fn_call& /*fn*/)
{
- boost::intrusive_ptr<camera_as_object> ptr = ensureType<camera_as_object>
- (fn.this_ptr);
-
- int numargs = fn.nargs;
- if (numargs > 1) {
- log_error("%s: Too many arguments", "Camera.setKeyFrameInterval");
- } else {
- ptr->set_loopback(fn.arg(0).to_int());
- }
-
+ LOG_ONCE(log_unimpl("Camera.setKeyFrameInterval"));
return as_value();
}
as_value
camera_width(const fn_call& fn)
{
- boost::intrusive_ptr<camera_as_object> ptr =
- ensureType<camera_as_object>(fn.this_ptr);
+ boost::intrusive_ptr<Camera_as> ptr =
+ ensureType<Camera_as>(fn.this_ptr);
- if ( fn.nargs == 0 ) // getter
- {
- return as_value(ptr->get_width());
+ if (!fn.nargs) {
+ return as_value(ptr->width());
}
- else // setter
- {
- IF_VERBOSE_ASCODING_ERRORS(
+
+ IF_VERBOSE_ASCODING_ERRORS(
log_aserror(_("Attempt to set width property of Camera, use setMode"));
- );
- }
+ );
return as_value();
}
=== modified file 'libcore/asobj/flash/media/Microphone_as.cpp'
--- a/libcore/asobj/flash/media/Microphone_as.cpp 2009-08-26 12:15:53
+0000
+++ b/libcore/asobj/flash/media/Microphone_as.cpp 2009-08-28 11:21:24
+0000
@@ -421,16 +421,8 @@
as_value
microphone_names(const fn_call& fn)
{
- // TODO: this is a static function, not a member function. Because there
- // is no this pointer, it cannot use microphone_as_object to get the
- // names. It will have to query the MediaHandler directly (much of the
- // rest of the code should do this too).
- boost::intrusive_ptr<microphone_as_object> ptr =
- ensureType<microphone_as_object>(fn.this_ptr);
-
- //transfer from internal vector to AS array
+ // TODO: populate from MediaHandler like Camera.names.
std::vector<std::string> vect;
- vect = ptr->get_names();
size_t size = vect.size();
=== modified file 'libmedia/AudioInput.h'
--- a/libmedia/AudioInput.h 2009-08-24 00:13:31 +0000
+++ b/libmedia/AudioInput.h 2009-08-28 10:02:58 +0000
@@ -87,4 +87,4 @@
} // gnash.media namespace
} // gnash namespace
-#endif // __AUDIOINPUT_H__
+#endif
=== modified file 'libmedia/Makefile.am'
--- a/libmedia/Makefile.am 2009-08-04 17:46:54 +0000
+++ b/libmedia/Makefile.am 2009-08-28 11:03:35 +0000
@@ -70,7 +70,6 @@
FLVParser.cpp \
AudioResampler.cpp \
AudioInput.cpp \
- VideoInput.cpp \
$(NULL)
noinst_HEADERS = \
@@ -140,6 +139,7 @@
ffmpeg/VideoDecoderFfmpeg.cpp \
ffmpeg/AudioResamplerFfmpeg.cpp \
ffmpeg/VideoConverterFfmpeg.cpp \
+ ffmpeg/VideoInputFfmpeg.cpp \
$(NULL)
noinst_HEADERS += \
@@ -150,6 +150,7 @@
ffmpeg/AudioResamplerFfmpeg.h \
ffmpeg/ffmpegHeaders.h \
ffmpeg/VideoConverterFfmpeg.h \
+ ffmpeg/VideoInputFfmpeg.h \
$(NULL)
libgnashmedia_la_LIBADD += \
=== modified file 'libmedia/MediaHandler.h'
--- a/libmedia/MediaHandler.h 2009-02-25 02:00:44 +0000
+++ b/libmedia/MediaHandler.h 2009-08-28 10:02:58 +0000
@@ -38,6 +38,8 @@
class AudioDecoder;
class AudioInfo;
class VideoInfo;
+ class VideoInput;
+ class AudioInput;
}
}
@@ -88,8 +90,8 @@
/// NOTE: the default implementation returns an FLVParser for FLV input
/// or 0 for others.
///
- virtual std::auto_ptr<MediaParser> createMediaParser(
- std::auto_ptr<IOChannel> stream);
+ virtual std::auto_ptr<MediaParser>
+ createMediaParser(std::auto_ptr<IOChannel> stream);
/// Create a VideoDecoder for decoding what's specified in the VideoInfo
//
@@ -97,7 +99,8 @@
/// the sound correctly.
/// @return Will always return a valid VideoDecoder or throw a
/// gnash::MediaException if a fatal error occurs.
- virtual std::auto_ptr<VideoDecoder> createVideoDecoder(const VideoInfo&
info)=0;
+ virtual std::auto_ptr<VideoDecoder>
+ createVideoDecoder(const VideoInfo& info)=0;
/// Create an AudioDecoder for decoding what's specified in the
AudioInfo
//
@@ -105,16 +108,34 @@
/// the sound correctly.
/// @return Will always return a valid AudioDecoder or throw a
/// gnash::MediaException if a fatal error occurs.
- virtual std::auto_ptr<AudioDecoder> createAudioDecoder(const AudioInfo&
info)=0;
+ virtual std::auto_ptr<AudioDecoder>
+ createAudioDecoder(const AudioInfo& info)=0;
/// Create an VideoConverter for converting between color spaces.
//
/// @param srcFormat The source image color space
/// @param dstFormat The destination image color space
///
- /// @return A valid VideoConverter or a NULL auto_ptr if a fatal error
occurs.
+ /// @return A valid VideoConverter or a NULL auto_ptr if a fatal error
+ /// occurs.
virtual std::auto_ptr<VideoConverter>
- createVideoConverter(ImgBuf::Type4CC srcFormat, ImgBuf::Type4CC
dstFormat)=0;
+ createVideoConverter(ImgBuf::Type4CC srcFormat,
+ ImgBuf::Type4CC dstFormat)=0;
+
+ /// Return a VideoInput
+ //
+ /// This is always owned by the MediaHandler, but will remain alive
+ /// as long as it is referenced by a Camera object.
+ //
+ /// @param index The index of the VideoInput to return.
+ /// @return A Video Input corresponding to the specified index
+ /// or null if it is not available.
+ virtual VideoInput* getVideoInput(size_t index) = 0;
+
+ /// Return a list of available cameras.
+ //
+ /// This is re-generated every time the function is called.
+ virtual void cameraNames(std::vector<std::string>& names) const = 0;
/// Return the number of bytes padding needed for input buffers
//
=== removed file 'libmedia/VideoInput.cpp'
--- a/libmedia/VideoInput.cpp 2009-08-07 19:56:42 +0000
+++ b/libmedia/VideoInput.cpp 1970-01-01 00:00:00 +0000
@@ -1,43 +0,0 @@
-// VideoInput.cpp: Video input base class source file.
-//
-// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
-//
-// This program is free software; you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation; either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program; if not, write to the Free Software
-// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-#include "VideoInput.h"
-
-namespace gnash {
-namespace media {
-
- //constructor
- VideoInput::VideoInput() {
- //initialize variables
- _activityLevel = -1.0;
- _bandwidth = 16384;
- _currentFPS = 0;
- _fps = 15.0;
- _height = 120;
- _index = 0;
- _keyFrameInterval = 15;
- _loopback = false;
- _motionLevel = 50;
- _motionTimeout = 2000; //millisecs
- _muted = true; //security (false = allow, true = decline)
- _quality = 0;
- _width = 160;
- }
-
-} //media namespace
-} //gnash namespace
=== modified file 'libmedia/VideoInput.h'
--- a/libmedia/VideoInput.h 2009-08-24 00:13:31 +0000
+++ b/libmedia/VideoInput.h 2009-08-28 11:03:35 +0000
@@ -28,89 +28,106 @@
#include "dsodefs.h" //DSOEXPORT
-#include "asobj/flash/media/Camera_as.h"
-
namespace gnash {
namespace media {
+/// This is the interface for video input devices.
+//
+/// Each VideoInput should represent exactly one webcam (or similar device).
+//
+/// The interface for querying the camera is provisionally done, but needs
+/// more testing of how it actually works. Most of the values are faked.
+//
+/// TODO: separate the process of finding cameras from this class.
+/// It could be implemented as a static method. The available cameras
+/// and all created VideoInput objects should be stored in a
+/// MediaHandler, mapped by an index for retrieval by ActionScript.
+//
+/// TODO: design a useful interface for starting, stopping and attaching
+/// the video data. VideoInputGst has some functionality here, but it
+/// is not generic enough, relying on too many gst-specific
+/// implementation details.
class VideoInput {
public:
- DSOEXPORT VideoInput();
+ DSOEXPORT VideoInput() {}
// virtual classes need a virtual destructor !
virtual ~VideoInput() {}
- //setters and getters
- void set_activityLevel(double a) {_activityLevel = a;};
- double get_activityLevel () {return _activityLevel;};
-
- void set_bandwidth(int b) {_bandwidth = b;};
- int get_bandwidth() {return _bandwidth;};
-
- void set_currentFPS(double f) {_currentFPS=f;};
- double get_currentFPS() {return _currentFPS;};
-
- void set_fps(double f) {_fps = f;};
- double get_fps() {return _fps;};
-
- void set_height(int h) {_height = h;};
- int get_height() {return _height;};
-
- void set_index(int i) {_index = i;};
- int get_index() {return _index;};
-
- void set_keyFrameInterval(int i) {_keyFrameInterval = i;};
- int get_keyFrameInterval() {return _keyFrameInterval;};
-
- void set_loopback(bool l) {_loopback = l;};
- bool get_loopback() {return _loopback;};
-
- void set_motionLevel(int m) {_motionLevel = m;};
- int get_motionLevel() {return _motionLevel;};
-
- void set_motionTimeout(int m) {_motionTimeout = m;};
- int get_motionTimeout() {return _motionTimeout;};
-
- void set_muted(bool m) {_muted = m;};
- bool get_muted() {return _muted;};
-
- void set_name(std::string name) {_name = name;};
- std::string get_name() {return _name;};
-
- std::vector<std::string> get_names() {return _names;};
-
- void set_quality(int q) {_quality = q;};
- int get_quality() {return _quality;};
-
- void set_width(int w) {_width = w;};
- int get_width() {return _width;};
-
-protected:
- //specified in AS livedocs
- double _activityLevel;
- int _bandwidth;
- double _currentFPS;
- double _fps;
- int _height;
- int _index;
- int _keyFrameInterval;
- bool _loopback;
- int _motionLevel;
- int _motionTimeout;
- bool _muted;
- std::string _name;
- std::vector<std::string> _names;
- int _quality;
- int _width;
-
- //TODO: use this map to implement the Camera::get function
- //static std::map<int, camera_as_object*> _mapGet;
+ /// Return the current activity level of the webcam
+ //
+ /// @return A double specifying the amount of motion currently
+ /// detected by the camera.
+ virtual double activityLevel() const = 0;
+
+ /// The maximum available bandwidth for outgoing connections
+ //
+ /// TODO: see if this should really be here.
+ virtual size_t bandwidth() const = 0;
+
+ /// Set the bandwidth for outgoing connections.
+ virtual void setBandwidth(size_t bandwidth) = 0;
+
+ /// The current frame rate of the webcam
+ //
+ /// @return A double specifying the webcam's current FPS
+ virtual double currentFPS() const = 0;
+
+ /// The maximum FPS rate of the webcam
+ //
+ /// @return A double specifying the webcam's maximum FPS
+ virtual double fps() const = 0;
+
+ /// Return the height of the webcam's frame
+ virtual size_t height() const = 0;
+
+ /// Return the width of the webcam's frame
+ virtual size_t width() const = 0;
+
+ /// The index of the camera
+ virtual size_t index() const = 0;
+
+ /// Request a native mode most closely matching the passed variables.
+ //
+ /// @param width The required width
+ /// @param height The required height
+ /// @param fps The required frame rate
+ /// @param favorArea How to match the requested mode.
+ virtual void requestMode(size_t width, size_t height, double fps,
+ bool favorArea) = 0;
+
+ /// Set the amount of motion required before notifying the core
+ virtual void setMotionLevel(int m) = 0;
+
+ /// Return the current motionLevel setting
+ virtual int motionLevel() const = 0;
+
+ /// Set time without motion in milliseconds before core is notified
+ virtual void setMotionTimeout(int m) = 0;
+
+ /// Return the current motionTimeout setting.
+ virtual int motionTimeout() const = 0;
+
+ virtual void mute(bool m) = 0;
+ virtual bool muted() const = 0;
+
+ /// Return the name of this webcam
+ //
+ /// @return a string specifying the name of the webcam.
+ virtual const std::string& name() const = 0;
+
+ /// Set the quality of the webcam
+ virtual void setQuality(int q) = 0;
+
+ /// Return the current quality of the webcam
+ virtual int quality() const = 0;
+
};
-} // gnash.media namespace
+} // media namespace
} // gnash namespace
-#endif // __VIDEOINPUT_H__
+#endif
=== modified file 'libmedia/ffmpeg/MediaHandlerFfmpeg.cpp'
--- a/libmedia/ffmpeg/MediaHandlerFfmpeg.cpp 2009-02-25 02:00:44 +0000
+++ b/libmedia/ffmpeg/MediaHandlerFfmpeg.cpp 2009-08-28 11:03:35 +0000
@@ -25,6 +25,7 @@
#include "GnashException.h"
#include "FLVParser.h"
#include "VideoConverterFfmpeg.h"
+#include "VideoInputFfmpeg.h"
#include "IOChannel.h" // for visibility of destructor
#include "MediaParser.h" // for visibility of destructor
@@ -116,6 +117,18 @@
return ret;
}
+VideoInput*
+MediaHandlerFfmpeg::getVideoInput(size_t /*index*/)
+{
+ return new VideoInputFfmpeg();
+}
+
+void
+MediaHandlerFfmpeg::cameraNames(std::vector<std::string>& /*names*/) const
+{
+ log_unimpl("FFmpeg: camera names");
+}
+
size_t
MediaHandlerFfmpeg::getInputPaddingSize() const
{
=== modified file 'libmedia/ffmpeg/MediaHandlerFfmpeg.h'
--- a/libmedia/ffmpeg/MediaHandlerFfmpeg.h 2009-02-25 22:33:03 +0000
+++ b/libmedia/ffmpeg/MediaHandlerFfmpeg.h 2009-08-28 11:03:35 +0000
@@ -48,16 +48,24 @@
{
public:
- virtual std::auto_ptr<MediaParser>
createMediaParser(std::auto_ptr<IOChannel> stream);
+ virtual std::auto_ptr<MediaParser>
+ createMediaParser(std::auto_ptr<IOChannel> stream);
- virtual std::auto_ptr<VideoDecoder> createVideoDecoder(const VideoInfo&
info);
+ virtual std::auto_ptr<VideoDecoder>
+ createVideoDecoder(const VideoInfo& info);
virtual std::auto_ptr<VideoConverter>
- createVideoConverter(ImgBuf::Type4CC srcFormat, ImgBuf::Type4CC
dstFormat);
+ createVideoConverter(ImgBuf::Type4CC srcFormat,
+ ImgBuf::Type4CC dstFormat);
- virtual std::auto_ptr<AudioDecoder> createAudioDecoder(const AudioInfo&
info);
+ virtual std::auto_ptr<AudioDecoder>
+ createAudioDecoder(const AudioInfo& info);
virtual size_t getInputPaddingSize() const;
+
+ virtual VideoInput* getVideoInput(size_t index);
+
+ virtual void cameraNames(std::vector<std::string>& names) const;
};
=== added file 'libmedia/ffmpeg/VideoInputFfmpeg.cpp'
--- a/libmedia/ffmpeg/VideoInputFfmpeg.cpp 1970-01-01 00:00:00 +0000
+++ b/libmedia/ffmpeg/VideoInputFfmpeg.cpp 2009-08-28 11:03:35 +0000
@@ -0,0 +1,58 @@
+// VideoInputFfmpeg.cpp: Video input processing using FFmpeg
+//
+// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+#include "VideoInputFfmpeg.h"
+
+namespace gnash {
+namespace media {
+namespace ffmpeg {
+
+VideoInputFfmpeg::VideoInputFfmpeg()
+ :
+ _activityLevel(-1.0),
+ _bandwidth(16384),
+ _currentFPS(0),
+ _fps(15.0),
+ _height(120),
+ _width(160),
+ _index(0),
+ _motionLevel(50),
+ _motionTimeout(2000),
+ _muted(true),
+ _quality(0)
+{
+}
+
+void
+VideoInputFfmpeg::requestMode(size_t width, size_t height, double fps,
+ bool favorArea)
+{
+ // TODO: check what mode is available and set the best match.
+ _width = width;
+ _height = height;
+ _fps = fps;
+}
+
+
+VideoInputFfmpeg::~VideoInputFfmpeg()
+{
+}
+
+}
+}
+}
=== added file 'libmedia/ffmpeg/VideoInputFfmpeg.h'
--- a/libmedia/ffmpeg/VideoInputFfmpeg.h 1970-01-01 00:00:00 +0000
+++ b/libmedia/ffmpeg/VideoInputFfmpeg.h 2009-08-28 11:03:35 +0000
@@ -0,0 +1,178 @@
+// VideoInputFfmpeg.h: Video input processing using FFmpeg
+//
+// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
+//
+// This program is free software; you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation; either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+#ifndef GNASH_VIDEOINPUTFFMPEG_H
+#define GNASH_VIDEOINPUTFFMPEG_H
+
+#include <boost/cstdint.hpp> // for C99 int types
+#include "VideoInput.h"
+
+namespace gnash {
+namespace media {
+namespace ffmpeg {
+
+class VideoInputFfmpeg : public VideoInput
+{
+public:
+
+ /// Constructor for the VideoInputFfmpeg class
+ //
+ /// TODO: most of these properties need not be stored, but should rather
+ /// be queried from the input device.
+ VideoInputFfmpeg();
+
+ /// Destructor for the VideoInputFfmpeg class
+ virtual ~VideoInputFfmpeg();
+
+ static void getNames(std::vector<std::string>& /*names*/) {}
+
+ /// Return the current activity level of the webcam
+ //
+ /// @return A double specifying the amount of motion currently
+ /// detected by the camera.
+ double activityLevel () const { return _activityLevel; }
+
+ /// The maximum available bandwidth for outgoing connections
+ //
+ /// TODO: see if this should really be here.
+ size_t bandwidth() const { return _bandwidth; }
+
+ void setBandwidth(size_t bandwidth) {
+ _bandwidth = bandwidth;
+ }
+
+ /// The current frame rate of the webcam
+ //
+ /// @return A double specifying the webcam's current FPS
+ double currentFPS() const { return _currentFPS; }
+
+ /// The maximum FPS rate of the webcam
+ //
+ /// @return A double specifying the webcam's maximum FPS
+ double fps() const { return _fps; }
+
+ /// Return the height of the webcam's frame
+ size_t height() const { return _height; }
+
+ /// Return the width of the webcam's frame
+ size_t width() const { return _width; }
+
+ /// The index of the camera
+ size_t index() const { return _index; }
+
+ /// Request a native mode most closely matching the passed variables.
+ //
+ /// @param width The required width
+ /// @param height The required height
+ /// @param fps The required frame rate
+ /// @param favorArea How to match the requested mode.
+ void requestMode(size_t width, size_t height, double fps, bool favorArea);
+
+ /// Set the amount of motion required before notifying the core
+ void setMotionLevel(int m) { _motionLevel = m; }
+
+ /// Return the current motionLevel setting
+ int motionLevel() const { return _motionLevel; }
+
+ /// Set time without motion in milliseconds before core is notified
+ void setMotionTimeout(int m) { _motionTimeout = m; }
+
+ /// Return the current motionTimeout setting.
+ int motionTimeout() const { return _motionTimeout; }
+
+ void mute(bool m) { _muted = m; }
+ bool muted() const { return _muted; }
+
+ /// Return the name of this webcam
+ //
+ /// @return a string specifying the name of the webcam.
+ const std::string& name() const { return _name; }
+
+ /// Set the quality of the webcam
+ void setQuality(int q) { _quality = q; }
+
+ /// Return the current quality of the webcam
+ int quality() const { return _quality; }
+
+ /// \brief Function starts up the pipeline designed earlier in code
+ /// execution. This puts everything into motion.
+ ///
+ /// @return True if the pipeline was started correctly, false otherwise.
+ bool play();
+
+ /// \brief Function stops the pipeline designed earlier in code execution.
+ ///
+ /// @return True if the pipeline was stopped correctly, false otherwise.
+ bool stop();
+
+private:
+
+ /// TODO: see which of these need to be retrieved from the camera,
+ /// which of them should be stored like this, and which should
+ /// be stored in the Camera_as relay object.
+
+ /// The currently detected activity level. This should be queried from
+ /// the camera.
+ double _activityLevel;
+
+ /// The available bandwidth. This probably shouldn't be dealt with by
+ /// the camera class. But maybe it should.
+ size_t _bandwidth;
+
+ /// The current FPS of the camera. This should be queried from the
+ /// camera.
+ double _currentFPS;
+
+ /// The maximum FPS allowed.
+ double _fps;
+
+ /// The height of the frame. This should probably be retrieved from
+ /// the camera
+ size_t _height;
+
+ /// The width of the frame. This should probably be retrieved from
+ /// the camera
+ size_t _width;
+
+ /// The index of this Webcam
+ size_t _index;
+
+ /// The motion level required to trigger a notification to the core
+ int _motionLevel;
+
+ /// The length of inactivity required to trigger a notification to the
core.
+ int _motionTimeout;
+
+ /// Whether access to the camera is allowed. This depends on the rcfile
+ /// setting
+ bool _muted;
+
+ /// The name of this camera.
+ std::string _name;
+
+ /// The current quality setting.
+ int _quality;
+
+};
+
+
+} // ffmpeg namespace
+} // media namespace
+} // gnash namespace
+
+#endif
=== modified file 'libmedia/gst/MediaHandlerGst.cpp'
--- a/libmedia/gst/MediaHandlerGst.cpp 2009-05-29 12:30:46 +0000
+++ b/libmedia/gst/MediaHandlerGst.cpp 2009-08-28 10:02:58 +0000
@@ -23,6 +23,7 @@
#include "AudioDecoderGst.h"
#include "MediaParserGst.h"
#include "VideoConverterGst.h"
+#include "VideoInputGst.h"
#include "FLVParser.h"
#ifdef DECODING_SPEEX
@@ -153,6 +154,20 @@
return converter;
}
+VideoInput*
+MediaHandlerGst::getVideoInput(size_t /*index*/)
+{
+ // FIXME: these should be stored in the media handler, not newly
+ // created each time. The documentation is correct, implementation wrong.
+ return new VideoInputGst();
+}
+
+void
+MediaHandlerGst::cameraNames(std::vector<std::string>& names) const
+{
+ VideoInputGst::getNames(names);
+}
+
} // gnash.media.gst namespace
} // gnash.media namespace
} // gnash namespace
=== modified file 'libmedia/gst/MediaHandlerGst.h'
--- a/libmedia/gst/MediaHandlerGst.h 2009-02-25 22:33:03 +0000
+++ b/libmedia/gst/MediaHandlerGst.h 2009-08-28 10:02:58 +0000
@@ -48,14 +48,22 @@
{
public:
- virtual std::auto_ptr<MediaParser>
createMediaParser(std::auto_ptr<IOChannel> stream);
-
- virtual std::auto_ptr<VideoDecoder> createVideoDecoder(const VideoInfo&
info);
-
- virtual std::auto_ptr<AudioDecoder> createAudioDecoder(const AudioInfo&
info);
+ virtual std::auto_ptr<MediaParser>
+ createMediaParser(std::auto_ptr<IOChannel> stream);
+
+ virtual std::auto_ptr<VideoDecoder>
+ createVideoDecoder(const VideoInfo& info);
+
+ virtual std::auto_ptr<AudioDecoder>
+ createAudioDecoder(const AudioInfo& info);
- virtual std::auto_ptr<VideoConverter>
createVideoConverter(ImgBuf::Type4CC srcFormat,
- ImgBuf::Type4CC
dstFormat);
+ virtual std::auto_ptr<VideoConverter>
+ createVideoConverter(ImgBuf::Type4CC srcFormat,
+ ImgBuf::Type4CC dstFormat);
+
+ virtual VideoInput* getVideoInput(size_t index);
+
+ virtual void cameraNames(std::vector<std::string>& names) const;
};
@@ -63,4 +71,4 @@
} // gnash.media namespace
} // namespace gnash
-#endif // __MEDIAHANDLERGST_H__
+#endif
=== modified file 'libmedia/gst/VideoInputGst.cpp'
--- a/libmedia/gst/VideoInputGst.cpp 2009-08-24 19:39:20 +0000
+++ b/libmedia/gst/VideoInputGst.cpp 2009-08-28 11:45:25 +0000
@@ -24,9 +24,10 @@
#include "log.h"
#include "GstUtil.h"
#include "gst/gst.h"
+#include "rc.h"
+#include "utility.h"
#include <gst/interfaces/propertyprobe.h>
#include <vector>
-#include "rc.h"
#include <cmath>
@@ -38,1210 +39,1577 @@
namespace gnash {
namespace media {
namespace gst {
-
- //initializes the Gstreamer interface
- VideoInputGst::VideoInputGst()
- {
- gst_init(NULL,NULL);
-
- int devSelection;
- findVidDevs();
-
- //enumerate names array for actionscript accessibility
- for (size_t i = 0; i < _vidVect.size(); ++i) {
- _names.push_back(_vidVect[i]->getProductName());
- }
-
- devSelection = makeWebcamDeviceSelection();
- _devSelection = devSelection;
- //also set _index for actionscript accessibility
- if (devSelection < 10) {
- _index = devSelection;
- } else {
- log_error("too high an index value, will cause segfault");
- }
-
- transferToPrivate(devSelection);
- webcamCreateMainBin(_globalWebcam);
- webcamCreateDisplayBin(_globalWebcam);
- webcamCreateSaveBin(_globalWebcam);
- }
-
- VideoInputGst::~VideoInputGst()
- {
- log_unimpl("Video Input destructor");
- }
-
- //populates video devices to a vector of GnashWebcam pointers
- //which contain important information about the hardware camera
- //inputs available on the machine
- void
- VideoInputGst::findVidDevs()
- {
- _numdevs = 0;
-
- //find video test sources
- GstElement *element;
- element = gst_element_factory_make ("videotestsrc", "vidtestsrc");
-
- if (element == NULL) {
- log_error("%s: Could not create video test source.", __FUNCTION__);
- _vidVect.push_back(NULL);
- _numdevs += 1;
- } else {
- _vidVect.push_back(new GnashWebcam);
- _vidVect[_numdevs]->setElementPtr(element);
-
_vidVect[_numdevs]->setGstreamerSrc(g_strdup_printf("videotestsrc"));
- _vidVect[_numdevs]->setProductName(g_strdup_printf("videotest"));
- _numdevs += 1;
- }
-
- //find v4l devices
- GstPropertyProbe *probe;
- GValueArray *devarr;
- element = NULL;
-
- element = gst_element_factory_make ("v4lsrc", "v4lvidsrc");
- probe = GST_PROPERTY_PROBE (element);
- devarr = gst_property_probe_probe_and_get_values_name (probe,
"device");
- for (size_t i = 0; devarr != NULL && i < devarr->n_values; ++i) {
- GValue *val;
- gchar *dev_name = NULL;
-
- val = g_value_array_get_nth (devarr, i);
- g_object_set (element, "device", g_value_get_string (val), NULL);
- gst_element_set_state (element, GST_STATE_PLAYING);
- g_object_get (element, "device-name", &dev_name, NULL);
- gst_element_set_state (element, GST_STATE_NULL);
- if (g_strcmp0(dev_name, "null") == 0) {
- log_debug("No v4l video sources. Checking for other vid
inputs");
- }
- else {
- _vidVect.push_back(new GnashWebcam);
- _vidVect[_numdevs]->setElementPtr(element);
- _vidVect[_numdevs]->setGstreamerSrc(g_strdup_printf("v4lsrc"));
- _vidVect[_numdevs]->setProductName(dev_name);
-
- //set device location information (e.g. /dev/video0)
- gchar *location;
- g_object_get (element, "device", &location , NULL);
- _vidVect[_numdevs]->setDevLocation(location);
- _numdevs += 1;
- }
- }
- if (devarr) {
- g_value_array_free (devarr);
- }
-
- //find v4l2 devices
- probe = NULL;
- devarr = NULL;
- element = NULL;
-
- element = gst_element_factory_make ("v4l2src", "v4l2vidsrc");
- probe = GST_PROPERTY_PROBE (element);
- devarr = gst_property_probe_probe_and_get_values_name (probe,
"device");
- for (size_t i = 0; devarr != NULL && i < devarr->n_values; ++i) {
- GValue *val;
- gchar *dev_name = NULL;
-
- val = g_value_array_get_nth (devarr, i);
- g_object_set (element, "device", g_value_get_string (val), NULL);
- gst_element_set_state (element, GST_STATE_PLAYING);
- g_object_get (element, "device-name", &dev_name, NULL);
- gst_element_set_state (element, GST_STATE_NULL);
- if (g_strcmp0(dev_name, "null") == 0) {
- log_debug("no v4l2 video sources found.");
- }
- else {
- _vidVect.push_back(new GnashWebcam);
- _vidVect[_numdevs]->setElementPtr(element);
-
_vidVect[_numdevs]->setGstreamerSrc(g_strdup_printf("v4l2src"));
- _vidVect[_numdevs]->setProductName(dev_name);
-
- //set device location information (e.g. /dev/video0)
- gchar *location;
- g_object_get (element, "device", &location , NULL);
- _vidVect[_numdevs]->setDevLocation(location);
- _numdevs += 1;
- }
- }
- if (devarr) {
- g_value_array_free (devarr);
- }
- }
-
-
- //called by addSupportedFormat. finds the highest possible framerate
- //to record at (can be shaped down by a filter for performance)
- void
- VideoInputGst::findHighestFramerate(WebcamVidFormat *format)
- {
- gint framerate_numerator;
- gint framerate_denominator;
- gint i;
-
- //Select the highest framerate up to less than or equal to 30 Hz
- framerate_numerator = 1;
- framerate_denominator = 1;
- for (i = 0; i < format->numFramerates; i++) {
- gfloat framerate = format->framerates[i].numerator /
- format->framerates[i].denominator;
- if (framerate > ((float) framerate_numerator /
framerate_denominator)
- && framerate <= 30) {
- framerate_numerator = format->framerates[i].numerator;
- framerate_denominator = format->framerates[i].denominator;
- }
- }
- //set highest found above
- format->highestFramerate.numerator = framerate_numerator;
- format->highestFramerate.denominator = framerate_denominator;
- }
-
- //find the framerates at which the selected format can handle input
- void
- VideoInputGst::getSupportedFramerates
- (WebcamVidFormat *video_format, GstStructure *structure)
- {
- const GValue *framerates;
- gint i, j;
-
- //note that framerates may contain one value, a list, or a range
- framerates = gst_structure_get_value (structure, "framerate");
- if (GST_VALUE_HOLDS_FRACTION (framerates)) {
- video_format->numFramerates = 1;
- video_format->framerates =
- g_new0 (FramerateFraction, video_format->numFramerates);
- video_format->framerates[0].numerator =
- gst_value_get_fraction_numerator (framerates);
- video_format->framerates[0].denominator =
- gst_value_get_fraction_denominator (framerates);
- }
- else if (GST_VALUE_HOLDS_LIST (framerates)) {
- video_format->numFramerates = gst_value_list_get_size (framerates);
- video_format->framerates =
- g_new0 (FramerateFraction, video_format->numFramerates);
- for (i = 0; i < video_format->numFramerates; i++) {
- const GValue *value;
- value = gst_value_list_get_value (framerates, i);
- video_format->framerates[i].numerator =
- gst_value_get_fraction_numerator (value);
- video_format->framerates[i].denominator =
- gst_value_get_fraction_denominator (value);
- }
- }
- else if (GST_VALUE_HOLDS_FRACTION_RANGE (framerates)) {
- gint numerator_min, denominator_min, numerator_max,
denominator_max;
- const GValue *fraction_range_min;
- const GValue *fraction_range_max;
-
- fraction_range_min =
- gst_value_get_fraction_range_min (framerates);
- numerator_min =
- gst_value_get_fraction_numerator (fraction_range_min);
- denominator_min =
- gst_value_get_fraction_denominator (fraction_range_min);
-
- fraction_range_max = gst_value_get_fraction_range_max (framerates);
- numerator_max =
- gst_value_get_fraction_numerator (fraction_range_max);
- denominator_max =
- gst_value_get_fraction_denominator (fraction_range_max);
- log_debug ("FractionRange: %d/%d - %d/%d",
- numerator_min, denominator_min, numerator_max,
denominator_max);
-
- video_format->numFramerates =
- (numerator_max - numerator_min + 1) *
- (denominator_max - denominator_min + 1);
- video_format->framerates =
- g_new0 (FramerateFraction, video_format->numFramerates);
- int k = 0;
- for (i = numerator_min; i <= numerator_max; i++) {
- for (j = denominator_min; j <= denominator_max; j++) {
- video_format->framerates[k].numerator = i;
- video_format->framerates[k].denominator = j;
- k++;
- }
- }
- }
- else {
- g_critical ("GValue type %s, cannot be handled for framerates",
- G_VALUE_TYPE_NAME (framerates));
- }
- }
-
- //we found a supported framerate and want to add the information to
- //the GnashWebcam structure
- void
- VideoInputGst::addSupportedFormat(GnashWebcam *cam, WebcamVidFormat
*video_format,
- GstStructure *format_structure)
- {
- gint i;
- gchar *resolution;
-
- getSupportedFramerates(video_format, format_structure);
- findHighestFramerate(video_format);
-
- resolution = g_strdup_printf ("%ix%i", video_format->width,
- video_format->height);
- i = GPOINTER_TO_INT(g_hash_table_lookup (cam->supportedResolutions,
resolution));
-
- //if i returns a value, maybe this resolution has been added
previously?
- if(i) {
- WebcamVidFormat *curr_format =
- &g_array_index(cam->videoFormats, WebcamVidFormat, i - 1);
- gfloat new_framerate =
(float)(video_format->highestFramerate.numerator /
- video_format->highestFramerate.denominator);
- gfloat curr_framerate =
(float)(curr_format->highestFramerate.numerator /
-
curr_format->highestFramerate.denominator);
- if (new_framerate > curr_framerate) {
- log_debug("higher framerate replacing existing format");
- *curr_format = *video_format;
- }
-
- g_free (resolution);
-
- return;
- }
-
- g_array_append_val (cam->videoFormats, *video_format);
- g_hash_table_insert (cam->supportedResolutions, resolution,
- GINT_TO_POINTER(cam->numVideoFormats + 1));
-
- cam->numVideoFormats++;
- }
-
- //pulls webcam device selection from gnashrc (will eventually tie into
- //gui)
- int
- VideoInputGst::makeWebcamDeviceSelection()
- {
- int dev_select;
+
+/// \class GnashWebcamPrivate
+///
+/// This class is initialized once a hardware video input device is chosen.
+/// It is really the workhorse of VideoInputGst. It contains all the important
+/// Gstreamer elements (element pointers, bins, pipelines, the GMainLoop, etc.)
+///
+class GnashWebcamPrivate
+{
+ public:
+ /// Constructor for the GnashWebcamPrivate class.
+ GnashWebcamPrivate();
+
+ /// \brief Accessor to set the private _webcamDevice variable in the
+ /// GnashWebcamPrivate class.
+ ///
+ /// @param d A pointer to a GnashWebcam class for the selected input
device.
+ void setWebcamDevice(GnashWebcam *d) {_webcamDevice = d;}
+
+ //FIXME: this should eventually be a private or protected data field
+ //protected:
+
+ /// \var GnashWebcamPrivate::_pipeline
+ ///
+ /// \brief A pointer to the main Gstreamer pipeline that all
+ /// created elements and bins will be dropped into.
+ GstElement *_pipeline;
+
+ /// \var GnashWebcamPrivate::_webcamSourceBin
+ ///
+ /// A pointer to the Gstreamer source bin. This variable is set
+ /// inside of the make_webcamSourceBin() function. The pipeline
+ /// API of this source bin is written as follows:
+ /// videosourcedevice ! capsfilter (ghostpad)
+ GstElement *_webcamSourceBin;
+
+ /// \var GnashWebcamPrivate::_webcamMainBin
+ ///
+ /// A pointer to the Gstreamer main bin. This variable is set
+ /// inside of the make_webcamMainBin() function. The pipeline
+ /// API of the main bin is written as follows:
+ /// tee ! save_queue (ghostpad)
+ ///
+ /// tee ! display_queue (ghostpad)
+ ///
+ /// This basically creates two queues where video stream data sits
+ /// and can be attached (optionally) to a display_bin to show the
+ /// video onscreen or to a save_bin to mux-out the stream and
+ /// save to a file on disk.
+ GstElement *_webcamMainBin;
+
+ /// \var GnashWebcamPrivate::_videoDisplayBin
+ ///
+ /// A pointer to the Gstreamer display bin. This variable is set
+ /// inside of the make_webcam_display_bin() function. The pipeline
+ /// API of the video_display_bin is written as follows:
+ ///
+ /// videoscale ! videosink
+ ///
+ /// This bin is dropped into the webcam_main_bin, but by default
+ /// the connection to display_queue is not made. This means that
+ /// even though the video_display_bin is created, it is not linked
+ /// and thus will not show video to the screen unless you call the
+ /// webcamMakeVideoDisplayLink() function.
+ GstElement *_videoDisplayBin;
+
+ /// \var GnashWebcamPrivate::_videoSaveBin
+ ///
+ /// A pointer to the Gstreamer video_save_bin. This variable is set
+ /// inside of the make_webcam_save_bin() function. The pipeline
+ /// API of the video_save_bin is written as follows:
+ ///
+ /// ffmpegcolorspace ! videorate ! videoscale ! theoraenc ! oggmux !
filesink
+ ///
+ /// This bin is dropped into the webcam_main_bin and is linked
automatically
+ /// to the video_save_queue element in the webcam_main_bin
+ /// Note: if you want to save the file in a different format, simply
+ /// link up video scale to a different encoder and muxer.
+ GstElement *_videoSaveBin;
+
+ /// \var GnashWebcamPrivate::_videoSource
+ /// \brief Contains a direct link to the src pad in the video source
+ /// element. This is different from _webcamSourceBin in that
+ /// it points to the video source element INSIDE the bin, not
+ /// the source bin itself.
+ GstElement *_videoSource;
+
+ /// \var GnashWebcamPrivate::_capsFilter
+ /// \brief Contains a direct link to the src pad in the capsfilter
+ /// element.
+ GstElement *_capsFilter;
+
+ /// \var GnashWebcamPrivate:_videoFileSink
+ /// \brief Contains a direct link to the video_file_sink element
+ GstElement *_videoFileSink;
+
+ /// \var GnashWebcamPrivate::_videoEnc
+ /// \brief Contains a direct link to the video encoder element
+ GstElement *_videoEnc;
+
+ /// \var GnashWebcamPrivate::_pipelineIsPlaying
+ /// \brief Boolean value which is changed based on whether or not
+ /// the Gstreamer pipeline status is GST_STATE_PLAYING (true)
+ /// or GST_STATE_NULL (false), GST_STATE_READY (false),
+ /// GST_STATE_PAUSED (false).
+ gboolean _pipelineIsPlaying;
+
+ /// \var GnashWebcamPrivate::_webcamDevice
+ /// \brief Contains a pointer to the original GnashWebcam class
+ /// that was created when enumerating and probing attached
+ /// hardware.
+ GnashWebcam *_webcamDevice;
+
+ /// \var GnashWebcamPrivate::_currentFormat
+ /// \brief Contains a pointer to the WebcamVidFormat data structure
+ /// selected to be used with this pipeline.
+ WebcamVidFormat *_currentFormat;
+
+ /// \var GnashWebcamPrivate::_eosTimeoutId
+ /// \brief This variable is not currently used, but will eventually
+ /// be used as a timeout when networking encapsulation is being
+ /// used.
+ guint _eosTimeoutId;
+};
+
+/// \class GnashWebcam
+///
+/// The initial data structure used to store enumerated information about
+/// attached hardware video input devices. This class is smaller in size
+/// than the GnashWebcamPrivate class which is initialized once the user
+/// specifies a hardware input device to use in the gnashrc file.
+///
+class GnashWebcam {
+ public:
+ /// \brief Accessor to retrieve the private _element variable
+ /// from the GnashWebcam class which contains a pointer
+ /// to the video source element.
+ ///
+ /// @return GstElement* to the video source element
+ GstElement* getElementPtr() {return _element;};
+
+ /// \brief Accessor to set the private _element variable from
+ /// the GnashWebcam class.
+ ///
+ /// @param element The GstElement pointer to the video source element.
+ void setElementPtr(GstElement* element) {_element = element;};
+
+ /// \brief Accessor to get the private _devLocation variable from
+ /// the GnashWebcam class.
+ ///
+ /// @return The _devLocation private variable from GnashWebcam class.
+ gchar* getDevLocation() {return _devLocation;};
+
+ /// \brief Accessor to set the private _devLocation variable from
+ /// the GnashWebcam class.
+ ///
+ /// @param l A gchar* containing the physical location of the video
+ /// input hardware device (e.g. on Linux typically would be set
+ /// to '/dev/video0').
+ void setDevLocation(gchar *l) {_devLocation = l;};
+
+ /// \brief Accessor to return the private _gstreamerSrc variable
+ /// from the GnashWebcam class.
+ ///
+ /// @return The _gstreamerSrc variable from the GnashWebcam class.
+ /// which should contain the type of the Gstreamer video source
+ /// element (e.g. v4lsrc, v4l2src).
+ gchar* getGstreamerSrc() {return _gstreamerSrc;};
+
+ /// \brief Accessor to set the private _gstreamerSrc variable
+ /// from the GnashWebcam class.
+ ///
+ /// @param s A gchar* containing the type of the Gstreamer source
+ /// element type (e.g. v4lsrc, v4l2src, etc)
+ void setGstreamerSrc(gchar *s) {_gstreamerSrc = s;};
+
+ /// \brief Accessor to get the private _productName variable
+ /// from the GnashWebcam class.
+ ///
+ /// @return A gchar* containing the video input's hardware name
+ /// (e.g. Built-In Webcam or Microsoft LifeCam VX500).
+ gchar* getProductName() {return _productName;};
+
+ /// \brief Accessor to set the private _productName variable
+ /// from the GnashWebcam class.
+ ///
+ /// @param n A gchar* to the hardware input device's hardware name
+ /// (e.g. Built-In Webcam or Microsoft LifeCam VX500).
+ void setProductName(gchar *n) {_productName = n;};
+
+ /// \var GnashWebcam::numVideoFormats
+ /// \brief Contains an integer value representing the number of
+ /// video formats the camera supports (used for iteration
+ /// purposes).
+ gint numVideoFormats;
+
+ /// \var GnashWebcam::videoFormats
+ /// \brief A GArray containing WebcamVidFormat data structures
+ /// (see WebcamVidFormat class documentation for more info).
+ GArray* videoFormats;
+
+ /// \var GnashWebcam::supportedResolutions
+ /// \brief A hash table for easy lookup of resolutions the hardware
+ /// camera supports.
+ GHashTable* supportedResolutions;
+
+ /// Constructor for the GnashWebcam class.
+ GnashWebcam();
+
+ private:
+ /// \var GnashWebcam::_element
+ /// \brief GstElement* which points to the video source
+ /// element.
+ GstElement* _element;
+
+ /// \var GnashWebcam::_devLocation
+ /// \brief Contains the physical location of the webcam device
+ /// (e.g. on Linux typically would be set to /dev/video0).
+ gchar* _devLocation;
+
+ /// \var GnashWebcam::_gstreamerSrc
+ /// \brief Contains a gchar* which describes the gstreamer source
+ /// type (e.g. v4lsrc or v4l2src).
+ gchar* _gstreamerSrc;
+
+ /// \var GnashWebcam::_productName
+ /// \brief Contains a gchar* which describes the name of the hardware
+ /// device (e.g. Built-In Webcam or Microsoft LifeCam VX500).
+ gchar* _productName;
+};
+
+void
+VideoInputGst::getNames(std::vector<std::string>& names)
+{
+ // Make sure gst is initialized
+ gst_init(NULL, NULL);
+
+ std::vector<GnashWebcam*> cams;
+ // Check for devices
+ findVidDevs(cams);
+
+ for (size_t i = 0; i < cams.size(); ++i) {
+ GnashWebcam* cam = cams[i];
+ if (cam) names.push_back(cam->getProductName());
+ }
+}
+
+
+//initializes the Gstreamer interface
+VideoInputGst::VideoInputGst()
+ :
+ _activityLevel(-1.0),
+ _bandwidth(16384),
+ _currentFPS(0),
+ _fps(15.0),
+ _height(120),
+ _width(160),
+ _index(0),
+ _motionLevel(50),
+ _motionTimeout(2000),
+ _muted(true),
+ _quality(0)
+{
+ gst_init(NULL,NULL);
+
+ // TODO: there is really no need to store all the cameras, as a
+ // VideoInput class should correspond to one camera.
+ findVidDevs(_vidVect);
+
+ _devSelection = makeWebcamDeviceSelection();
+ //also set _index for actionscript accessibility
+ if (_devSelection < 10) {
+ _index = _devSelection;
+ } else {
+ log_error("too high an index value, will cause segfault");
+ }
+
+ setWebcam(_devSelection);
+ webcamCreateMainBin();
+ webcamCreateDisplayBin();
+ webcamCreateSaveBin();
+}
+
+VideoInputGst::~VideoInputGst()
+{
+}
+
+bool
+VideoInputGst::init()
+{
+ return webcamCreateMainBin() && webcamCreateDisplayBin() &&
+ webcamMakeVideoDisplayLink();
+}
+
+void
+VideoInputGst::requestMode(size_t width, size_t height, double fps,
+ bool favorArea)
+{
+ // TODO: this should select an available height, width and frame rate
+ // depending on the favorArea variable.
+ _width = width;
+ _height = height;
+ _fps = fps;
+
+ UNUSED(favorArea);
+
+ // I don't know what the point is of this. It was previously in Camera_as,
+ // where it certainly shouldn't be.
+ webcamChangeSourceBin();
+
+}
+
+// Populates video devices to a vector of GnashWebcam pointers
+// which contain important information about the hardware camera
+// inputs available on the machine
+void
+VideoInputGst::findVidDevs(std::vector<GnashWebcam*>& cameraList)
+{
+
+ //find video test sources
+ GstElement *element;
+ element = gst_element_factory_make ("videotestsrc", "vidtestsrc");
+
+ if (element == NULL) {
+ log_error("%s: Could not create video test source.", __FUNCTION__);
+ cameraList.push_back(NULL);
+ } else {
+ cameraList.push_back(new GnashWebcam);
+ GnashWebcam& cam = *cameraList.back();
+ cam.setElementPtr(element);
+ cam.setGstreamerSrc(g_strdup_printf("videotestsrc"));
+ cam.setProductName(g_strdup_printf("videotest"));
+ }
+
+ //find v4l devices
+ GstPropertyProbe *probe;
+ GValueArray *devarr;
+ element = NULL;
+
+ element = gst_element_factory_make ("v4lsrc", "v4lvidsrc");
+ probe = GST_PROPERTY_PROBE (element);
+ devarr = gst_property_probe_probe_and_get_values_name (probe, "device");
+ for (size_t i = 0; devarr != NULL && i < devarr->n_values; ++i) {
+ GValue *val;
+ gchar *dev_name = NULL;
+
+ val = g_value_array_get_nth (devarr, i);
+ g_object_set (element, "device", g_value_get_string (val), NULL);
+ gst_element_set_state (element, GST_STATE_PLAYING);
+ g_object_get (element, "device-name", &dev_name, NULL);
+ gst_element_set_state (element, GST_STATE_NULL);
+ if (g_strcmp0(dev_name, "null") == 0) {
+ log_debug("No v4l video sources. Checking for other vid inputs");
+ }
+ else {
+ cameraList.push_back(new GnashWebcam);
+ GnashWebcam& cam = *cameraList.back();
+
+ cam.setElementPtr(element);
+ cam.setGstreamerSrc(g_strdup_printf("v4lsrc"));
+ cam.setProductName(dev_name);
+
+ //set device location information (e.g. /dev/video0)
+ gchar *location;
+ g_object_get (element, "device", &location , NULL);
+ cam.setDevLocation(location);
+ }
+ }
+ if (devarr) {
+ g_value_array_free (devarr);
+ }
+
+ //find v4l2 devices
+ probe = NULL;
+ devarr = NULL;
+ element = NULL;
+
+ element = gst_element_factory_make ("v4l2src", "v4l2vidsrc");
+ probe = GST_PROPERTY_PROBE (element);
+ devarr = gst_property_probe_probe_and_get_values_name (probe, "device");
+ for (size_t i = 0; devarr != NULL && i < devarr->n_values; ++i) {
+ GValue *val;
+ gchar *dev_name = NULL;
+
+ val = g_value_array_get_nth (devarr, i);
+ g_object_set (element, "device", g_value_get_string (val), NULL);
+ gst_element_set_state (element, GST_STATE_PLAYING);
+ g_object_get (element, "device-name", &dev_name, NULL);
+ gst_element_set_state (element, GST_STATE_NULL);
+ if (g_strcmp0(dev_name, "null") == 0) {
+ log_debug("no v4l2 video sources found.");
+ }
+ else {
+ cameraList.push_back(new GnashWebcam);
+ GnashWebcam& cam = *cameraList.back();
+ cam.setElementPtr(element);
+ cam.setGstreamerSrc(g_strdup_printf("v4l2src"));
+ cam.setProductName(dev_name);
+
+ //set device location information (e.g. /dev/video0)
+ gchar *location;
+ g_object_get (element, "device", &location , NULL);
+ cam.setDevLocation(location);
+ }
+ }
+ if (devarr) {
+ g_value_array_free (devarr);
+ }
+}
+
+
+//called by addSupportedFormat. finds the highest possible framerate
+//to record at (can be shaped down by a filter for performance)
+void
+VideoInputGst::findHighestFramerate(WebcamVidFormat *format)
+{
+ gint framerate_numerator;
+ gint framerate_denominator;
+ gint i;
+
+ //Select the highest framerate up to less than or equal to 30 Hz
+ framerate_numerator = 1;
+ framerate_denominator = 1;
+ for (i = 0; i < format->numFramerates; i++) {
+ gfloat framerate = format->framerates[i].numerator /
+ format->framerates[i].denominator;
+ if (framerate > ((float) framerate_numerator / framerate_denominator)
+ && framerate <= 30) {
+ framerate_numerator = format->framerates[i].numerator;
+ framerate_denominator = format->framerates[i].denominator;
+ }
+ }
+ //set highest found above
+ format->highestFramerate.numerator = framerate_numerator;
+ format->highestFramerate.denominator = framerate_denominator;
+}
+
+//find the framerates at which the selected format can handle input
+void
+VideoInputGst::getSupportedFramerates
+ (WebcamVidFormat *video_format, GstStructure *structure)
+{
+ const GValue *framerates;
+ gint i, j;
+
+ //note that framerates may contain one value, a list, or a range
+ framerates = gst_structure_get_value (structure, "framerate");
+ if (GST_VALUE_HOLDS_FRACTION (framerates)) {
+ video_format->numFramerates = 1;
+ video_format->framerates =
+ g_new0 (FramerateFraction, video_format->numFramerates);
+ video_format->framerates[0].numerator =
+ gst_value_get_fraction_numerator (framerates);
+ video_format->framerates[0].denominator =
+ gst_value_get_fraction_denominator (framerates);
+ }
+ else if (GST_VALUE_HOLDS_LIST (framerates)) {
+ video_format->numFramerates = gst_value_list_get_size (framerates);
+ video_format->framerates =
+ g_new0 (FramerateFraction, video_format->numFramerates);
+ for (i = 0; i < video_format->numFramerates; i++) {
+ const GValue *value;
+ value = gst_value_list_get_value (framerates, i);
+ video_format->framerates[i].numerator =
+ gst_value_get_fraction_numerator (value);
+ video_format->framerates[i].denominator =
+ gst_value_get_fraction_denominator (value);
+ }
+ }
+ else if (GST_VALUE_HOLDS_FRACTION_RANGE (framerates)) {
+ gint numerator_min, denominator_min, numerator_max, denominator_max;
+ const GValue *fraction_range_min;
+ const GValue *fraction_range_max;
+
+ fraction_range_min =
+ gst_value_get_fraction_range_min (framerates);
+ numerator_min =
+ gst_value_get_fraction_numerator (fraction_range_min);
+ denominator_min =
+ gst_value_get_fraction_denominator (fraction_range_min);
+
+ fraction_range_max = gst_value_get_fraction_range_max (framerates);
+ numerator_max =
+ gst_value_get_fraction_numerator (fraction_range_max);
+ denominator_max =
+ gst_value_get_fraction_denominator (fraction_range_max);
+ log_debug ("FractionRange: %d/%d - %d/%d",
+ numerator_min, denominator_min, numerator_max, denominator_max);
+
+ video_format->numFramerates =
+ (numerator_max - numerator_min + 1) *
+ (denominator_max - denominator_min + 1);
+ video_format->framerates =
+ g_new0 (FramerateFraction, video_format->numFramerates);
+ int k = 0;
+ for (i = numerator_min; i <= numerator_max; i++) {
+ for (j = denominator_min; j <= denominator_max; j++) {
+ video_format->framerates[k].numerator = i;
+ video_format->framerates[k].denominator = j;
+ k++;
+ }
+ }
+ }
+ else {
+ g_critical ("GValue type %s, cannot be handled for framerates",
+ G_VALUE_TYPE_NAME (framerates));
+ }
+}
+
+//we found a supported framerate and want to add the information to
+//the GnashWebcam structure
+void
+VideoInputGst::addSupportedFormat(GnashWebcam *cam, WebcamVidFormat *video_format,
+    GstStructure *format_structure)
+{
+ gint i;
+ gchar *resolution;
+
+ getSupportedFramerates(video_format, format_structure);
+ findHighestFramerate(video_format);
+
+ resolution = g_strdup_printf ("%ix%i", video_format->width,
+ video_format->height);
+    i = GPOINTER_TO_INT(g_hash_table_lookup (cam->supportedResolutions, resolution));
+
+ //if i returns a value, maybe this resolution has been added previously?
+ if(i) {
+ WebcamVidFormat *curr_format =
+ &g_array_index(cam->videoFormats, WebcamVidFormat, i - 1);
+        gfloat new_framerate = (float)(video_format->highestFramerate.numerator /
+            video_format->highestFramerate.denominator);
+        gfloat curr_framerate = (float)(curr_format->highestFramerate.numerator /
+            curr_format->highestFramerate.denominator);
+ if (new_framerate > curr_framerate) {
+ log_debug("higher framerate replacing existing format");
+ *curr_format = *video_format;
+ }
+
+ g_free (resolution);
+
+ return;
+ }
+
+ g_array_append_val (cam->videoFormats, *video_format);
+ g_hash_table_insert (cam->supportedResolutions, resolution,
+ GINT_TO_POINTER(cam->numVideoFormats + 1));
+
+ cam->numVideoFormats++;
+}
+
+//pulls webcam device selection from gnashrc (will eventually tie into
+//gui)
+int
+VideoInputGst::makeWebcamDeviceSelection()
+{
+ int dev_select;
+ dev_select = rcfile.getWebcamDevice();
+ if (dev_select == -1) {
+        log_debug("%s: No webcam selected in rc file, setting to videotestsource",
+            __FUNCTION__);
+        rcfile.setWebcamDevice(0);
+        dev_select = rcfile.getWebcamDevice();
- if (dev_select == -1) {
- log_debug("%s: No webcam selected in rc file, setting to
videotestsource",
- __FUNCTION__);
- rcfile.setWebcamDevice(0);
- dev_select = rcfile.getWebcamDevice();
- } else {
- log_debug("Camera %d specified in gnashrc file, using that one.",
- dev_select);
- }
- //make sure that the device selected is actually valid
-
- const int webcamDevice = rcfile.getWebcamDevice();
- if (webcamDevice < 0 ||
- static_cast<size_t>(webcamDevice) >= _vidVect.size()) {
-
- log_error("You have an invalid camera selected. Please "
- "check your gnashrc file");
- exit(EXIT_FAILURE);
- }
-
- //set _name value for actionscript
- _name = _vidVect[dev_select]->getProductName();
-
- //now that a selection has been made, get capabilities of that device
- getSelectedCaps(rcfile.getWebcamDevice());
- return rcfile.getWebcamDevice();
- }
-
- //called after a device selection, this starts enumerating the device's
- //capabilities
- void
- VideoInputGst::getSelectedCaps(gint dev_select)
- {
- GstElement *pipeline;
- gchar *command;
- GError *error = NULL;
- GstStateChangeReturn return_val;
- GstBus *bus;
- GstMessage *message;
-
- GnashWebcam *data_struct = _vidVect[dev_select];
- GstElement *element;
- element = data_struct->getElementPtr();
-
- if (dev_select < 0 ||
- static_cast<size_t>(dev_select) >= _vidVect.size()) {
- log_error("%s: Passed an invalid argument (not a valid "
- "dev_select value)", __FUNCTION__);
- exit(EXIT_FAILURE);
- }
-
- //create tester pipeline to enumerate properties
- if (dev_select == 0) {
- command = g_strdup_printf ("%s name=src ! fakesink",
- data_struct->getGstreamerSrc());
- }
- else {
- command = g_strdup_printf ("%s name=src device=%s ! fakesink",
- data_struct->getGstreamerSrc(), data_struct->getDevLocation());
- }
- pipeline = gst_parse_launch(command, &error);
- if ((pipeline != NULL) && (error == NULL)) {
- //Wait at most 5 seconds for the pipeline to start playing
- gst_element_set_state (pipeline, GST_STATE_PLAYING);
- return_val =
- gst_element_get_state (pipeline, NULL, NULL, 5 * GST_SECOND);
-
- //errors on bus?
- bus = gst_element_get_bus (pipeline);
- message = gst_bus_poll (bus, GST_MESSAGE_ERROR, 0);
-
- if (GST_IS_OBJECT(bus)){
- gst_object_unref (bus);
- } else {
- log_error("%s: Pipeline bus isn't an object for some reason",
- __FUNCTION__);
- }
-
- //if everything above worked properly, begin probing for values
-        if ((return_val == GST_STATE_CHANGE_SUCCESS) && (message == NULL)) {
- GstElement *src;
- GstPad *pad;
- GstCaps *caps;
-
- gst_element_set_state(pipeline, GST_STATE_PAUSED);
-
- src = gst_bin_get_by_name(GST_BIN(pipeline), "src");
-
- //get the pad, find the capabilities for probing in supported
formats
- pad = gst_element_get_pad (src, "src");
- caps = gst_pad_get_caps (pad);
- if (GST_IS_OBJECT(pad)) {
- gst_object_unref (pad);
- } else {
- log_error("%s: Template pad isn't an object for some
reason",
- __FUNCTION__);
- }
- if (dev_select != 0) {
- getSupportedFormats(data_struct, caps);
- }
-
- gst_caps_unref (caps);
- }
- gst_element_set_state (pipeline, GST_STATE_NULL);
- if (GST_IS_OBJECT(pipeline)){
- gst_object_unref (pipeline);
- } else {
- log_error("%s: pipeline isn't an object for some reason",
- __FUNCTION__);
- }
- }
-
- if (error) {
- g_error_free (error);
- }
- g_free (command);
- }
-
- //probe the selected camera for the formats it supports
- void
- VideoInputGst::getSupportedFormats(GnashWebcam *cam, GstCaps *caps)
- {
- gint i;
- gint num_structs;
-
- num_structs = gst_caps_get_size (caps);
-
- for (i=0; i < num_structs; i++) {
- GstStructure *structure;
- const GValue *width, *height;
-
- //this structure is used to probe the source for information
- structure = gst_caps_get_structure (caps, i);
-
- //we just want to enumerate raw formats to keep things consistent
- //so if the strcuture we're currently looking at isn't either of
- //the standard raw formats, keep iterating through the loop
- if (!gst_structure_has_name (structure, "video/x-raw-yuv") &&
- !gst_structure_has_name (structure, "video/x-raw-rgb"))
- {
- continue;
- }
-
- width = gst_structure_get_value (structure, "width");
- height = gst_structure_get_value (structure, "height");
-
- if (G_VALUE_HOLDS_INT (width)) {
- WebcamVidFormat video_format;
-
- video_format.mimetype =
- g_strdup (gst_structure_get_name (structure));
- gst_structure_get_int (structure, "width",
&(video_format.width));
- gst_structure_get_int (structure, "height",
&(video_format.height));
- addSupportedFormat(cam, &video_format, structure);
- }
- else if (GST_VALUE_HOLDS_INT_RANGE (width)) {
- int min_width, max_width, min_height, max_height;
- int cur_width, cur_height;
-
- min_width = gst_value_get_int_range_min (width);
- max_width = gst_value_get_int_range_max (width);
- min_height = gst_value_get_int_range_min (height);
- max_height = gst_value_get_int_range_max (height);
-
- cur_width = min_width;
- cur_height = min_height;
- while (cur_width <= max_width && cur_height <= max_height) {
- WebcamVidFormat video_format;
-
- video_format.mimetype =
- g_strdup (gst_structure_get_name (structure));
- video_format.width = cur_width;
- video_format.height = cur_height;
- addSupportedFormat(cam, &video_format, structure);
- cur_width *= 2;
- cur_height *= 2;
- }
-
- cur_width = max_width;
- cur_height = max_height;
- while (cur_width > min_width && cur_height > min_height) {
- WebcamVidFormat video_format;
-
- video_format.mimetype =
- g_strdup (gst_structure_get_name (structure));
- video_format.width = cur_width;
- video_format.height = cur_height;
- addSupportedFormat(cam, &video_format, structure);
- cur_width /= 2;
- cur_height /= 2;
- }
- }
- else {
- log_error("%s: type %s, cannot be handled for resolution
width",
- __FUNCTION__, G_VALUE_TYPE_NAME (width));
- }
- }
- }
-
- //move the selected camera information to a more robust data structure
- //to store pipeline-ing information
- GnashWebcamPrivate*
- VideoInputGst::transferToPrivate(gint dev_select)
- {
- if (dev_select < 0 ||
- static_cast<size_t>(dev_select) >= _vidVect.size()) {
-
- log_error("%s: Passed an invalid argument (bad dev_select value)",
- __FUNCTION__);
- exit(EXIT_FAILURE);
- }
- GnashWebcamPrivate *webcam = new GnashWebcamPrivate;
- if (webcam != NULL) {
- webcam->setWebcamDevice(_vidVect[dev_select]);
- webcam->setDeviceName(_vidVect[dev_select]->getProductName());
- _globalWebcam = webcam;
- } else {
- log_error("%s: was passed a NULL pointer", __FUNCTION__);
- }
- return webcam;
- }
-
- //create a bin containing the source and a connector ghostpad
- gboolean
- VideoInputGst::webcamCreateSourceBin(GnashWebcamPrivate *webcam)
- {
- GError *error = NULL;
- gchar *command = NULL;
-
- if(webcam->_webcamDevice == NULL) {
- log_debug("%s: You don't have any webcams chosen, using
videotestsrc",
- __FUNCTION__);
- webcam->_webcamSourceBin = gst_parse_bin_from_description (
- "videotestsrc name=video_source ! capsfilter name=capsfilter",
- TRUE, &error);
- log_debug("Command: videotestsrc name=video_source ! \
- capsfilter name=capsfilter");
- }
- else {
- WebcamVidFormat *format = NULL;
- gint i;
- gchar *resolution;
-
- resolution = g_strdup_printf("%ix%i", _width, _height);
-
- //use these resolutions determined above if the camera supports it
- if (_width != 0 && _height != 0) {
-
- i = GPOINTER_TO_INT(g_hash_table_lookup
- (webcam->_webcamDevice->supportedResolutions, resolution));
- //the selected res is supported if i
- if (i) {
- format = &g_array_index
(webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i - 1);
- }
- }
-
- //if format didn't get set, something went wrong. try picking
- //the first supported format and a different supported resolution
- if (!format) {
- format = &g_array_index (webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, 0);
- for (i = 1; i < webcam->_webcamDevice->numVideoFormats; i++) {
- if (g_array_index (webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i).width <= format->width){
- format = &g_array_index
(webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i);
- }
- }
- }
-
- webcam->_currentFormat = format;
- g_free(resolution);
-
- //if format isn't set, something is still going wrong, make generic
- //components and see if they work!
- if (format == NULL) {
- if (error != NULL) {
- g_error_free (error);
- error = NULL;
- }
- webcam->_webcamSourceBin =
- gst_parse_bin_from_description ("videotestsrc
name=video_source",
- TRUE, &error);
- webcam->_videoSource =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "video_source");
-
- //if there are still errors, something's up, return out of
function
- if (error != NULL) {
- g_error_free (error);
- return false;
- }
- webcam->_capsFilter =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "capsfilter");
- return true;
- }
-
- //execution here means we're good to make the pipeline
- else {
- //can't reduce this to 80 line limit without causing problems
- command = g_strdup_printf (
- "%s name=video_source device=%s ! capsfilter name=capsfilter
caps=video/x-raw-rgb,width=%d,height=%d,framerate=%d/%d;video/x-raw-yuv,width=%d,height=%d,framerate=%d/%d",
- webcam->_webcamDevice->getGstreamerSrc(),
- webcam->_webcamDevice->getDevLocation(),
- format->width,
- format->height,
- format->highestFramerate.numerator,
- format->highestFramerate.denominator,
- format->width,
- format->height,
- format->highestFramerate.numerator,
- format->highestFramerate.denominator);
-
- //debug
- log_debug("GstPipeline command is: %s", command);
-
- webcam->_webcamSourceBin =
- gst_parse_bin_from_description (command, TRUE, &error);
- if (webcam->_webcamSourceBin == NULL) {
- log_error ("%s: Creation of the webcam_source_bin failed",
- __FUNCTION__);
- log_error ("the error was %s", error->message);
- return false;
- }
-
- //set _currentFps value for actionscript
- _currentFPS = (format->highestFramerate.numerator /
- format->highestFramerate.denominator);
-
- g_free(command);
-
- webcam->_videoSource =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "video_source");
- webcam->_capsFilter =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "capsfilter");
- return true;
- }
- }
- return true;
- }
-
- gboolean
- VideoInputGst::checkForSupportedFramerate(GnashWebcamPrivate *webcam,
- int fps)
- {
-
- for (int i = 0; i < webcam->_currentFormat->numFramerates; ++i) {
- int val = std::ceil(
- webcam->_currentFormat->framerates[i].numerator /
- webcam->_currentFormat->framerates[i].denominator);
- if (val == fps) {
- return true;
- }
- }
- return false;
- }
-
- gboolean
- VideoInputGst::webcamChangeSourceBin(GnashWebcamPrivate *webcam)
- {
- GError *error = NULL;
- gchar *command = NULL;
-
- if(webcam->_pipelineIsPlaying == true) {
- webcamStop(webcam);
- }
-
- //delete the old source bin
- gst_bin_remove(GST_BIN(webcam->_webcamMainBin),
webcam->_webcamSourceBin);
- webcam->_webcamSourceBin = NULL;
-
- if(webcam->_webcamDevice == NULL) {
- log_debug("%s: You don't have any webcams chosen, using
videotestsrc",
- __FUNCTION__);
- webcam->_webcamSourceBin = gst_parse_bin_from_description (
- "videotestsrc name=video_source ! capsfilter name=capsfilter",
- TRUE, &error);
- log_debug("Command: videotestsrc name=video_source ! \
- capsfilter name=capsfilter");
- }
- else {
- WebcamVidFormat *format = NULL;
- gint i;
- gchar *resolution;
-
- resolution = g_strdup_printf("%ix%i", _width, _height);
-
- //use these resolutions determined above if the camera supports it
- if (_width != 0 && _height != 0) {
-
- i = GPOINTER_TO_INT(g_hash_table_lookup
- (webcam->_webcamDevice->supportedResolutions, resolution));
- //the selected res is supported if i
- if (i) {
- format = &g_array_index
(webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i - 1);
- }
- }
-
- //if format didn't get set, something went wrong. try picking
- //the first supported format and a different supported resolution
- if (!format) {
- log_error("%s: the resolution you chose isn't supported,
picking \
- a supported value", __FUNCTION__);
- format = &g_array_index (webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, 0);
- for (i = 1; i < webcam->_webcamDevice->numVideoFormats; i++) {
- if (g_array_index (webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i).width <= format->width){
- format = &g_array_index
(webcam->_webcamDevice->videoFormats,
- WebcamVidFormat, i);
- }
- }
- }
-
- //check here to make sure the fps value is supported (only valid
for
- //non test sources)
- if (! g_strcmp0(webcam->_webcamDevice->getGstreamerSrc(),
"videotestsrc") == 0) {
- int newFps = _fps;
- if (checkForSupportedFramerate(webcam, newFps)) {
- log_debug("checkforsupportedfr returned true");
- format->highestFramerate.numerator = newFps;
- format->highestFramerate.denominator = 1;
- } else {
- log_debug("checkforsupportedfr returned false");
-
- //currently chooses the ActionScript default of 15 fps in
case
- //you pass in an unsupported framerate value
- format->highestFramerate.numerator = 15;
- format->highestFramerate.denominator = 1;
- }
- }
- webcam->_currentFormat = format;
- g_free(resolution);
-
- //if format isn't set, something is still going wrong, make generic
- //components and see if they work!
- if (format == NULL) {
- if (error != NULL) {
- g_error_free (error);
- error = NULL;
- }
- webcam->_webcamSourceBin =
- gst_parse_bin_from_description ("videotestsrc
name=video_source",
- TRUE, &error);
- webcam->_videoSource =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "video_source");
-
- //if there are still errors, something's up, return out of
function
- if (error != NULL) {
- g_error_free (error);
- return false;
- }
- webcam->_capsFilter =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "capsfilter");
- return true;
- }
-
- //execution here means we're good to make the pipeline
- else {
- //can't reduce this to 80 line limit without causing problems
- command = g_strdup_printf (
- "%s name=video_source device=%s ! capsfilter name=capsfilter
caps=video/x-raw-rgb,width=%d,height=%d,framerate=%d/%d;video/x-raw-yuv,width=%d,height=%d,framerate=%d/%d",
- webcam->_webcamDevice->getGstreamerSrc(),
- webcam->_webcamDevice->getDevLocation(),
- format->width,
- format->height,
- format->highestFramerate.numerator,
- format->highestFramerate.denominator,
- format->width,
- format->height,
- format->highestFramerate.numerator,
- format->highestFramerate.denominator);
-
- //debug
- log_debug ("GstPipeline command is: %s", command);
-
- webcam->_webcamSourceBin =
- gst_parse_bin_from_description (command, TRUE, &error);
- if (webcam->_webcamSourceBin == NULL) {
- log_error ("%s: Creation of the webcam_source_bin failed",
- __FUNCTION__);
- log_error ("the error was %s", error->message);
- return false;
- }
-
- g_free(command);
-
- //set _currentFps for actionscript
- _currentFPS = (format->highestFramerate.numerator /
- format->highestFramerate.denominator);
-
- webcam->_videoSource =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "video_source");
- webcam->_capsFilter =
- gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
- "capsfilter");
-
- //drop the new source bin back into the main bin
- gboolean result;
- result = gst_bin_add(GST_BIN(webcam->_webcamMainBin),
- webcam->_webcamSourceBin);
+ } else {
+ log_debug("Camera %d specified in gnashrc file, using that one.",
+ dev_select);
+ }
+ //make sure that the device selected is actually valid
+
+ const int webcamDevice = rcfile.getWebcamDevice();
+ if (webcamDevice < 0 ||
+ static_cast<size_t>(webcamDevice) >= _vidVect.size()) {
+
+ log_error("You have an invalid camera selected. Please "
+ "check your gnashrc file");
+ exit(EXIT_FAILURE);
+ }
+
+ //set _name value for actionscript
+ _name = _vidVect[dev_select]->getProductName();
+
+ //now that a selection has been made, get capabilities of that device
+ getSelectedCaps(rcfile.getWebcamDevice());
+ return rcfile.getWebcamDevice();
+}
+
+//called after a device selection, this starts enumerating the device's
+//capabilities
+void
+VideoInputGst::getSelectedCaps(gint dev_select)
+{
+ GstElement *pipeline;
+ gchar *command;
+ GError *error = NULL;
+ GstStateChangeReturn return_val;
+ GstBus *bus;
+ GstMessage *message;
+
+ GnashWebcam *data_struct = _vidVect[dev_select];
+ GstElement *element;
+ element = data_struct->getElementPtr();
+
+ if (dev_select < 0 ||
+ static_cast<size_t>(dev_select) >= _vidVect.size()) {
+ log_error("%s: Passed an invalid argument (not a valid "
+ "dev_select value)", __FUNCTION__);
+ exit(EXIT_FAILURE);
+ }
+
+ //create tester pipeline to enumerate properties
+ if (dev_select == 0) {
+ command = g_strdup_printf ("%s name=src ! fakesink",
+ data_struct->getGstreamerSrc());
+ }
+ else {
+ command = g_strdup_printf ("%s name=src device=%s ! fakesink",
+ data_struct->getGstreamerSrc(), data_struct->getDevLocation());
+ }
+ pipeline = gst_parse_launch(command, &error);
+ if ((pipeline != NULL) && (error == NULL)) {
+ //Wait at most 5 seconds for the pipeline to start playing
+ gst_element_set_state (pipeline, GST_STATE_PLAYING);
+ return_val =
+ gst_element_get_state (pipeline, NULL, NULL, 5 * GST_SECOND);
+
+ //errors on bus?
+ bus = gst_element_get_bus (pipeline);
+ message = gst_bus_poll (bus, GST_MESSAGE_ERROR, 0);
+
+ if (GST_IS_OBJECT(bus)){
+ gst_object_unref (bus);
+ } else {
+ log_error("%s: Pipeline bus isn't an object for some reason",
+ __FUNCTION__);
+ }
+
+ //if everything above worked properly, begin probing for values
+ if ((return_val == GST_STATE_CHANGE_SUCCESS) && (message == NULL)) {
+ GstElement *src;
+ GstPad *pad;
+ GstCaps *caps;
+
+ gst_element_set_state(pipeline, GST_STATE_PAUSED);
+
+ src = gst_bin_get_by_name(GST_BIN(pipeline), "src");
+
+            //get the pad, find the capabilities for probing in supported formats
+ pad = gst_element_get_pad (src, "src");
+ caps = gst_pad_get_caps (pad);
+ if (GST_IS_OBJECT(pad)) {
+ gst_object_unref (pad);
+ } else {
+ log_error("%s: Template pad isn't an object for some reason",
+ __FUNCTION__);
+ }
+ if (dev_select != 0) {
+ getSupportedFormats(data_struct, caps);
+ }
+
+ gst_caps_unref (caps);
+ }
+ gst_element_set_state (pipeline, GST_STATE_NULL);
+ if (GST_IS_OBJECT(pipeline)){
+ gst_object_unref (pipeline);
+ } else {
+ log_error("%s: pipeline isn't an object for some reason",
+ __FUNCTION__);
+ }
+ }
+
+ if (error) {
+ g_error_free (error);
+ }
+ g_free (command);
+}
+
+//probe the selected camera for the formats it supports
+void
+VideoInputGst::getSupportedFormats(GnashWebcam *cam, GstCaps *caps)
+{
+ gint i;
+ gint num_structs;
+
+ num_structs = gst_caps_get_size (caps);
+
+ for (i=0; i < num_structs; i++) {
+ GstStructure *structure;
+ const GValue *width, *height;
+
+ //this structure is used to probe the source for information
+ structure = gst_caps_get_structure (caps, i);
+
+ //we just want to enumerate raw formats to keep things consistent
+ //so if the strcuture we're currently looking at isn't either of
+ //the standard raw formats, keep iterating through the loop
+ if (!gst_structure_has_name (structure, "video/x-raw-yuv") &&
+ !gst_structure_has_name (structure, "video/x-raw-rgb"))
+ {
+ continue;
+ }
+
+ width = gst_structure_get_value (structure, "width");
+ height = gst_structure_get_value (structure, "height");
+
+ if (G_VALUE_HOLDS_INT (width)) {
+ WebcamVidFormat video_format;
+
+ video_format.mimetype =
+ g_strdup (gst_structure_get_name (structure));
+            gst_structure_get_int (structure, "width", &(video_format.width));
+            gst_structure_get_int (structure, "height", &(video_format.height));
+ addSupportedFormat(cam, &video_format, structure);
+ }
+ else if (GST_VALUE_HOLDS_INT_RANGE (width)) {
+ int min_width, max_width, min_height, max_height;
+ int cur_width, cur_height;
+
+ min_width = gst_value_get_int_range_min (width);
+ max_width = gst_value_get_int_range_max (width);
+ min_height = gst_value_get_int_range_min (height);
+ max_height = gst_value_get_int_range_max (height);
+
+ cur_width = min_width;
+ cur_height = min_height;
+ while (cur_width <= max_width && cur_height <= max_height) {
+ WebcamVidFormat video_format;
+
+ video_format.mimetype =
+ g_strdup (gst_structure_get_name (structure));
+ video_format.width = cur_width;
+ video_format.height = cur_height;
+ addSupportedFormat(cam, &video_format, structure);
+ cur_width *= 2;
+ cur_height *= 2;
+ }
+
+ cur_width = max_width;
+ cur_height = max_height;
+ while (cur_width > min_width && cur_height > min_height) {
+ WebcamVidFormat video_format;
+
+ video_format.mimetype =
+ g_strdup (gst_structure_get_name (structure));
+ video_format.width = cur_width;
+ video_format.height = cur_height;
+ addSupportedFormat(cam, &video_format, structure);
+ cur_width /= 2;
+ cur_height /= 2;
+ }
+ }
+ else {
+ log_error("%s: type %s, cannot be handled for resolution width",
+ __FUNCTION__, G_VALUE_TYPE_NAME (width));
+ }
+ }
+}
+
+//move the selected camera information to a more robust data structure
+//to store pipeline-ing information
+bool
+VideoInputGst::setWebcam(size_t dev_select)
+{
+ assert(dev_select < _vidVect.size());
+
+ GnashWebcamPrivate *webcam = new GnashWebcamPrivate;
+ if (webcam) {
+ webcam->setWebcamDevice(_vidVect[dev_select]);
+ const char* name = _vidVect[dev_select]->getProductName();
+ assert(name);
+ _name = name;
+ _globalWebcam = webcam;
+ } else {
+ log_error("%s: was passed a NULL pointer", __FUNCTION__);
+ }
+ return webcam;
+}
+
+//create a bin containing the source and a connector ghostpad
+gboolean
+VideoInputGst::webcamCreateSourceBin()
+{
+ GError *error = NULL;
+ gchar *command = NULL;
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if(webcam->_webcamDevice == NULL) {
+ log_debug("%s: You don't have any webcams chosen, using videotestsrc",
+ __FUNCTION__);
+ webcam->_webcamSourceBin = gst_parse_bin_from_description (
+ "videotestsrc name=video_source ! capsfilter name=capsfilter",
+ TRUE, &error);
+ log_debug("Command: videotestsrc name=video_source ! \
+ capsfilter name=capsfilter");
+ }
+ else {
+ WebcamVidFormat *format = NULL;
+
+ std::ostringstream ss;
+ ss << _width << 'x' << _height;
+ const std::string& res = ss.str();
+
+ //use these resolutions determined above if the camera supports it
+ if (_width != 0 && _height != 0) {
+
+ int i = GPOINTER_TO_INT(g_hash_table_lookup
+ (webcam->_webcamDevice->supportedResolutions, res.c_str()));
+ //the selected res is supported if i
+ if (i) {
+ format = &g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, i - 1);
+ }
+ }
+
+ //if format didn't get set, something went wrong. try picking
+ //the first supported format and a different supported resolution
+ if (!format) {
+ format = &g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, 0);
+ for (int i = 1; i < webcam->_webcamDevice->numVideoFormats; ++i) {
+
+ if (g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, i).width <= format->width){
+                    format = &g_array_index (webcam->_webcamDevice->videoFormats,
+                        WebcamVidFormat, i);
+ }
+ }
+ }
+
+ webcam->_currentFormat = format;
+
+ //if format isn't set, something is still going wrong, make generic
+ //components and see if they work!
+ if (format == NULL) {
+ if (error != NULL) {
+ g_error_free (error);
+ error = NULL;
+ }
+ webcam->_webcamSourceBin =
+                gst_parse_bin_from_description ("videotestsrc name=video_source",
+ TRUE, &error);
+ webcam->_videoSource =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "video_source");
+
+ //if there are still errors, something's up, return out of function
+ if (error != NULL) {
+ g_error_free (error);
+ return false;
+ }
+ webcam->_capsFilter =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "capsfilter");
+ return true;
+ }
+
+ //execution here means we're good to make the pipeline
+ else {
+ //can't reduce this to 80 line limit without causing problems
+ command = g_strdup_printf (
+                "%s name=video_source device=%s ! capsfilter name=capsfilter caps=video/x-raw-rgb,width=%d,height=%d,framerate=%d/%d;video/x-raw-yuv,width=%d,height=%d,framerate=%d/%d",
+ webcam->_webcamDevice->getGstreamerSrc(),
+ webcam->_webcamDevice->getDevLocation(),
+ format->width,
+ format->height,
+ format->highestFramerate.numerator,
+ format->highestFramerate.denominator,
+ format->width,
+ format->height,
+ format->highestFramerate.numerator,
+ format->highestFramerate.denominator);
+
+ //debug
+ log_debug("GstPipeline command is: %s", command);
+
+ webcam->_webcamSourceBin =
+ gst_parse_bin_from_description (command, TRUE, &error);
+ if (webcam->_webcamSourceBin == NULL) {
+ log_error ("%s: Creation of the webcam_source_bin failed",
+ __FUNCTION__);
+ log_error ("the error was %s", error->message);
+ return false;
+ }
+
+ //set _currentFps value for actionscript
+ _currentFPS = (format->highestFramerate.numerator /
+ format->highestFramerate.denominator);
+
+ g_free(command);
+
+ webcam->_videoSource =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "video_source");
+ webcam->_capsFilter =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "capsfilter");
+ return true;
+ }
+ }
+ return true;
+}
+
+gboolean
+VideoInputGst::checkForSupportedFramerate(GnashWebcamPrivate *webcam,
+ int fps)
+{
+
+ for (int i = 0; i < webcam->_currentFormat->numFramerates; ++i) {
+ int val = std::ceil(
+ webcam->_currentFormat->framerates[i].numerator /
+ webcam->_currentFormat->framerates[i].denominator);
+ if (val == fps) {
+ return true;
+ }
+ }
+ return false;
+}
+
+gboolean
+VideoInputGst::webcamChangeSourceBin()
+{
+ GError *error = NULL;
+ gchar *command = NULL;
+
+ assert(_globalWebcam);
+
+ if (_globalWebcam->_pipelineIsPlaying == true) {
+ stop();
+ }
+
+ //delete the old source bin
+ gst_bin_remove(GST_BIN(_globalWebcam->_webcamMainBin),
+ _globalWebcam->_webcamSourceBin);
+ _globalWebcam->_webcamSourceBin = NULL;
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if(webcam->_webcamDevice == NULL) {
+ log_debug("%s: You don't have any webcams chosen, using videotestsrc",
+ __FUNCTION__);
+ webcam->_webcamSourceBin = gst_parse_bin_from_description (
+ "videotestsrc name=video_source ! capsfilter name=capsfilter",
+ TRUE, &error);
+ log_debug("Command: videotestsrc name=video_source ! \
+ capsfilter name=capsfilter");
+ }
+ else {
+ WebcamVidFormat *format = NULL;
+
+ std::ostringstream ss;
+ ss << _width << 'x' << _height;
+ const std::string& res = ss.str();
+
+ //use these resolutions determined above if the camera supports it
+ if (_width != 0 && _height != 0) {
+
+ int i = GPOINTER_TO_INT(g_hash_table_lookup
+ (webcam->_webcamDevice->supportedResolutions, res.c_str()));
+ //the selected res is supported if i
+ if (i) {
+ format = &g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, i - 1);
+ }
+ }
+
+ //if format didn't get set, something went wrong. try picking
+ //the first supported format and a different supported resolution
+ if (!format) {
+ log_error("%s: the resolution you chose isn't supported, picking \
+ a supported value", __FUNCTION__);
+ format = &g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, 0);
+
+ for (int i = 1; i < webcam->_webcamDevice->numVideoFormats; ++i) {
+ if (g_array_index (webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, i).width <= format->width){
+ format = &g_array_index
(webcam->_webcamDevice->videoFormats,
+ WebcamVidFormat, i);
+ }
+ }
+ }
+
+ //check here to make sure the fps value is supported (only valid for
+ //non test sources)
+ if (! g_strcmp0(webcam->_webcamDevice->getGstreamerSrc(),
"videotestsrc") == 0) {
+ int newFps = _fps;
+ if (checkForSupportedFramerate(webcam, newFps)) {
+ log_debug("checkforsupportedfr returned true");
+ format->highestFramerate.numerator = newFps;
+ format->highestFramerate.denominator = 1;
+ } else {
+ log_debug("checkforsupportedfr returned false");
+
+ //currently chooses the ActionScript default of 15 fps in case
+ //you pass in an unsupported framerate value
+ format->highestFramerate.numerator = 15;
+ format->highestFramerate.denominator = 1;
+ }
+ }
+ webcam->_currentFormat = format;
+
+ //if format isn't set, something is still going wrong, make generic
+ //components and see if they work!
+ if (format == NULL) {
+ if (error != NULL) {
+ g_error_free (error);
+ error = NULL;
+ }
+ webcam->_webcamSourceBin =
+ gst_parse_bin_from_description ("videotestsrc
name=video_source",
+ TRUE, &error);
+ webcam->_videoSource =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "video_source");
+
+ //if there are still errors, something's up, return out of function
+ if (error != NULL) {
+ g_error_free (error);
+ return false;
+ }
+ webcam->_capsFilter =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "capsfilter");
+ return true;
+ }
+
+ //execution here means we're good to make the pipeline
+ else {
+ //can't reduce this to 80 line limit without causing problems
+ command = g_strdup_printf (
+ "%s name=video_source device=%s ! capsfilter name=capsfilter
caps=video/x-raw-rgb,width=%d,height=%d,framerate=%d/%d;video/x-raw-yuv,width=%d,height=%d,framerate=%d/%d",
+ webcam->_webcamDevice->getGstreamerSrc(),
+ webcam->_webcamDevice->getDevLocation(),
+ format->width,
+ format->height,
+ format->highestFramerate.numerator,
+ format->highestFramerate.denominator,
+ format->width,
+ format->height,
+ format->highestFramerate.numerator,
+ format->highestFramerate.denominator);
+
+ //debug
+ log_debug ("GstPipeline command is: %s", command);
+
+ webcam->_webcamSourceBin =
+ gst_parse_bin_from_description (command, TRUE, &error);
+ if (webcam->_webcamSourceBin == NULL) {
+ log_error ("%s: Creation of the webcam_source_bin failed",
+ __FUNCTION__);
+ log_error ("the error was %s", error->message);
+ return false;
+ }
+
+ g_free(command);
+
+ //set _currentFps for actionscript
+ _currentFPS = (format->highestFramerate.numerator /
+ format->highestFramerate.denominator);
+
+ webcam->_videoSource =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "video_source");
+ webcam->_capsFilter =
+ gst_bin_get_by_name (GST_BIN (webcam->_webcamSourceBin),
+ "capsfilter");
+
+ //drop the new source bin back into the main bin
+ gboolean result;
+ result = gst_bin_add(GST_BIN(webcam->_webcamMainBin),
+ webcam->_webcamSourceBin);
+ if (result != true) {
+ log_error("%s: couldn't drop the sourcebin back into the main
bin",
+ __FUNCTION__);
+ return false;
+ } else {
+ //get the tee from main bin
+ GstElement *tee =
gst_bin_get_by_name(GST_BIN(webcam->_webcamMainBin),
+ "tee");
+ result = gst_element_link(webcam->_webcamSourceBin, tee);
if (result != true) {
- log_error("%s: couldn't drop the sourcebin back into the
main bin",
- __FUNCTION__);
- return false;
- } else {
- //get the tee from main bin
- GstElement *tee =
gst_bin_get_by_name(GST_BIN(webcam->_webcamMainBin),
- "tee");
- result = gst_element_link(webcam->_webcamSourceBin, tee);
- if (result != true) {
- log_error("%s: couldn't link up sourcebin and tee",
__FUNCTION__);
- return false;
- } else {
- return true;
- }
- }
- }
- }
- return true;
- }
-
- //create a display bin that has ghostpads which allow display-to-screen
- //capabilities as well as save-to-file or buffer capabilities (both
- //implemented as bin ghostpads)
- gboolean
- VideoInputGst::webcamCreateMainBin(GnashWebcamPrivate *webcam)
- {
- GstElement *tee, *video_display_queue, *save_queue;
- gboolean ok;
- GstPad *pad;
-
- //initialize a new GST pipeline
- webcam->_pipeline = gst_pipeline_new("pipeline");
-
- webcam->_webcamMainBin = gst_bin_new ("webcam_main_bin");
-
- ok = webcamCreateSourceBin(webcam);
- if (ok != true) {
- log_error("%s: problem creating source bin", __FUNCTION__);
- return false;
- }
-
- if ((tee = gst_element_factory_make ("tee", "tee")) == NULL) {
- log_error("%s: problem creating tee element", __FUNCTION__);
- return false;
- }
- if ((save_queue = gst_element_factory_make("queue", "save_queue")) ==
NULL) {
- log_error("%s: problem creating save_queue element", __FUNCTION__);
- return false;
- }
- if ((video_display_queue =
- gst_element_factory_make("queue", "video_display_queue")) == NULL)
{
- log_error("%s: problem creating video_display_queue element",
__FUNCTION__);
- return false;
- }
-
- //add created elements to a bin
- gst_bin_add_many (GST_BIN (webcam->_webcamMainBin),
webcam->_webcamSourceBin,
- tee, save_queue, video_display_queue, NULL);
-
- ok = gst_element_link(webcam->_webcamSourceBin, tee);
- if (ok != true) {
- log_error("%s: couldn't link webcam_source_bin and tee",
__FUNCTION__);
- return false;
- }
-
- ok &= gst_element_link_many (tee, save_queue, NULL);
- if (ok != true) {
- log_error("%s: couldn't link tee and save_queue", __FUNCTION__);
- return false;
- }
-
- ok &= gst_element_link_many (tee, video_display_queue, NULL);
- if (ok != true) {
- log_error("%s: couldn't link tee and video_display_queue",
__FUNCTION__);
- return false;
- }
-
- gst_bin_add (GST_BIN(webcam->_pipeline), webcam->_webcamMainBin);
-
- //add ghostpad to save_queue (allows connections between bins)
- pad = gst_element_get_pad (save_queue, "src");
- if (pad == NULL) {
- log_error("%s: couldn't get save_queue_src_pad", __FUNCTION__);
- return false;
- }
- gst_element_add_pad (webcam->_webcamMainBin,
- gst_ghost_pad_new ("save_queue_src", pad));
- gst_object_unref (GST_OBJECT (pad));
-
- //add ghostpad to video_display_queue
- pad = gst_element_get_pad (video_display_queue, "src");
- if (pad == NULL) {
- log_error("%s: couldn't get video_display_queue_pad",
__FUNCTION__);
- return false;
- }
- gst_element_add_pad (webcam->_webcamMainBin,
- gst_ghost_pad_new ("video_display_queue_src", pad));
- gst_object_unref (GST_OBJECT (pad));
-
-
- if (!ok) {
- log_error("%s: Unable to create main pipeline", __FUNCTION__);
- return false;
- }
- return true;
- }
-
- gboolean
- VideoInputGst::webcamCreateDisplayBin(GnashWebcamPrivate *webcam)
- {
- GstElement *video_scale, *video_sink;
- gboolean ok;
- GstPad *pad;
-
- webcam->_videoDisplayBin = gst_bin_new("video_display_bin");
-
- if (webcam->_videoDisplayBin == NULL) {
- log_error("%s: something went wrong creating the new
video_display_bin",
- __FUNCTION__);
- return false;
- }
-
- if ((video_scale = gst_element_factory_make("videoscale",
"video_scale")) == NULL) {
- log_error("%s: problem creating video_scale element",
__FUNCTION__);
- return false;
- }
- else {
- //set bilinear scaling
- g_object_set (video_scale, "method", 1, NULL);
- }
-
- if ((video_sink = gst_element_factory_make("autovideosink",
"video_sink")) == NULL) {
- log_error("%s: problem creating the video_sink element",
__FUNCTION__);
- return false;
- }
-
- //add created elements to a bin
- gst_bin_add_many (GST_BIN (webcam->_videoDisplayBin), video_scale,
video_sink, NULL);
-
- ok = gst_element_link_many(video_scale, video_sink, NULL);
- if (ok != true) {
- log_error("%s: something went wrong in linking elements in
video_display_bin",
- __FUNCTION__);
- return false;
- }
-
- //create ghostpad which can be used to connect this bin to the
- //video_display_queue src ghostpad
- pad = gst_element_get_pad (video_scale, "sink");
- gst_element_add_pad (webcam->_videoDisplayBin, gst_ghost_pad_new
("sink", pad));
- gst_object_unref (GST_OBJECT (pad));
-
- return true;
- }
-
- //make link between display_queue src ghostpad in main_bin and
- //the elements necessary to display video to screen (_videoDisplayBin)
- gboolean
- VideoInputGst::webcamMakeVideoDisplayLink(GnashWebcamPrivate *webcam)
- {
- if (gst_bin_get_by_name(GST_BIN(webcam->_pipeline),
"video_display_bin") == NULL) {
- gst_object_ref(webcam->_videoDisplayBin);
- gst_bin_add (GST_BIN(webcam->_pipeline), webcam->_videoDisplayBin);
- }
-
- GstPad *video_display_queue_src, *video_display_bin_sink;
-
- video_display_queue_src = gst_element_get_pad(webcam->_webcamMainBin,
- "video_display_queue_src");
- video_display_bin_sink = gst_element_get_pad(webcam->_videoDisplayBin,
- "sink");
-
- GstPadLinkReturn padreturn;
- padreturn = gst_pad_link(video_display_queue_src,
video_display_bin_sink);
-
- if (padreturn == GST_PAD_LINK_OK) {
- return true;
- } else {
- log_error("something went wrong in the make_video_display_link
function");
- return false;
- }
- }
-
- //break the link that displays the webcam video to the screen
- gboolean
- VideoInputGst::webcamBreakVideoDisplayLink(GnashWebcamPrivate *webcam)
- {
- if (webcam->_pipelineIsPlaying == true) {
- GstStateChangeReturn state;
- state = gst_element_set_state(webcam->_pipeline, GST_STATE_NULL);
- if (state != GST_STATE_CHANGE_FAILURE) {
- webcam->_pipelineIsPlaying = false;
- } else {
- return false;
- }
- }
-
- gboolean ok;
- GstPad *videoDisplayQueueSrc, *videoDisplayBinSink;
-
- videoDisplayQueueSrc = gst_element_get_pad(webcam->_webcamMainBin,
- "video_display_queue_src");
- videoDisplayBinSink = gst_element_get_pad(webcam->_videoDisplayBin,
- "sink");
-
- ok = gst_pad_unlink(videoDisplayQueueSrc, videoDisplayBinSink);
-
- if (ok != true) {
- log_error("%s: the unlinking of the pads failed", __FUNCTION__);
- return false;
- } else {
- return true;
- }
- }
-
- //make link to saveQueue in main bin
- gboolean
- VideoInputGst::webcamMakeVideoSaveLink(GnashWebcamPrivate *webcam)
- {
- if (gst_bin_get_by_name(GST_BIN(webcam->_pipeline), "video_save_bin")
== NULL) {
- gst_object_ref(webcam->_videoSaveBin);
- gst_bin_add(GST_BIN(webcam->_pipeline), webcam->_videoSaveBin);
- }
-
- //linking
- GstPad *video_save_queue_src, *video_save_sink;
-
- video_save_queue_src = gst_element_get_pad(webcam->_webcamMainBin,
"save_queue_src");
- video_save_sink = gst_element_get_pad(webcam->_videoSaveBin, "sink");
-
- GstPadLinkReturn padreturn;
- padreturn = gst_pad_link(video_save_queue_src, video_save_sink);
-
- if (padreturn == GST_PAD_LINK_OK) {
- return true;
- } else {
- log_error("%s: something went wrong in the make_video_display_link
function",
- __FUNCTION__);
- return false;
- }
- }
-
- //break link to saveQueue in main bin
- gboolean
- VideoInputGst::webcamBreakVideoSaveLink(GnashWebcamPrivate *webcam)
- {
- if (webcam->_pipelineIsPlaying == true) {
- GstStateChangeReturn state;
- state = gst_element_set_state(webcam->_pipeline, GST_STATE_NULL);
- if (state != GST_STATE_CHANGE_FAILURE) {
- webcam->_pipelineIsPlaying = false;
- } else {
- return false;
- }
- }
- gboolean ok;
- GstPad *videoSaveQueueSrc, *videoSaveSink;
- GstStateChangeReturn state;
- videoSaveQueueSrc = gst_element_get_pad(webcam->_webcamMainBin,
- "save_queue_src");
- videoSaveSink = gst_element_get_pad(webcam->_videoSaveBin, "sink");
-
- ok = gst_pad_unlink(videoSaveQueueSrc, videoSaveSink);
- if (ok != true) {
- log_error("%s: unlink failed", __FUNCTION__);
- return false;
- } else {
- state = gst_element_set_state(webcam->_videoSaveBin,
GST_STATE_NULL);
- if (state != GST_STATE_CHANGE_FAILURE) {
- ok = gst_bin_remove(GST_BIN(webcam->_pipeline),
webcam->_videoSaveBin);
- if (ok != true) {
- log_error("%s: couldn't remove saveBin from pipeline",
__FUNCTION__);
+ log_error("%s: couldn't link up sourcebin and tee",
__FUNCTION__);
return false;
} else {
return true;
}
+ }
+ }
+ }
+ return true;
+}
+
+//create a display bin that has ghostpads which allow display-to-screen
+//capabilities as well as save-to-file or buffer capabilities (both
+//implemented as bin ghostpads)
+gboolean
+VideoInputGst::webcamCreateMainBin()
+{
+ GstElement *tee, *video_display_queue, *save_queue;
+ gboolean ok;
+ GstPad *pad;
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ //initialize a new GST pipeline
+ webcam->_pipeline = gst_pipeline_new("pipeline");
+ assert(webcam->_pipeline);
+
+ webcam->_webcamMainBin = gst_bin_new ("webcam_main_bin");
+ assert(webcam->_webcamMainBin);
+
+ ok = webcamCreateSourceBin();
+ if (ok != true) {
+ log_error("%s: problem creating source bin", __FUNCTION__);
+ return false;
+ }
+
+ assert(webcam->_webcamSourceBin);
+
+ if ((tee = gst_element_factory_make ("tee", "tee")) == NULL) {
+ log_error("%s: problem creating tee element", __FUNCTION__);
+ return false;
+ }
+ if ((save_queue = gst_element_factory_make("queue", "save_queue")) ==
NULL) {
+ log_error("%s: problem creating save_queue element", __FUNCTION__);
+ return false;
+ }
+ if ((video_display_queue =
+ gst_element_factory_make("queue", "video_display_queue")) == NULL) {
+ log_error("%s: problem creating video_display_queue element",
__FUNCTION__);
+ return false;
+ }
+
+ //add created elements to a bin
+ gst_bin_add_many (GST_BIN (webcam->_webcamMainBin),
webcam->_webcamSourceBin,
+ tee, save_queue, video_display_queue, NULL);
+
+ ok = gst_element_link(webcam->_webcamSourceBin, tee);
+ if (ok != true) {
+ log_error("%s: couldn't link webcam_source_bin and tee", __FUNCTION__);
+ return false;
+ }
+
+ ok &= gst_element_link_many (tee, save_queue, NULL);
+ if (ok != true) {
+ log_error("%s: couldn't link tee and save_queue", __FUNCTION__);
+ return false;
+ }
+
+ ok &= gst_element_link_many (tee, video_display_queue, NULL);
+ if (ok != true) {
+ log_error("%s: couldn't link tee and video_display_queue",
__FUNCTION__);
+ return false;
+ }
+
+ gst_bin_add (GST_BIN(webcam->_pipeline), webcam->_webcamMainBin);
+
+ //add ghostpad to save_queue (allows connections between bins)
+ pad = gst_element_get_pad (save_queue, "src");
+ if (pad == NULL) {
+ log_error("%s: couldn't get save_queue_src_pad", __FUNCTION__);
+ return false;
+ }
+ gst_element_add_pad (webcam->_webcamMainBin,
+ gst_ghost_pad_new ("save_queue_src", pad));
+ gst_object_unref (GST_OBJECT (pad));
+
+ //add ghostpad to video_display_queue
+ pad = gst_element_get_pad (video_display_queue, "src");
+ if (pad == NULL) {
+ log_error("%s: couldn't get video_display_queue_pad", __FUNCTION__);
+ return false;
+ }
+ gst_element_add_pad (webcam->_webcamMainBin,
+ gst_ghost_pad_new ("video_display_queue_src", pad));
+ gst_object_unref (GST_OBJECT (pad));
+
+ assert(webcam->_videoSource);
+ assert(_devSelection == 0 || webcam->_capsFilter);
+ assert(_devSelection == 0 || webcam->_currentFormat);
+
+ if (!ok) {
+ log_error("%s: Unable to create main pipeline", __FUNCTION__);
+ return false;
+ }
+ return true;
+}
+
+gboolean
+VideoInputGst::webcamCreateDisplayBin()
+{
+ GstElement *video_scale, *video_sink;
+ gboolean ok;
+ GstPad *pad;
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ webcam->_videoDisplayBin = gst_bin_new("video_display_bin");
+
+ if (webcam->_videoDisplayBin == NULL) {
+ log_error("%s: something went wrong creating the new
video_display_bin",
+ __FUNCTION__);
+ return false;
+ }
+
+ if ((video_scale = gst_element_factory_make("videoscale", "video_scale"))
== NULL) {
+ log_error("%s: problem creating video_scale element", __FUNCTION__);
+ return false;
+ }
+ else {
+ //set bilinear scaling
+ g_object_set (video_scale, "method", 1, NULL);
+ }
+
+ if ((video_sink = gst_element_factory_make("autovideosink", "video_sink"))
== NULL) {
+ log_error("%s: problem creating the video_sink element", __FUNCTION__);
+ return false;
+ }
+
+ //add created elements to a bin
+ gst_bin_add_many (GST_BIN (webcam->_videoDisplayBin), video_scale,
video_sink, NULL);
+
+ ok = gst_element_link_many(video_scale, video_sink, NULL);
+ if (ok != true) {
+ log_error("%s: something went wrong in linking elements in
video_display_bin",
+ __FUNCTION__);
+ return false;
+ }
+
+ //create ghostpad which can be used to connect this bin to the
+ //video_display_queue src ghostpad
+ pad = gst_element_get_pad (video_scale, "sink");
+ gst_element_add_pad (webcam->_videoDisplayBin, gst_ghost_pad_new ("sink",
pad));
+ gst_object_unref (GST_OBJECT (pad));
+
+ assert(webcam->_videoDisplayBin);
+
+ return true;
+}
+
+//make link between display_queue src ghostpad in main_bin and
+//the elements necessary to display video to screen (_videoDisplayBin)
+gboolean
+VideoInputGst::webcamMakeVideoDisplayLink()
+{
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if (gst_bin_get_by_name(GST_BIN(webcam->_pipeline), "video_display_bin")
== NULL) {
+ gst_object_ref(webcam->_videoDisplayBin);
+ gst_bin_add (GST_BIN(webcam->_pipeline), webcam->_videoDisplayBin);
+ }
+
+ GstPad *video_display_queue_src, *video_display_bin_sink;
+
+ video_display_queue_src = gst_element_get_pad(webcam->_webcamMainBin,
+ "video_display_queue_src");
+ video_display_bin_sink = gst_element_get_pad(webcam->_videoDisplayBin,
+ "sink");
+
+ GstPadLinkReturn padreturn;
+ padreturn = gst_pad_link(video_display_queue_src, video_display_bin_sink);
+
+ if (padreturn == GST_PAD_LINK_OK) {
+ return true;
+ } else {
+ log_error("something went wrong in the make_video_display_link
function");
+ return false;
+ }
+}
+
+//break the link that displays the webcam video to the screen
+gboolean
+VideoInputGst::webcamBreakVideoDisplayLink()
+{
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if (webcam->_pipelineIsPlaying == true) {
+ GstStateChangeReturn state;
+ state = gst_element_set_state(webcam->_pipeline, GST_STATE_NULL);
+ if (state != GST_STATE_CHANGE_FAILURE) {
+ webcam->_pipelineIsPlaying = false;
+ } else {
+ return false;
+ }
+ }
+
+ gboolean ok;
+ GstPad *videoDisplayQueueSrc, *videoDisplayBinSink;
+
+ videoDisplayQueueSrc = gst_element_get_pad(webcam->_webcamMainBin,
+ "video_display_queue_src");
+ videoDisplayBinSink = gst_element_get_pad(webcam->_videoDisplayBin,
+ "sink");
+
+ ok = gst_pad_unlink(videoDisplayQueueSrc, videoDisplayBinSink);
+
+ if (ok != true) {
+ log_error("%s: the unlinking of the pads failed", __FUNCTION__);
+ return false;
+ } else {
+ return true;
+ }
+}
+
+//make link to saveQueue in main bin
+gboolean
+VideoInputGst::webcamMakeVideoSaveLink()
+{
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if (gst_bin_get_by_name(GST_BIN(webcam->_pipeline), "video_save_bin") ==
NULL) {
+ gst_object_ref(webcam->_videoSaveBin);
+ gst_bin_add(GST_BIN(webcam->_pipeline), webcam->_videoSaveBin);
+ }
+
+ //linking
+ GstPad *video_save_queue_src, *video_save_sink;
+
+ video_save_queue_src = gst_element_get_pad(webcam->_webcamMainBin,
"save_queue_src");
+ video_save_sink = gst_element_get_pad(webcam->_videoSaveBin, "sink");
+
+ GstPadLinkReturn padreturn;
+ padreturn = gst_pad_link(video_save_queue_src, video_save_sink);
+
+ if (padreturn == GST_PAD_LINK_OK) {
+ return true;
+ } else {
+ log_error("%s: something went wrong in the make_video_display_link
function",
+ __FUNCTION__);
+ return false;
+ }
+}
+
+//break link to saveQueue in main bin
+gboolean
+VideoInputGst::webcamBreakVideoSaveLink()
+{
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ if (webcam->_pipelineIsPlaying == true) {
+ GstStateChangeReturn state;
+ state = gst_element_set_state(webcam->_pipeline, GST_STATE_NULL);
+ if (state != GST_STATE_CHANGE_FAILURE) {
+ webcam->_pipelineIsPlaying = false;
+ } else {
+ return false;
+ }
+ }
+ gboolean ok;
+ GstPad *videoSaveQueueSrc, *videoSaveSink;
+ GstStateChangeReturn state;
+ videoSaveQueueSrc = gst_element_get_pad(webcam->_webcamMainBin,
+ "save_queue_src");
+ videoSaveSink = gst_element_get_pad(webcam->_videoSaveBin, "sink");
+
+ ok = gst_pad_unlink(videoSaveQueueSrc, videoSaveSink);
+ if (ok != true) {
+ log_error("%s: unlink failed", __FUNCTION__);
+ return false;
+ } else {
+ state = gst_element_set_state(webcam->_videoSaveBin, GST_STATE_NULL);
+ if (state != GST_STATE_CHANGE_FAILURE) {
+ ok = gst_bin_remove(GST_BIN(webcam->_pipeline),
webcam->_videoSaveBin);
+ if (ok != true) {
+ log_error("%s: couldn't remove saveBin from pipeline",
__FUNCTION__);
+ return false;
} else {
- log_error("%s: videoSaveBin state change failed",
__FUNCTION__);
- return false;
- }
- }
- }
-
- //create a bin to take the video stream and dump it out to
- //an ogg file
- gboolean
- VideoInputGst::webcamCreateSaveBin(GnashWebcamPrivate *webcam)
- {
- GstElement *video_save_csp, *video_save_rate, *video_save_scale,
*video_enc;
- GstElement *mux;
- GstPad *pad;
- gboolean ok;
-
- webcam->_videoSaveBin = gst_bin_new ("video_save_bin");
-
- if ((video_save_csp =
- gst_element_factory_make("ffmpegcolorspace", "video_save_csp"))
- == NULL) {
- log_error("%s: problem with creating video_save_csp element",
- __FUNCTION__);
- return false;
- }
- if ((video_enc = gst_element_factory_make("theoraenc", "video_enc"))
== NULL) {
- log_error("%s: problem with creating video_enc element",
__FUNCTION__);
- return false;
- } else {
- g_object_set (video_enc, "keyframe-force", 1, NULL);
- }
-
- if ((video_save_rate = gst_element_factory_make("videorate",
"video_save_rate")) == NULL) {
- log_error("%s: problem with creating video_save_rate element",
__FUNCTION__);
- return false;
- }
- if ((video_save_scale = gst_element_factory_make("videoscale",
"video_save_scale")) == NULL) {
- log_error("%s: problem with creating video_save_scale element",
__FUNCTION__);
- return false;
- } else {
- //Use bilinear scaling
- g_object_set (video_save_scale, "method", 1, NULL);
- }
- if ((mux = gst_element_factory_make("oggmux", "mux")) == NULL) {
- log_error("%s: problem with creating mux element", __FUNCTION__);
- return false;
- }
- if ((webcam->_videoFileSink = gst_element_factory_make("filesink",
"video_file_sink")) == NULL) {
- log_error("%s: problem with creating video_file_sink element",
__FUNCTION__);
- return false;
- } else {
- g_object_set(webcam->_videoFileSink, "location", "vidoutput.ogg",
NULL);
- }
-
- //add created elements to the video_save_bin in the datastructure
- gst_bin_add_many (GST_BIN (webcam->_videoSaveBin), video_save_csp,
- video_save_rate, video_save_scale, video_enc, mux,
webcam->_videoFileSink,
- NULL);
-
- //add ghostpad
- pad = gst_element_get_pad (video_save_csp, "sink");
- gst_element_add_pad (webcam->_videoSaveBin, gst_ghost_pad_new ("sink",
pad));
- gst_object_unref (GST_OBJECT (pad));
-
- ok = gst_element_link_many (video_save_csp, video_save_rate,
- video_save_scale, video_enc, mux, webcam->_videoFileSink, NULL);
-
- if (ok != true) {
- log_error("%s: there was some problem in linking!", __FUNCTION__);
- }
- return true;
- }
-
- //to handle messages while the main capture loop is running
- gboolean
- bus_call (GstBus * /*bus*/, GstMessage *msg, gpointer /*data*/)
- {
- switch (GST_MESSAGE_TYPE (msg)) {
-
- case GST_MESSAGE_EOS:
- log_debug ("End of stream");
- break;
-
- case GST_MESSAGE_ERROR: {
- gchar *debug;
- GError *error;
-
- gst_message_parse_error (msg, &error, &debug);
- g_free (debug);
-
- log_error ("Error: %s", error->message);
- g_error_free (error);
-
- break;
- }
- default:
- break;
- }
-
- return TRUE;
- }
-
- //start the pipeline and run the g_main_loop
- gboolean
- VideoInputGst::webcamPlay(GnashWebcamPrivate *webcam)
- {
- GstStateChangeReturn state;
- GstBus *bus;
- gint ret;
- //setup bus to watch pipeline for messages
- bus = gst_pipeline_get_bus (GST_PIPELINE (webcam->_pipeline));
- ret = gst_bus_add_watch (bus, bus_call, webcam);
- gst_object_unref (bus);
-
- state = gst_element_set_state (webcam->_pipeline,
GST_STATE_PLAYING);
-
- if (state != GST_STATE_CHANGE_FAILURE) {
- webcam->_pipelineIsPlaying = true;
return true;
- } else {
- return false;
}
- }
-
- gboolean
- VideoInputGst::webcamStop(GnashWebcamPrivate *webcam)
- {
- GstStateChangeReturn state;
-
- state = gst_element_set_state (webcam->_pipeline, GST_STATE_NULL);
- if (state != GST_STATE_CHANGE_FAILURE) {
- webcam->_pipelineIsPlaying = FALSE;
- return true;
} else {
+ log_error("%s: videoSaveBin state change failed", __FUNCTION__);
return false;
}
}
+}
+
+//create a bin to take the video stream and dump it out to
+//an ogg file
+gboolean
+VideoInputGst::webcamCreateSaveBin()
+{
+ GstElement *video_save_csp, *video_save_rate, *video_save_scale,
*video_enc;
+ GstElement *mux;
+ GstPad *pad;
+ gboolean ok;
+
+ GnashWebcamPrivate* webcam = _globalWebcam;
+
+ webcam->_videoSaveBin = gst_bin_new ("video_save_bin");
+
+ if ((video_save_csp =
+ gst_element_factory_make("ffmpegcolorspace", "video_save_csp"))
+ == NULL) {
+ log_error("%s: problem with creating video_save_csp element",
+ __FUNCTION__);
+ return false;
+ }
+ if ((video_enc = gst_element_factory_make("theoraenc", "video_enc")) ==
NULL) {
+ log_error("%s: problem with creating video_enc element", __FUNCTION__);
+ return false;
+ } else {
+ g_object_set (video_enc, "keyframe-force", 1, NULL);
+ }
+
+ if ((video_save_rate = gst_element_factory_make("videorate",
"video_save_rate")) == NULL) {
+ log_error("%s: problem with creating video_save_rate element",
__FUNCTION__);
+ return false;
+ }
+ if ((video_save_scale = gst_element_factory_make("videoscale",
"video_save_scale")) == NULL) {
+ log_error("%s: problem with creating video_save_scale element",
__FUNCTION__);
+ return false;
+ } else {
+ //Use bilinear scaling
+ g_object_set (video_save_scale, "method", 1, NULL);
+ }
+ if ((mux = gst_element_factory_make("oggmux", "mux")) == NULL) {
+ log_error("%s: problem with creating mux element", __FUNCTION__);
+ return false;
+ }
+ if ((webcam->_videoFileSink = gst_element_factory_make("filesink",
"video_file_sink")) == NULL) {
+ log_error("%s: problem with creating video_file_sink element",
__FUNCTION__);
+ return false;
+ } else {
+ g_object_set(webcam->_videoFileSink, "location", "vidoutput.ogg",
NULL);
+ }
+
+ //add created elements to the video_save_bin in the datastructure
+ gst_bin_add_many (GST_BIN (webcam->_videoSaveBin), video_save_csp,
+ video_save_rate, video_save_scale, video_enc, mux,
webcam->_videoFileSink,
+ NULL);
+
+ //add ghostpad
+ pad = gst_element_get_pad (video_save_csp, "sink");
+ gst_element_add_pad (webcam->_videoSaveBin, gst_ghost_pad_new ("sink",
pad));
+ gst_object_unref (GST_OBJECT (pad));
+
+ ok = gst_element_link_many (video_save_csp, video_save_rate,
+ video_save_scale, video_enc, mux, webcam->_videoFileSink, NULL);
+
+ if (ok != true) {
+ log_error("%s: there was some problem in linking!", __FUNCTION__);
+ }
+ return true;
+}
+
+//to handle messages while the main capture loop is running
+gboolean
+bus_call (GstBus * /*bus*/, GstMessage *msg, gpointer /*data*/)
+{
+ switch (GST_MESSAGE_TYPE (msg)) {
+
+ case GST_MESSAGE_EOS:
+ log_debug ("End of stream");
+ break;
+
+ case GST_MESSAGE_ERROR: {
+ gchar *debug;
+ GError *error;
+
+ gst_message_parse_error (msg, &error, &debug);
+ g_free (debug);
+
+ log_error ("Error: %s", error->message);
+ g_error_free (error);
+
+ break;
+ }
+ default:
+ break;
+ }
+
+ return TRUE;
+}
+
+//start the pipeline and run the g_main_loop
+bool
+VideoInputGst::play()
+{
+ GnashWebcamPrivate* webcam = _globalWebcam;
+ assert(_globalWebcam);
+
+ GstStateChangeReturn state;
+ GstBus *bus;
+ gint ret;
+ //setup bus to watch pipeline for messages
+ bus = gst_pipeline_get_bus (GST_PIPELINE (webcam->_pipeline));
+ ret = gst_bus_add_watch (bus, bus_call, webcam);
+ gst_object_unref (bus);
+
+ state = gst_element_set_state (webcam->_pipeline, GST_STATE_PLAYING);
+
+ if (state != GST_STATE_CHANGE_FAILURE) {
+ webcam->_pipelineIsPlaying = true;
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VideoInputGst::stop()
+{
+ GnashWebcamPrivate* webcam = _globalWebcam;
+ GstStateChangeReturn state;
+
+ state = gst_element_set_state (webcam->_pipeline, GST_STATE_NULL);
+ if (state != GST_STATE_CHANGE_FAILURE) {
+ webcam->_pipelineIsPlaying = FALSE;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/// Constructor for the WebcamVidFormat class. This constructor prepares
+/// the data structure for data that will come in later. All gint values
+/// are initialized to -1 to show that these values have never been set.
+///
+WebcamVidFormat::WebcamVidFormat() {
+ width = -1;
+ height = -1;
+ numFramerates = -1;
+ framerates = NULL;
+}
+
+/// Default constructor for the FramerateFraction class. This constructor
prepares
+/// the data structure for data that will come in later. All gint values
+/// are initialized to -1 to show that these values have never been set.
+FramerateFraction::FramerateFraction() {
+ numerator = -1;
+ denominator = -1;
+}
+
+/// Secondary constructor for the FramerateFraction class. This constructor
+/// initializes the structure with the numerator and denominator values passed
+/// to the constructor.
+FramerateFraction::FramerateFraction(gint num, gint denom) {
+ numerator = num;
+ denominator = denom;
+}
+
+/// Constructor for the GnashWebcam class. This constructor prepares the data
+/// structure for data that will come in later. Also creates a blank hash table
+/// and array.
+GnashWebcam::GnashWebcam() {
+ setElementPtr(NULL);
+ supportedResolutions = g_hash_table_new_full (g_str_hash, g_str_equal,
g_free, NULL);
+ videoFormats = g_array_new (FALSE, FALSE, sizeof (WebcamVidFormat));
+ numVideoFormats = 0;
+}
+
+/// Constructor that initializes all GnashWebcamPrivate variables to have
+/// data dropped in later.
+GnashWebcamPrivate::GnashWebcamPrivate() {
+ _pipeline = NULL;
+ _webcamSourceBin = NULL;
+ _webcamMainBin = NULL;
+ _videoDisplayBin = NULL;
+ _videoSaveBin = NULL;
+ _videoSource = NULL;
+ _capsFilter = NULL;
+ _videoFileSink = NULL;
+ _videoEnc = NULL;
+
+ _pipelineIsPlaying = false;
+
+ _currentFormat = NULL;
+ _eosTimeoutId = 0;
+};
+
+
} //gst namespace
} //media namespace
} //gnash namespace
=== modified file 'libmedia/gst/VideoInputGst.h'
--- a/libmedia/gst/VideoInputGst.h 2009-07-31 21:06:37 +0000
+++ b/libmedia/gst/VideoInputGst.h 2009-08-28 10:32:16 +0000
@@ -29,375 +29,81 @@
///
/// This namespace is used for the Gstreamer implmentation of the VideoInput
/// class.
-namespace gst{
+namespace gst {
+
+class GnashWebcam;
+class GnashWebcamPrivate;
/// \class FramerateFraction
///
/// Convience wrapper class which allows easily calculating fractions from
/// the information returned from probing hardware cameras for supported
/// framerates.
-class FramerateFraction{
- public:
- /// \var FramerateFraction::numerator
- /// \brief contains a gint value for the numerator portion of a
fraction.
- gint numerator;
- /// \var FramerateFraction::denominator
- /// \brief contains a gint value for the denominator portion of a
fraction.
- gint denominator;
-
- /// \brief Constructor which sets the numerator and denominator fields
upon construction.
- ///
- /// @param num The integer numerator value to initialize the
FramerateFraction class with.
- ///
- /// @param denom The integer denominator value to initialzie the
FramerateFraction class with.
- FramerateFraction(gint num, gint denom);
-
- /// Create a new empty FramerateFraction class.
- FramerateFraction();
-};
-
-/// \class GnashWebcam
-///
-/// The initial data structure used to store enumerated information about
-/// attached hardware video input devices. This class is smaller in size
-/// than the GnashWebcamPrivate class which is initialized once the user
-/// specifies a hardware input device to use in the gnashrc file.
-///
-class GnashWebcam {
- public:
- /// \brief Accessor to retreive a the private _element variable
- /// from the GnashWebcam class which contains a pointer
- /// to the video source element.
- ///
- /// @return GstElement* to the video source element
- GstElement* getElementPtr() {return _element;};
-
- /// \brief Accessor to set the private _element variable from
- /// the GnashWebcam class.
- ///
- /// @param element The GstElement pointer to the video source element.
- void setElementPtr(GstElement* element) {_element = element;};
-
- /// \brief Accessor to get the private _devLocation variable from
- /// the GnashWebcam class.
- ///
- /// @return The _devLocation private variable from GnashWebcam class.
- gchar* getDevLocation() {return _devLocation;};
-
- /// \brief Accessor to set the private _devLocation variable from
- /// the GnashWebcam class.
- ///
- /// @param l A gchar* containing the physical location of the video
- /// input hardware device (e.g. on Linux typically would be set
- /// to '/dev/video0').
- void setDevLocation(gchar *l) {_devLocation = l;};
-
- /// \brief Accessor to return the private _gstreamerSrc variable
- /// from the GnashWebcam class.
- ///
- /// @return The _gstreamerSrc variable from the GnashWebcam class.
- /// which should contain the type of the Gstreamer video source
- /// element (e.g. v4lsrc, v4l2src).
- gchar* getGstreamerSrc() {return _gstreamerSrc;};
-
- /// \brief Accessor to set the private _gstreamerSrc variable
- /// from the GnashWebcam class.
- ///
- /// @param s A gchar* containing the type of the Gstreamer source
- /// element type (e.g. v4lsrc, v4l2src, etc)
- void setGstreamerSrc(gchar *s) {_gstreamerSrc = s;};
-
- /// \brief Accessor to get the private _productName variable
- /// from the GnashWebcam class.
- ///
- /// @return A gchar* containing the video input's hardware name
- /// (e.g. Built-In Webcam or Microsoft LifeCam VX500).
- gchar* getProductName() {return _productName;};
-
- /// \brief Accessor to set the private _productName variable
- /// from the GnashWebcam class.
- ///
- /// @param n A gchar* to the hardware input device's hardware name
- /// (e.g. Built-In Webcam or Microsoft LifeCam VX500).
- void setProductName(gchar *n) {_productName = n;};
-
- /// \var GnashWebcam::numVideoFormats
- /// \brief Contains an integer value representing the number of
- /// video formats the camera supports (used for iteration
- /// purposes).
- gint numVideoFormats;
-
- /// \var GnashWebcam::videoFormats
- /// \brief A GArray containing WebcamVidFormat data structures
- /// (see WebcamVidFormat class documentation for more info).
- GArray* videoFormats;
-
- /// \var GnashWebcam::supportedResolutions
- /// \brief A hash table for easy lookup of resolutions the hardware
- /// camera supports.
- GHashTable* supportedResolutions;
-
- /// Constructor for the GnashWebcam class.
- GnashWebcam();
-
- private:
- /// \var GnashWebcam::_element
- /// \brief GstElement* which points to the video source
- /// element.
- GstElement* _element;
-
- /// \var GnashWebcam::_devLocation
- /// \brief Contains the physical location of the webcam device
- /// (e.g. on Linux typically would be set to /dev/video0).
- gchar* _devLocation;
-
- /// \var GnashWebcam::_gstreamerSrc
- /// \brief Contains a gchar* which describes the gstreamer source
- /// type (e.g. v4lsrc or v4l2src).
- gchar* _gstreamerSrc;
-
- /// \var GnashWebcam::_productName
- /// \brief Contains a gchar* which describes the name of the hardware
- /// device (e.g. Built-In Webcam or Microsoft LifeCam VX500).
- gchar* _productName;
+class FramerateFraction
+{
+public:
+ /// \var FramerateFraction::numerator
+ /// \brief contains a gint value for the numerator portion of a fraction.
+ gint numerator;
+ /// \var FramerateFraction::denominator
+ /// \brief contains a gint value for the denominator portion of a fraction.
+ gint denominator;
+
+ /// \brief Constructor which sets the numerator and denominator fields
upon construction.
+ ///
+ /// @param num The integer numerator value to initialize the
FramerateFraction class with.
+ ///
+ /// @param denom The integer denominator value to initialize the
FramerateFraction class with.
+ FramerateFraction(gint num, gint denom);
+
+ /// Create a new empty FramerateFraction class.
+ FramerateFraction();
};
/// \class WebcamVidFormat
///
/// Class used to hold enumerated information about usable video formats.
///
-class WebcamVidFormat {
- public:
- /// \var WebcamVidFormat::mimetype
- /// \brief Contains a gchar* which describes the raw video input stream
- /// from the camera formated in a Gstreamer video format
- /// type (e.g. video/x-raw-rgb or video/x-raw-yuv).
- gchar *mimetype;
-
- /// \var WebcamVidFormat::width
- /// \brief Contains a gint value describing the width of the selected
- /// format.
- gint width;
-
- /// \var WebcamVidFormat::height
- /// \brief Contains a gint value describing the height of the selected
- /// format.
- gint height;
-
- /// \var WebcamVidFormat::numFramerates
- /// \brief Contains a gint value representing the number of framerate
- /// values supported by the format described in the mimetype var.
- gint numFramerates;
-
- /// \var WebcamVidFormat::framerates
- /// \brief Pointer to a FramerateFraction class which simply holds a
- /// temporary framerate variable while trying to determine the
- /// highest possible supported framerate for the format described
- /// in the mimetype var.
- FramerateFraction *framerates;
-
- /// \var WebcamVidFormat::highestFramerate
- /// \brief Holds the highest_frame supported by the format described
- /// in the mimetype var.
- FramerateFraction highestFramerate;
-
- /// Constructor for the WebcamVidFormat class
- WebcamVidFormat();
-};
-
-/// Constructor for the WebcamVidFormat class. This constructor prepares
-/// the data structure for data that will come in later. All gint values
-/// are initialized to -1 to show that these values have never been set.
-///
-WebcamVidFormat::WebcamVidFormat() {
- width = -1;
- height = -1;
- numFramerates = -1;
- framerates = NULL;
-}
-
-/// Default constructor for the FramerateFraction class. This constructor
prepares
-/// the data structure for data that will come in later. All gint values
-/// are initialized to -1 to show that these values have never been set.
-FramerateFraction::FramerateFraction() {
- numerator = -1;
- denominator = -1;
-}
-
-/// Secondary constructor for the FramerateFraction class. This constructor
-/// initialzes the structure with the numerator and denominator values passed
-/// to the constructor.
-FramerateFraction::FramerateFraction(gint num, gint denom) {
- numerator = num;
- denominator = denom;
-}
-
-/// Constructor for the GnashWebcam class. This constructor prepares the data
-/// structure for data that will come in later. Also creates a blank hash table
-/// and array.
-GnashWebcam::GnashWebcam() {
- setElementPtr(NULL);
- supportedResolutions = g_hash_table_new_full (g_str_hash, g_str_equal,
g_free, NULL);
- videoFormats = g_array_new (FALSE, FALSE, sizeof (WebcamVidFormat));
- numVideoFormats = 0;
-}
-
-/// \class GnashWebcamPrivate
-///
-/// This class is initialized once a hardware video input device is chosen.
-/// It is really the workhorse of VideoInputGst. It contains all the important
-/// Gstreamer elements (element pointers, bins, pipelines, the GMainLoop, etc.)
-///
-class GnashWebcamPrivate
+class WebcamVidFormat
{
- public:
- /// Constructor for the GnashWebcamPrivate class.
- GnashWebcamPrivate();
-
- /// \brief Accessor to set the private _webcamDevice variable in the
- /// GnashWebcamPrivate class.
- ///
- /// @param d A pointer to a GnashWebcam class for the selected input
device.
- void setWebcamDevice(GnashWebcam *d) {_webcamDevice = d;}
-
- /// \brief Accessor to set the private _deviceName variable in the
- /// GnashWebcamPrivate class.
- ///
- /// @param n A gchar* describing the name of the hardware device
- /// (e.g. Built-In Webcam or Microsoft LifeCam VX500).
- void setDeviceName(gchar *n) {_deviceName = n;}
-
- //FIXME: this should eventually be a private or protected data field
- //protected:
-
- /// \var GnashWebcamPrivate::_pipeline
- ///
- /// \brief A pointer to the main Gstreamer pipeline that all
- /// created elements and bins will be dropped into.
- GstElement *_pipeline;
-
- /// \var GnashWebcamPrivate::_webcamSourceBin
- ///
- /// A pointer to the Gstreamer source bin. This variable is set
- /// inside of the make_webcamSourceBin() function. The pipeline
- /// API of this source bin is written as follows:
- /// videosourcedevice ! capsfilter (ghostpad)
- GstElement *_webcamSourceBin;
-
- /// \var GnashWebcamPrivate::_webcamMainBin
- ///
- /// A pointer to the Gstreamer main bin. This variable is set
- /// inside of the make_webcamMainBin() function. The pipeline
- /// API of the main bin is written as follows:
- /// tee ! save_queue (ghostpad)
- ///
- /// tee ! display_queue (ghostpad)
- ///
- /// This basically creates two queues where video stream data sits
- /// and can be attached (optionally) to a display_bin to show the
- /// video onscreen or to a save_bin to mux-out the stream and
- /// save to a file on disk.
- GstElement *_webcamMainBin;
-
- /// \var GnashWebcamPrivate::_videoDisplayBin
- ///
- /// A pointer to the Gstreamer display bin. This variable is set
- /// inside of the make_webcam_display_bin() function. The pipeline
- /// API of the video_display_bin is written as follows:
- ///
- /// videoscale ! videosink
- ///
- /// This bin is dropped into the webcam_main_bin, but by default
- /// the connection to display_queue is not made. This means that
- /// even though the video_display_bin is created, it is not linked
- /// and thus will not show video to the screen unless you call the
- /// webcamMakeVideoDisplayLink() function.
- GstElement *_videoDisplayBin;
-
- /// \var GnashWebcamPrivate::_videoSaveBin
- ///
- /// A pointer to the Gstreamer video_save_bin. This variable is set
- /// inside of the make_webcam_save_bin() function. The pipeline
- /// API of the video_save_bin is written as follows:
- ///
- /// ffmpegcolorspace ! videorate ! videoscale ! theoraenc ! oggmux !
filesink
- ///
- /// This bin is dropped into the webcam_main_bin and is linked
automatically
- /// to the video_save_queue element in the webcam_main_bin
- /// Note: if you want to save the file in a different format, simply
- /// link up video scale to a different encoder and muxer.
- GstElement *_videoSaveBin;
-
- /// \var GnashWebcamPrivate::_videoSource
- /// \brief Contains a direct link to the src pad in the video source
- /// element. This is different from _webcamSourceBin in that
- /// it points to the video source element INSIDE the bin, not
- /// the source bin itself.
- GstElement *_videoSource;
-
- /// \var GnashWebcamPrivate::_capsFilter
- /// \brief Contains a direct link to the src pad in the capsfilter
- /// element.
- GstElement *_capsFilter;
-
- /// \var GnashWebcamPrivate:_videoFileSink
- /// \brief Contains a direct link to the video_file_sink element
- GstElement *_videoFileSink;
-
- /// \var GnashWebcamPrivate::_videoEnc
- /// \brief Contains a direct link to the video encoder element
- GstElement *_videoEnc;
-
- /// \var GnashWebcamPrivate::_pipelineIsPlaying
- /// \brief Boolean value which is changed based on whether or not
- /// the Gstreamer pipeline status is GST_STATE_PLAYING (true)
- /// or GST_STATE_NULL (false), GST_STATE_READY (false),
- /// GST_STATE_PAUSED (false).
- gboolean _pipelineIsPlaying;
-
- /// \var GnashWebcamPrivate::_deviceName
- /// \brief Contains a string with the hardware device name (transferred
- /// from GnashWebcam class
- gchar *_deviceName;
-
- /// \var GnashWebcamPrivate::_webcamDevice
- /// \brief Contains a pointer to the original GnashWebcam class
- /// that was created when enumerating and probing attached
- /// hardware.
- GnashWebcam *_webcamDevice;
-
- /// \var GnashWebcamPrivate::_currentFormat
- /// \brief Contains a pointer to the WebcamVidFormat data structure
- /// selected to be used with this pipeline.
- WebcamVidFormat *_currentFormat;
-
- /// \var GnashWebcamPrivate::_eosTimeoutId
- /// \brief This variable is not currently used, but will eventually
- /// be used as a timeout when networking encapsulation is being
- /// used.
- guint _eosTimeoutId;
-};
-
-/// Constructor that initializes all GnashWebcamPrivate variables to have
-/// data dropped in later.
-GnashWebcamPrivate::GnashWebcamPrivate() {
- _pipeline = NULL;
- _webcamSourceBin = NULL;
- _webcamMainBin = NULL;
- _videoDisplayBin = NULL;
- _videoSaveBin = NULL;
- _videoSource = NULL;
- _capsFilter = NULL;
- _videoFileSink = NULL;
- _videoEnc = NULL;
-
- _deviceName = NULL;
-
- _pipelineIsPlaying = false;
-
- _currentFormat = NULL;
- _eosTimeoutId = 0;
-};
+public:
+ /// \var WebcamVidFormat::mimetype
+ /// \brief Contains a gchar* which describes the raw video input stream
+ /// from the camera formatted in a Gstreamer video format
+ /// type (e.g. video/x-raw-rgb or video/x-raw-yuv).
+ gchar *mimetype;
+
+ /// \var WebcamVidFormat::width
+ /// \brief Contains a gint value describing the width of the selected
+ /// format.
+ gint width;
+
+ /// \var WebcamVidFormat::height
+ /// \brief Contains a gint value describing the height of the selected
+ /// format.
+ gint height;
+
+ /// \var WebcamVidFormat::numFramerates
+ /// \brief Contains a gint value representing the number of framerate
+ /// values supported by the format described in the mimetype var.
+ gint numFramerates;
+
+ /// \var WebcamVidFormat::framerates
+ /// \brief Pointer to a FramerateFraction class which simply holds a
+ /// temporary framerate variable while trying to determine the
+ /// highest possible supported framerate for the format described
+ /// in the mimetype var.
+ FramerateFraction *framerates;
+
+ /// \var WebcamVidFormat::highestFramerate
+ /// \brief Holds the highest_frame supported by the format described
+ /// in the mimetype var.
+ FramerateFraction highestFramerate;
+
+ /// Constructor for the WebcamVidFormat class
+ WebcamVidFormat();
+};
+
/// \class VideoInputGst
///
@@ -405,21 +111,118 @@
/// defined in this header file. However, most of the significant information
/// is actually stored in a GnashWebcamPrivate class.
///
-class VideoInputGst : public VideoInput, public GnashWebcamPrivate {
+class VideoInputGst : public VideoInput
+{
public:
+
/// Constructor for the VideoInputGst class
VideoInputGst();
/// Destructor for the VideoInputGst class
~VideoInputGst();
- /// \brief This function interacts with the hardware on the machine
- /// to enumerate information about devices connected. Currently
- /// this function only looks for videotestsources (implemented
- /// in Gstreamer), video4linux and video4linux2 sources.
- /// @return Nothing. All pertantent information is now stored in a
- /// GnashWebcam class.
- void findVidDevs();
+ static void getNames(std::vector<std::string>& names);
+
+ /// Return the current activity level of the webcam
+ //
+ /// @return A double specifying the amount of motion currently
+ /// detected by the camera.
+ double activityLevel () const { return _activityLevel; }
+
+ /// The maximum available bandwidth for outgoing connections
+ //
+ /// TODO: see if this should really be here.
+ size_t bandwidth() const { return _bandwidth; }
+
+ void setBandwidth(size_t bandwidth) {
+ _bandwidth = bandwidth;
+ }
+
+ /// The current frame rate of the webcam
+ //
+ /// @return A double specifying the webcam's current FPS
+ double currentFPS() const { return _currentFPS; }
+
+ /// The maximum FPS rate of the webcam
+ //
+ /// @return A double specifying the webcam's maximum FPS
+ double fps() const { return _fps; }
+
+ /// Return the height of the webcam's frame
+ size_t height() const { return _height; }
+
+ /// Return the width of the webcam's frame
+ size_t width() const { return _width; }
+
+ /// The index of the camera
+ size_t index() const { return _index; }
+
+ /// Request a native mode most closely matching the passed variables.
+ //
+ /// @param width The required width
+ /// @param height The required height
+ /// @param fps The required frame rate
+ /// @param favorArea How to match the requested mode.
+ void requestMode(size_t width, size_t height, double fps, bool favorArea);
+
+ /// Set the amount of motion required before notifying the core
+ void setMotionLevel(int m) { _motionLevel = m; }
+
+ /// Return the current motionLevel setting
+ int motionLevel() const { return _motionLevel; }
+
+ /// Set time without motion in milliseconds before core is notified
+ void setMotionTimeout(int m) { _motionTimeout = m; }
+
+ /// Return the current motionTimeout setting.
+ int motionTimeout() const { return _motionTimeout; }
+
+ void mute(bool m) { _muted = m; }
+ bool muted() const { return _muted; }
+
+ /// Return the name of this webcam
+ //
+ /// @return a string specifying the name of the webcam.
+ const std::string& name() const { return _name; }
+
+ /// Set the quality of the webcam
+ void setQuality(int q) { _quality = q; }
+
+ /// Return the current quality of the webcam
+ int quality() const { return _quality; }
+
+ /// \brief Function starts up the pipeline designed earlier in code
+ /// execution. This puts everything into motion.
+ ///
+ /// @return True if the pipeline was started correctly, false otherwise.
+ bool play();
+
+ /// \brief Function stops the pipeline designed earlier in code execution.
+ ///
+ /// @return True if the pipeline was stopped correctly, false otherwise.
+ bool stop();
+
+
+ /// Set this VideoInput's webcam to the device corresponding to an index.
+ //
+ /// Now transfer the
+ /// important information from the GnashWebcam structure to the
+ /// GnashWebcamPrivate structure which is larger because it has
+ /// space to store Gstreamer pipeline, element and bin elements.
+ /// See definition of GnashWebcamPrivate for more info.
+ ///
+ /// @param dev_select The index of the camera the user wants to
+ /// select.
+ /// @return If the device index doesn't exist, return false
+ bool setWebcam(size_t index);
+
+ /// Call all functions necessary for initializing the camera.
+ //
+ /// For gstreamer this includes setting up bins.
+ //
+ /// Return false on failure of any initialization.
+ /// TODO: better throw MediaException.
+ bool init();
/// \brief This function is important in the flow of the code. It looks
/// in the gnashrc file to see if you have a default camera defined
@@ -431,6 +234,60 @@
/// gnashrc file.
int makeWebcamDeviceSelection();
+ /// ==================================
+ /// Functions that shouldn't be public.
+ /// ==================================
+
+ /// \brief Function links the videoSaveBin to the videoSaveQueue in the
+ /// main bin.
+ /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
+ /// created previously in a call to transferToPrivate()
+ ///
+ /// @return True if the link to the videoSaveQueue was successful, false
+ /// otherwise.
+ gboolean webcamMakeVideoSaveLink();
+
+ /// \brief Function breaks link between the videoSaveBin and the
videoSaveQueue
+ /// in the main bin.
+ /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
+ /// created previously in a call to transferToPrivate()
+ /// @return True if the link was successfully broken, false otherwise
+ gboolean webcamBreakVideoSaveLink();
+
+ /// \brief Function creates the save bin. For more information on pipeline
+ /// implementation and this function in general see the definition of
+ /// the _webcam_save_bin variable in the GnashWebcamPrivate structure
+ /// documentation.
+ ///
+ /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
+ /// created previously in a call to transferToPrivate()
+ ///
+ /// @return True if everything went correctly (making elements, dropping
+ /// into bins and linking elements), false otherwise
+ gboolean webcamCreateSaveBin();
+
+ /// \brief Function links the video_display_bin to the video_display_queue
+ /// in the main bin.
+ ///
+ /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
+ /// created previously in a call to transferToPrivate()
+ ///
+ /// @return True if the link to the video_display_queue was successful,
+ /// False otherwise.
+ gboolean webcamMakeVideoDisplayLink();
+
+ /// \brief Function breaks the link between the _videoDisplayBin and the
+ /// _videoDisplayQueue in the main bin
+ ///
+ /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
+ /// created previously in a call to transferToPrivate()
+ ///
+ /// @return True if the link was successfully broken, false otherwise
+ gboolean webcamBreakVideoDisplayLink();
+
+private:
+
+
/// \brief This function makes a temporary pipeline with the selected
device
/// to determine its capabilities (Gstreamer calls these caps). This
/// information is saved in a GnashWebcamPrivate class and will be
@@ -470,18 +327,20 @@
void addSupportedFormat(GnashWebcam *cam, WebcamVidFormat *video_format,
GstStructure *format_structure);
- /// \brief This function is called by addSupportedFormat. Since we have
found
- /// a format that will work with the input device, we now need to
figure
- /// out what framerate the camera can capture at that corresponds
with the
- /// format being analyzed.
- ///
- /// @param video_format A pointer to a WebcamVidFormat class taht has had
all
- /// variables initialized to their respective values.
- /// @param structure A pointer to a structure initialized with the
capabilities
- /// of the selected input device.
- ///
- /// @return Nothing. All pertantent information is stored in a
WebcamVidFormat class.
- void getSupportedFramerates(WebcamVidFormat *video_format, GstStructure
*structure);
+ /// \brief This function is called by addSupportedFormat. Since we have
+ /// found a format that will work with the input device, we now
+ /// need to figure out what framerate the camera can capture at
+ /// that corresponds with the format being analyzed.
+ ///
+ /// @param video_format A pointer to a WebcamVidFormat class that has
+ /// had all variables initialized to their respective values.
+ /// @param structure A pointer to a structure initialized with the
+ /// capabilities of the selected input device.
+ ///
+ /// @return Nothing. All pertinent information is stored in a
+ /// WebcamVidFormat class.
+ void getSupportedFramerates(WebcamVidFormat *video_format,
+ GstStructure *structure);
/// \brief This function checks to see if the current format selected for
the
/// webcam supports the framerate passed in as the second argument
@@ -501,22 +360,6 @@
/// passed in (a WebcamVidFormat class).
void findHighestFramerate(WebcamVidFormat *format);
- /// \brief Function is called when all the information has been enumerated
- /// that can be stored in the GnashWebcam structure. Now transfer the
- /// important information from the GnashWebcam structure to the
- /// GnashWebcamPrivate structure which is larger because it has
- /// space to store Gstreamer pipeline, element and bin elements.
- /// See definition of GnashWebcamPrivate for more info.
- ///
- /// @param dev_select The integer value of the camera the user wants to
select.
- /// This might be changed to the name of the camera, but
it's
- /// currently an integer (if it changes, we need to change
the
- /// gnashrc element). If this value is 0, you've selected a
- /// videotestsrc.
- ///
- /// @return A pointer to the newly created GnashWebcamPrivate structure.
- GnashWebcamPrivate* transferToPrivate(gint dev_select);
-
/// \brief Function creates the source bin. For more information on
pipeline
/// implementation and this function in general see the definition of
/// the _webcamSourceBin variable in the GnashWebcamPrivate structure
@@ -527,16 +370,14 @@
///
/// @return True if everything went correctly (making elements, dropping
/// into bins and linking elements), false otherwise.
- gboolean webcamCreateSourceBin(GnashWebcamPrivate *webcam);
+ gboolean webcamCreateSourceBin();
/// \brief Function is called when changes have been made to certain
variables
/// that effect the video source's capabilities (specifically
resolution
/// and fps values)
- /// @param webcam A pointer to the GnashWebcamPrivate data structure where
- /// changes have been made to resolution or fps variables
/// @return True if the changes to the source's capabilities happened
succesfully
/// false otherwise.
- gboolean webcamChangeSourceBin(GnashWebcamPrivate *webcam);
+ gboolean webcamChangeSourceBin();
/// \brief Function creates the main bin. For more information on pipeline
/// implementation and this function in general see the definition of
@@ -548,7 +389,7 @@
///
/// @return True if everything went correctly (making elements, dropping
/// into bins and linking elements), false otherwise.
- gboolean webcamCreateMainBin(GnashWebcamPrivate *webcam);
+ gboolean webcamCreateMainBin();
/// \brief Function creates the display bin. For more information on
pipeline
/// implementation and this function in general see the definition of
@@ -560,99 +401,24 @@
///
/// @return True if everything went correctly (making elements, dropping
/// into bins and linking elements), false otherwise.
- gboolean webcamCreateDisplayBin(GnashWebcamPrivate *webcam);
-
- /// \brief Function links the video_display_bin to the video_display_queue
- /// in the main bin.
- ///
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- ///
- /// @return True if the link to the video_display_queue was successful,
- /// False otherwise.
- gboolean webcamMakeVideoDisplayLink(GnashWebcamPrivate *webcam);
-
- /// \brief Function breaks the link between the _videoDisplayBin and the
- /// _videoDisplayQueue in the main bin
- ///
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- ///
- /// @return True if the link was successfully broken, false otherwise
- gboolean webcamBreakVideoDisplayLink(GnashWebcamPrivate *webcam);
-
- /// \brief Function links the videoSaveBin to the videoSaveQueue in the
- /// main bin.
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- ///
- /// @return True if the link to the videoSaveQueue was successfully, false
- /// otherwise.
- gboolean webcamMakeVideoSaveLink(GnashWebcamPrivate *webcam);
-
- /// \brief Function breaks link between the videoSaveBin and the
videoSaveQueue
- /// in the main bin.
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- /// @return True if the link was succesfully broken, false otherwise
- gboolean webcamBreakVideoSaveLink(GnashWebcamPrivate *webcam);
-
- /// \brief Function creates the save bin. For more information on pipeline
- /// implementation and this function in general see the definition of
- /// the _webcam_save_bin variable in the GnashWebcamPrivate structure
- /// documentation.
- ///
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- ///
- /// @return True if everything went correctly (making elements, dropping
- /// into bins and linking elements), false otherwis
- gboolean webcamCreateSaveBin(GnashWebcamPrivate *webcam);
-
- /// \brief Function starts up the pipeline designed earlier in code
- /// execution. This puts everything into motion.
- ///
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- ///
- /// @return True if the pipeline was started correctly, false otherwise.
- gboolean webcamPlay(GnashWebcamPrivate *webcam);
-
- /// \brief Function stops the pipeline designed earlier in code execution.
- ///
- /// @param webcam A pointer to the GnashWebcamPrivate webcam structure
- /// created previously in a call to transferToPrivate()
- /// @return True if the pipeline was stopped correctly, false otherwise.
- gboolean webcamStop(GnashWebcamPrivate *webcam);
-
- /// \brief Accessor which returns the vid_vect private variable in the
- /// VideoInputGst class.
- ///
- /// @return A pointer to a vector of GnashWebcam pointers.
- std::vector<GnashWebcam*>* getVidVect() {return &_vidVect;}
-
- /// \brief Accessor which sets the number of devices in the vid_vect
- ///
- /// @param i The integer value representing the number of devices attached
- /// to the machine.
- void setNumdevs(int i) {_numdevs = i;}
-
- /// \brief Accessor which returns the number of video devices attached
- /// to the machine (useful in accessing the vid_vect vector).
- ///
- /// @return The _numdev variable in the VideoInputGst class.
- int getNumdevs() {return _numdevs;}
-
- /// \brief Accessor which increments the number of video devices
- /// attached to the machine.
- void incrementNumdevs() {_numdevs += 1;}
+ gboolean webcamCreateDisplayBin();
+
+
/// \brief Accessor to return a pointer to the global GnashWebcamPrivate
/// variable
/// @return A pointer to the global GnashWebcamPrivate pointer
GnashWebcamPrivate* getGlobalWebcam() {return _globalWebcam;}
-private:
+ /// \brief This function interacts with the hardware on the machine
+ /// to enumerate information about devices connected. Currently
+ /// this function only looks for videotestsources (implemented
+ /// in Gstreamer), video4linux and video4linux2 sources.
+ //
+ /// @return Nothing. All pertinent information is stored to the passed
+ /// vector. Note: elements can also be null.
+ static void findVidDevs(std::vector<GnashWebcam*>& cams);
+
/// \var VideoInputGst::_vidVect
/// \brief A vector containing pointers to GnashWebcam classes.
std::vector<GnashWebcam*> _vidVect;
@@ -671,10 +437,60 @@
/// \brief Convienient pointer to the selected device's GnashWebcamPrivate
/// class structure.
GnashWebcamPrivate *_globalWebcam;
+
+ /// TODO: see which of these need to be retrieved from the camera,
+ /// which of them should be stored like this, and which should
+ /// be stored in the Camera_as relay object.
+
+ /// The currently detected activity level. This should be queried from
+ /// the camera.
+ double _activityLevel;
+
+ /// The available bandwidth. This probably shouldn't be dealt with by
+ /// the camera class. But maybe it should.
+ size_t _bandwidth;
+
+ /// The current FPS of the camera. This should be queried from the
+ /// camera.
+ double _currentFPS;
+
+ /// The maximum FPS allowed.
+ double _fps;
+
+ /// The height of the frame. This should probably be retrieved from
+ /// the camera
+ size_t _height;
+
+ /// The width of the frame. This should probably be retrieved from
+ /// the camera
+ size_t _width;
+
+ /// The index of this Webcam
+ size_t _index;
+
+ /// The motion level required to trigger a notification to the core
+ int _motionLevel;
+
+ /// The length of inactivity required to trigger a notification to the
core.
+ int _motionTimeout;
+
+ /// Whether access to the camera is allowed. This depends on the rcfile
+ /// setting
+ bool _muted;
+
+ /// The name of this camera.
+ std::string _name;
+
+ /// The current quality setting.
+ int _quality;
+
};
+
+
+
} //gst namespace
} // gnash.media namespace
} // gnash namespace
-#endif // __VIDEOINPUT_H__
+#endif
=== modified file 'testsuite/actionscript.all/Microphone.as'
--- a/testsuite/actionscript.all/Microphone.as 2009-08-10 07:18:06 +0000
+++ b/testsuite/actionscript.all/Microphone.as 2009-08-28 13:28:01 +0000
@@ -79,7 +79,7 @@
// Documented to be an array.
check ( Microphone.hasOwnProperty("names"));
-xcheck_equals (typeof (Microphone.names), 'object');
+check_equals (typeof (Microphone.names), 'object');
// test the Microphone constuctor
var microphoneObj = Microphone.get();
=== modified file 'testsuite/libmedia.all/test_videoinput.cpp'
--- a/testsuite/libmedia.all/test_videoinput.cpp 2009-07-31 21:12:01
+0000
+++ b/testsuite/libmedia.all/test_videoinput.cpp 2009-08-28 12:53:07
+0000
@@ -42,7 +42,7 @@
static string infile;
-static void test_client();
+void test_client();
LogFile& dbglogfile = LogFile::getDefaultInstance();
@@ -53,220 +53,85 @@
return 0;
}
-static void test_client()
+void test_client()
{
- //create a test class, call constructor
- gst::VideoInputGst vig;
-
- vig.findVidDevs();
- std::vector<GnashWebcam*> *vid_vect = vig.getVidVect();
-
- if (vid_vect->empty() == true) {
+
+ std::vector<std::string> names;
+ VideoInputGst::getNames(names);
+
+ if (names.empty()) {
runtest.fail("the video vector was not created by
find_vid_devs");
} else {
runtest.pass("the video vector was created");
}
- if (vid_vect->at(0) == NULL) {
- runtest.fail("the 0th vid_vect element is not the test source");
- } else {
- runtest.pass("the videotestsrc element was created");
- }
-
- if (vid_vect->at(0)->getElementPtr() == NULL) {
- runtest.fail("the videotestsrc didn't get assigned an element
ptr");
- } else {
- runtest.pass("the videotestsrc was assigned an element ptr");
- }
-
- if (g_strcmp0(vid_vect->at(0)->getGstreamerSrc(), "videotestsrc") == 1)
{
- runtest.fail("the zeroth element doesn't contain the right
source info");
- } else {
- runtest.pass("the zeroth vid_vect element contains the right
source info");
- }
- if (g_strcmp0(vid_vect->at(0)->getProductName(), "videotest") == 1) {
+ if (names.at(0) != "videotest") {
runtest.fail("the zeroth element doesn't contain the right
product name info");
} else {
runtest.pass("the zeroth vid_vect element contains the right
product name info");
}
- int devselect;
- devselect = vig.makeWebcamDeviceSelection();
+ VideoInputGst vig;
+
+ int devselect = vig.makeWebcamDeviceSelection();
- GnashWebcamPrivate *webcam = NULL;
- webcam = vig.transferToPrivate(devselect);
- if (webcam == NULL) {
+ bool ret = vig.setWebcam(devselect);
+ if (!ret) {
runtest.fail("the transferToPrivate function didn't return anything");
} else {
runtest.pass("the transferToPrivate function returned a
GnashWebcamPrivate ptr");
}
if (devselect == 0) {
- //videotestsrc tests
- if (g_strcmp0(webcam->_deviceName, "videotest") == 1) {
+ if (vig.name() != "videotest") {
runtest.fail("webcam doesn't have the right _deviceName value");
} else {
runtest.pass("webcam has the right _deviceName value");
}
- if (webcam->_webcamDevice != vid_vect->at(0)){
- runtest.fail("_webcamDevice values isn't correct");
- } else {
- runtest.pass("_webcamDevice has the right address");
- }
} else {
//real camera source tests
- if (webcam->_deviceName == NULL) {
+ if (vig.name().empty()) {
runtest.fail("_deviceName isn't set in GnashWebcamPrivate class");
} else {
runtest.pass("_deviceName is set in GnashWebcamPrivate class");
}
- if (webcam->_webcamDevice != vid_vect->at(devselect)) {
- runtest.fail("_webcamDevice isn't set in GnashWebcamPrivate
class");
- } else {
- runtest.pass("_webcamDevice has the right address");
- }
- }
-
- gboolean result = false;
- result = vig.webcamCreateMainBin(webcam);
- if (result != true) {
- runtest.fail("the webcamCreateMainBin() function reported an error");
- } else {
- runtest.pass("the webcamCreateMainBin() function isn't reporting
errors");
- }
- if (webcam->_pipeline == NULL) {
- runtest.fail("the main pipeline (webcam->_pipeline) wasn't
initialized");
- } else {
- runtest.pass("the main pipeline (webcam->_pipeline) was initializied");
- }
- if (webcam->_webcamMainBin == NULL) {
- runtest.fail("the _webcamMainBin wasn't created");
- } else {
- runtest.pass("the _webcamMainBin was created");
- if ((gst_element_get_pad(webcam->_webcamMainBin, "save_queue_src")) ==
NULL) {
- runtest.fail("save_queue_src ghostpad wasn't created");
- } else {
- runtest.pass("save_queue_src ghostpad was created");
- }
- if ((gst_element_get_pad(webcam->_webcamMainBin,
"video_display_queue_src")) == NULL) {
- runtest.fail("video_display_queue_src ghostpad wasn't created");
- } else {
- runtest.pass("video_display_queue_src ghostpad was created");
- }
- if ((gst_bin_get_by_name(GST_BIN(webcam->_pipeline),
- "webcam_main_bin")) == NULL) {
- runtest.fail("webcamMainBin has an unexpected address");
- } else {
- runtest.pass("webcamMainBin's address is set as expected");
- }
- }
- if (webcam->_webcamSourceBin == NULL) {
- runtest.fail("the _webcamSourceBin wasn't created");
- } else {
- runtest.pass("the _webcamSourceBin was created");
- if ((gst_bin_get_by_name(GST_BIN(webcam->_pipeline),
- "video_source")) == NULL) {
- runtest.fail("videoSourceBin has an unexpected address");
- } else {
- runtest.pass("videoSourceBin's address is set as expected");
- }
- }
- if (webcam->_videoSource == NULL) {
- runtest.fail("the _videoSource reference wasn't created");
- } else {
- runtest.pass("the _videoSource reference was created");
- }
- if (webcam->_capsFilter == NULL && (devselect !=0)) {
- runtest.fail("the _capsFilter reference wasn't created");
- } else {
- runtest.pass("the _capsFilter reference was created");
- }
- if (webcam->_currentFormat == NULL && (devselect != 0)) {
- runtest.fail("no format was set (_currentFormat == NULL!)");
- } else {
- runtest.pass("format is set");
- }
-
- result = false;
- result = vig.webcamCreateDisplayBin(webcam);
- if (result != true) {
- runtest.fail("webcamCreateDisplayBin() returned an error");
- } else {
- runtest.pass("webcamCreateDisplayBin() isn't reporting errors");
- }
- if (webcam->_videoDisplayBin == NULL) {
- runtest.fail("the _webcamDisplayBin wasn't created");
- } else {
- runtest.pass("the _webcamDisplayBin was created");
- if ((gst_element_get_pad(webcam->_videoDisplayBin, "sink")) == NULL) {
- runtest.fail("the sink ghostpad in _videoDisplayBin wasn't
created");
- } else {
- runtest.pass("the _videoDisplayBin sink ghostpad was created");
- }
- }
-
- result = false;
- result = vig.webcamMakeVideoDisplayLink(webcam);
- if (result != true) {
- runtest.fail("making videosrc -> display link failed");
- } else {
- runtest.pass("making videosrc -> display link succeeded");
- }
-
- result = false;
- result = vig.webcamCreateSaveBin(webcam);
+ }
+
+ bool result = vig.init();
+ if (result != true) {
+ runtest.fail("Webcam initialization");
+ } else {
+ runtest.pass("Webcam initialization was okay");
+ }
+
+ result = false;
+ result = vig.webcamCreateSaveBin();
if (result != true) {
runtest.fail("webcamCreateSaveBin() reported an error");
} else {
runtest.pass("webcamCreateSaveBin() didn't report any errors");
}
result = false;
- result = vig.webcamMakeVideoSaveLink(webcam);
+ result = vig.webcamMakeVideoSaveLink();
if (result != true) {
runtest.fail("webcamMakeVideoSaveLink() reported an error");
} else {
runtest.pass("webcamMakeVideoSaveLink() didn't report errors");
}
- if (webcam->_videoSaveBin == NULL) {
- runtest.fail("webcam->_videoSaveBin reference isn't set");
- } else {
- runtest.pass("webcam->_videoSaveBin reference is set");
- if ((gst_bin_get_by_name(GST_BIN(webcam->_pipeline),
- "video_save_bin")) == NULL) {
- runtest.fail("videoSaveBin has an unexpected address");
- } else {
- runtest.pass("videoSaveBin's address is set as expected");
- }
- if ((gst_element_get_pad(webcam->_videoSaveBin, "sink")) == NULL) {
- runtest.fail("videoSaveBin's sink ghostpad wasn't created");
- } else {
- runtest.pass("videoSaveBin properly has a sink ghostpad");
- }
- }
- if (webcam->_videoFileSink == NULL) {
- runtest.fail("webcam->_videoFileSink reference isn't set");
- } else {
- runtest.pass("webcam->_videoFileSink reference is set");
- }
//end of setup tests, now startup the webcamPipeline, run for a few seconds
//and then make sure there is a file present after running
- result = vig.webcamPlay(webcam);
+ result = vig.play();
if (result != true) {
- runtest.fail("webcamPlay() function reported an error");
+ runtest.fail("play() function reported an error");
} else {
- runtest.pass("webcamPlay() function reported no errors");
+ runtest.pass("play() function reported no errors");
}
g_print(" NOTE: the output window will close automatically\n");
- if (webcam->_pipelineIsPlaying != true) {
- runtest.fail("the _pipelineIsPlaying variable isn't being set");
- } else {
- runtest.pass("the _pipelineIsPlaying variable is properly set");
- }
- sleep(5);
- result = vig.webcamStop(webcam);
+ sleep(2);
+ result = vig.stop();
if (result != true) {
runtest.fail("webcamStop() function reported an error");
} else {
@@ -292,23 +157,23 @@
g_print(" NOTE: deleting output file...\n");
}
- result = vig.webcamBreakVideoDisplayLink(webcam);
+ result = vig.webcamBreakVideoDisplayLink();
if (result != true) {
runtest.fail("the webcamBreakVideoDisplayLink() function reported an
error");
} else {
runtest.pass("the webcamBreakVideoDisplayLink() function reported no
errors");
}
- result = vig.webcamPlay(webcam);
+ result = vig.play();
if (result != true) {
- runtest.fail("webcamPlay() reported errors after breaking display
link");
+ runtest.fail("play() reported errors after breaking display link");
} else {
- runtest.pass("webcamPlay() still works after breaking display link");
+ runtest.pass("play() still works after breaking display link");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = vig.webcamStop(webcam);
+ result = vig.stop();
if (result != true) {
runtest.fail("webcamStop() reported errors after breaking display
link");
} else {
@@ -326,30 +191,30 @@
g_print(" NOTE: deleting output file...\n");
}
- result = vig.webcamBreakVideoSaveLink(webcam);
+ result = vig.webcamBreakVideoSaveLink();
if (result != true) {
runtest.fail("breaking the videoSaveLink failed");
} else {
runtest.pass("breaking the videoSaveLink was successful");
}
- result = vig.webcamMakeVideoDisplayLink(webcam);
+ result = vig.webcamMakeVideoDisplayLink();
if (result != true) {
runtest.fail("making videosrc -> display link failed");
} else {
runtest.pass("making videosrc -> display link succeeded");
}
- result = vig.webcamPlay(webcam);
+ result = vig.play();
if (result != true) {
- runtest.fail("webcamPlay() reported errors after relinking display");
+ runtest.fail("play() reported errors after relinking display");
} else {
- runtest.pass("webcamPlay() still works after relinking display");
+ runtest.pass("play() still works after relinking display");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = vig.webcamStop(webcam);
+ result = vig.stop();
if (result != true) {
runtest.fail("webcamStop() reported errors after breaking display
link");
} else {
@@ -362,24 +227,24 @@
runtest.pass("no vidoutput.ogg file wasn't created");
}
- result = vig.webcamMakeVideoSaveLink(webcam);
+ result = vig.webcamMakeVideoSaveLink();
if (result != true) {
runtest.fail("webcamMakeVideoSaveLink() reported an error");
} else {
runtest.pass("webcamMakeVideoSaveLink() reported no errors");
}
- result = vig.webcamPlay(webcam);
+ result = vig.play();
if (result != true) {
- runtest.fail("webcamPlay() reported errors");
+ runtest.fail("play() reported errors");
} else {
- runtest.pass("webcamPlay() reported no errors");
+ runtest.pass("play() reported no errors");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = vig.webcamStop(webcam);
+ result = vig.stop();
if (result != true) {
runtest.fail("webcamStop() reported errors after breaking display
link");
} else {
@@ -399,66 +264,32 @@
//end unit tests
//tests more similar to execution flow
- gst::VideoInputGst *video = new VideoInputGst;
+ gst::VideoInputGst* video = new VideoInputGst;
if (video == NULL) {
runtest.fail("new VideoInputGst didn't work");
- } else {
+ } else {
runtest.pass("new VideoInputGst returned a value");
}
-
+
//get global webcam reference for use below
- GnashWebcamPrivate *global;
- global = video->getGlobalWebcam();
- if (global == NULL) {
- runtest.fail("couldn't get a globalwebcam video reference");
- } else {
- runtest.pass("got a globalWebcam reference");
- }
-
- if (global->_pipeline == NULL) {
- runtest.fail("video->_globalWebcam->_pipeline isn't there");
- } else {
- runtest.pass("video->_globalWebcam->_pipeline is initialized");
- }
- if (global->_webcamSourceBin == NULL) {
- runtest.fail("webcamSourceBin isn't there");
- } else {
- runtest.pass("webcamSourceBin was made by the initializer");
- }
- if (global->_webcamMainBin == NULL) {
- runtest.fail("webcamMainBin isn't there");
- } else {
- runtest.pass("webcamMainBin was made by the initializer");
- }
- if (global->_videoDisplayBin == NULL) {
- runtest.fail("videoDisplayBin isn't there");
- } else {
- runtest.pass("videoDisplayBin was made by the initializer");
- }
- if (global->_videoSaveBin == NULL) {
- runtest.fail("videoSaveBin isn't there");
- } else {
- runtest.pass("videoSaveBin was made by the initializer");
- }
-
- result = video->webcamMakeVideoDisplayLink(global);
+ result = video->webcamMakeVideoDisplayLink();
if (result != true) {
runtest.fail("webcamMakeVideoDisplayLink reported errors");
} else {
runtest.pass("webcamMakeVideoDisplayLink reported no errors");
}
- result = video->webcamPlay(global);
+ result = video->play();
if (result != true) {
- runtest.fail("webcamPlay reported errors");
+ runtest.fail("play reported errors");
} else {
- runtest.pass("webcamPlay reported no errors");
+ runtest.pass("play reported no errors");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = video->webcamStop(global);
+ result = video->stop();
if (result != true) {
runtest.fail("webcamStop reported errors");
} else {
@@ -466,51 +297,44 @@
}
g_print(" NOTE: changing values (display window should be
bigger)....\n");
- video->set_fps(30);
- video->set_width(800);
- video->set_height(600);
- result = video->webcamChangeSourceBin(global);
- if (result != true) {
- runtest.fail("webcamChangeSourceBin reported an error");
- } else {
- runtest.pass("webcamChangeSourceBin reported no errors");
- }
+
+ video->requestMode(800, 600, 30, true);
- result = video->webcamPlay(global);
+ result = video->play();
if (result != true) {
- runtest.fail("webcamPlay reported errors");
+ runtest.fail("play reported errors");
} else {
- runtest.pass("webcamPlay reported no errors");
+ runtest.pass("play reported no errors");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = video->webcamStop(global);
+ result = video->stop();
if (result != true) {
runtest.fail("webcamStop reported errors");
} else {
runtest.pass("webcamStop reported no errors");
}
- result = video->webcamMakeVideoSaveLink(global);
+ result = video->webcamMakeVideoSaveLink();
if (result != true) {
runtest.fail("webcamMakeVideoSaveLink reported errors");
} else {
runtest.pass("webcamMakeVideoSaveLink reported no errors");
}
- result = video->webcamPlay(global);
+ result = video->play();
if (result != true) {
- runtest.fail("webcamPlay reported errors");
+ runtest.fail("play reported errors");
} else {
- runtest.pass("webcamPlay reported no errors");
+ runtest.pass("play reported no errors");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = video->webcamStop(global);
+ result = video->stop();
if (result != true) {
runtest.fail("webcamStop reported errors");
} else {
@@ -526,24 +350,24 @@
g_print(" NOTE: deleting output file...\n");
}
- result = video->webcamBreakVideoDisplayLink(global);
+ result = video->webcamBreakVideoDisplayLink();
if (result != true) {
runtest.fail("webcamBreakVideoDisplayLink reported errors");
} else {
runtest.pass("webcamBreakVideoDisplayLink reported no errors");
}
- result = video->webcamPlay(global);
+ result = video->play();
if (result != true) {
- runtest.fail("webcamPlay reported errors");
+ runtest.fail("play reported errors");
} else {
- runtest.pass("webcamPlay reported no errors");
+ runtest.pass("play reported no errors");
}
g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
+ sleep(2);
- result = video->webcamStop(global);
+ result = video->stop();
if (result != true) {
runtest.fail("webcamStop reported errors");
} else {
@@ -560,119 +384,43 @@
g_print(" NOTE: deleting output file...\n");
}
- result = video->webcamMakeVideoDisplayLink(global);
+ result = video->webcamMakeVideoDisplayLink();
if (result != true) {
runtest.fail("webcamMakeVideoDisplayLink failed after breaking the
link");
} else {
runtest.pass("webcamMakeVideoDisplayLink reported no errors");
}
- result = video->webcamBreakVideoSaveLink(global);
+ result = video->webcamBreakVideoSaveLink();
if (result != true) {
runtest.fail("webcamBreakVideoSaveLink function reported errors");
} else {
runtest.pass("webcamBreakVideoSaveLink function reported no errors");
}
- result = video->webcamPlay(global);
- if (result != true) {
- runtest.fail("webcamPlay reported errors");
- } else {
- runtest.pass("webcamPlay reported no errors");
- }
-
- g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
-
- result = video->webcamStop(global);
- if (result != true) {
- runtest.fail("webcamStop reported errors");
- } else {
- runtest.pass("webcamStop reported no errors");
- }
-
- if (stat(file.c_str(), &st) == 0) {
- runtest.fail("a vidoutput.ogg file was created, and it shouldn't be");
- } else {
- runtest.pass("no vidoutput.ogg file wasn't created");
- }
-
- //pass bad values
- g_print(" NOTE: changing values to bad vals....\n");
- video->set_fps(200);
- video->set_width(8000);
- video->set_height(6000);
- result = video->webcamChangeSourceBin(global);
- if (result != true) {
- runtest.fail("webcamChangeSourceBin reported an error");
- } else {
- runtest.pass("webcamChangeSourceBin reported no errors");
- }
-
- result = video->webcamPlay(global);
- if (result != true) {
- runtest.fail("webcamPlay reported errors");
- } else {
- runtest.pass("webcamPlay reported no errors");
- }
-
- g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
-
- result = video->webcamStop(global);
- if (result != true) {
- runtest.fail("webcamStop reported errors");
- } else {
- runtest.pass("webcamStop reported no errors");
- }
-
- if (stat(file.c_str(), &st) == 0) {
- runtest.fail("a vidoutput.ogg file was created, and it shouldn't be");
- } else {
- runtest.pass("no vidoutput.ogg file wasn't created");
- }
-
- //reset to good vals
- g_print(" NOTE: changing back to legit vals....\n");
- video->set_fps(30);
- video->set_width(320);
- video->set_height(240);
-
- result = video->webcamMakeVideoSaveLink(global);
- if (result != true) {
- runtest.fail("webcamMakeVideoSaveLink reported an error");
- } else {
- runtest.pass("webcamMakeVideoSaveLink reported no errors");
- }
- result = video->webcamChangeSourceBin(global);
- if (result != true) {
- runtest.fail("webcamChangeSourceBin reported an error");
- } else {
- runtest.pass("webcamChangeSourceBin reported no errors");
- }
-
- result = video->webcamPlay(global);
- if (result != true) {
- runtest.fail("webcamPlay reported errors");
- } else {
- runtest.pass("webcamPlay reported no errors");
- }
-
- g_print(" NOTE: sleeping for 5 seconds here....\n");
- sleep(5);
-
- result = video->webcamStop(global);
- if (result != true) {
- runtest.fail("webcamStop reported errors");
- } else {
- runtest.pass("webcamStop reported no errors");
- }
-
- if (stat(file.c_str(), &st) == 0) {
- runtest.pass("the a new vidoput.ogg file was created");
- } else {
- runtest.fail("there's no new vidoutput.ogg file!");
- }
+ result = video->play();
+ if (result != true) {
+ runtest.fail("play reported errors");
+ } else {
+ runtest.pass("play reported no errors");
+ }
+
+ g_print(" NOTE: sleeping for 5 seconds here....\n");
+ sleep(2);
+
+ result = video->stop();
+ if (result != true) {
+ runtest.fail("webcamStop reported errors");
+ } else {
+ runtest.pass("webcamStop reported no errors");
+ }
+
+ if (stat(file.c_str(), &st) == 0) {
+ runtest.fail("a vidoutput.ogg file was created, and it shouldn't be");
+ } else {
+ runtest.pass("no vidoutput.ogg file wasn't created");
+ }
+
}
- [Gnash-commit] /srv/bzr/gnash/trunk r11446: Implement part of an interface for VideoInput, add notes on what's required,
Benjamin Wolsey <=