author    Tim Redfern <tim@herge.(none)>    2013-04-26 18:40:39 +0100
committer Tim Redfern <tim@herge.(none)>    2013-04-26 18:40:39 +0100
commit    f09e12889c4ebfa3db21fa30955698171a22757b (patch)
tree      b6cdfea066c94300325c9163fbe813acef467c31
parent    e9320eb83115f66e99bd98e76c8c9ff78ca043d4 (diff)
tinkering gstreamer loader
-rwxr-xr-x  rotord/01.xml               6
-rw-r--r--  rotord/Makefile             8
-rw-r--r--  rotord/Pixels.cpp           5
-rw-r--r--  rotord/Pixels.h             1
-rw-r--r--  rotord/gstvideoloader.cpp  11
-rw-r--r--  rotord/gstvideoloader.h    10
-rw-r--r--  rotord/rendercontext.cpp   31
-rwxr-xr-x  rotord/rotor.cpp          269
-rwxr-xr-x  rotord/rotor.h             18
-rw-r--r--  rotord/rotord.cbp           6
10 files changed, 80 insertions, 285 deletions
diff --git a/rotord/01.xml b/rotord/01.xml
index 0fe762a..e4b9067 100755
--- a/rotord/01.xml
+++ b/rotord/01.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
-<patchbay ID="0f7aa258-7c2f-11e2-abbd-133252267708" >Sample template ©Rotor 2013
+<patchbay ID="0f7aa258-7c2f-11e2-abbd-133252267708">Sample template ©Rotor 2013
<node ID="01" type="audio_analysis" soname="qm-vamp-plugins" id="qm-tempotracker" output="signal">beats
</node>
<node ID="02" type="audio_analysis" soname="qm-vamp-plugins" id="qm-segmenter" output="signal">segmenter
@@ -10,9 +10,9 @@
<node ID="04" type="bang" output="signal">outputs a single 1 every time signal enters a new number
<signal_input from="03">signal to analyse</signal_input>
</node>
- <node ID="05" type="video_input">performance video
+ <node ID="05" type="video_input" output="image">performance video
</node>
<node ID="06" type="video_output">video output
- <image_input from="05">image to output</signal_input>
+ <image_input from="05">image to output</image_input>
</node>
</patchbay>
diff --git a/rotord/Makefile b/rotord/Makefile
index 0f8e736..2d9b597 100644
--- a/rotord/Makefile
+++ b/rotord/Makefile
@@ -2,13 +2,15 @@
#http://docs.gstreamer.com/display/GstSDK/Installing+on+Linux
-MY_CFLAGS = -fpermissive -std=c++11 -Wno-error -I /opt/gstreamer-sdk/include/gstreamer-0.10/ -I /opt/gstreamer-sdk/include/glib-2.0 -I /opt/gstreamer-sdk/lib/glib-2.0/include -I /opt/gstreamer-sdk/include/libxml2
+#MY_CFLAGS = -fpermissive -std=c++11 -Wno-error -I /opt/gstreamer-sdk/include/gstreamer-0.10/ -I /opt/gstreamer-sdk/include/glib-2.0 -I /opt/gstreamer-sdk/lib/glib-2.0/include -I /opt/gstreamer-sdk/include/libxml2 $(shell pkg-config gstreamer-0.10 gstreamer-video-0.10 gstreamer-base-0.10 --cflags)
+MY_CFLAGS = -fpermissive -std=c++11 -Wno-error $(shell pkg-config gstreamer-0.10 gstreamer-video-0.10 gstreamer-base-0.10 --cflags)
# -I ../ffmpeg
# The linker options.libgstaasinklibgstaasink.so
-
-MY_LIBS = -lPocoNet -lPocoXML -lPocoUtil -lPocoFoundation -lvamp-hostsdk -lsndfile -L /usr/local/lib -lswscale -lavcodec -lavformat -lavfilter -lavdevice -lavutil -lgstreamer-0.10 -lglib-2.0 -lgstapp-0.10
+MY_LIBS = -lPocoNet -lPocoXML -lPocoUtil -lPocoFoundation -lvamp-hostsdk -lsndfile -L /usr/local/lib -lswscale -lavcodec -lavformat -lavfilter -lavdevice -lavutil -lgstvideo-0.10 -lgstbase-0.10 -lgstreamer-0.10 -lgobject-2.0 -lgmodule-2.0 -lgthread-2.0 -lrt -lxml2 -lglib-2.0 -lgstapp-0.10
+#MY_LIBS = -lPocoNet -lPocoXML -lPocoUtil -lPocoFoundation -lvamp-hostsdk -lsndfile -L /usr/local/lib -lswscale -lavcodec -lavformat -lavfilter -lavdevice -lavutil $(shell pkg-config gstreamer-0.10 gstreamer-video-0.10 gstreamer-base-0.10 --libs)
+# -lgstreamer-0.10 -lgstreamer-video-0.10 -lgstreamer-base-0.10 -lglib-2.0 -lgstapp-0.10
#MY_LIBS = ../libavcodec/ffmpeg/libavcodec/libavcodec.a ../libavcodec/ffmpeg/libavutil/libavutil.a ../libavcodec/ffmpeg/libavformat/libavformat.a ../libavcodec/ffmpeg/libavfilter/libavfilter.a ../libavcodec/ffmpeg/libavdevice/libavdevice.a -lPocoNet -lPocoXML -lPocoUtil -lPocoFoundation -lvamp-hostsdk
#GAH! HARD!
diff --git a/rotord/Pixels.cpp b/rotord/Pixels.cpp
index eef86c9..78f4bbb 100644
--- a/rotord/Pixels.cpp
+++ b/rotord/Pixels.cpp
@@ -1,5 +1,8 @@
#include "Pixels.h"
-
+Pixels::Pixels(){
+ pixels=nullptr;
+ pixelsOwner=false;
+}
Pixels::~Pixels(){
clear();
}
diff --git a/rotord/Pixels.h b/rotord/Pixels.h
index 84942af..b6f5865 100644
--- a/rotord/Pixels.h
+++ b/rotord/Pixels.h
@@ -4,6 +4,7 @@
class Pixels{
public:
+ Pixels();
~Pixels();
void allocate(int w, int h, int channels);
bool isAllocated() const;
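
The new default constructor matters because ~Pixels() calls clear() unconditionally: without pixels being initialised to nullptr, a Pixels object destroyed before allocate() ever ran would hand an indeterminate pointer to clear(). A minimal sketch of the guard this sets up; Pixels::clear() is not shown in this commit, so its body below is an assumption:

    // Sketch only: Pixels::clear() is not part of this diff, so this body is an
    // assumption about how the pixels/pixelsOwner members are meant to be used.
    void Pixels::clear(){
        if (pixels!=nullptr && pixelsOwner){
            delete [] pixels;      // only free memory this object allocated itself
        }
        pixels=nullptr;
        pixelsOwner=false;
    }
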
diff --git a/rotord/gstvideoloader.cpp b/rotord/gstvideoloader.cpp
index 2835b02..c7a9305 100644
--- a/rotord/gstvideoloader.cpp
+++ b/rotord/gstvideoloader.cpp
@@ -1,12 +1,6 @@
#include "gstvideoloader.h"
-#include <gst/gst.h>
-#include <gst/app/gstappsink.h>
-#include <gst/video/video.h>
-#include <glib-object.h>
-#include <glib.h>
-#include <algorithm>
using namespace std;
@@ -949,7 +943,7 @@ bool ofGstVideoPlayer::allocate(int bpp){
nFrames = 0;
if(GstPad* pad = gst_element_get_static_pad(videoUtils.getSink(), "sink")){
-/*#if GST_VERSION_MAJOR==0
+#if GST_VERSION_MAJOR==0
int width,height;
if(gst_video_get_size(GST_PAD(pad), &width, &height)){
if(!videoUtils.allocate(width,height,bpp)) return false;
@@ -971,7 +965,6 @@ bool ofGstVideoPlayer::allocate(int bpp){
}
bIsAllocated = true;
#else
-*/
if(GstCaps *caps = gst_pad_get_current_caps (GST_PAD (pad))){
GstVideoInfo info;
gst_video_info_init (&info);
@@ -991,7 +984,7 @@ bool ofGstVideoPlayer::allocate(int bpp){
cerr<<"GStreamer: cannot get pipeline caps"<<endl;
bIsAllocated = false;
}
-//#endif
+#endif
gst_object_unref(GST_OBJECT(pad));
}else{
cerr<<"GStreamer: cannot get sink pad"<<endl;
diff --git a/rotord/gstvideoloader.h b/rotord/gstvideoloader.h
index 867da01..b354fa7 100644
--- a/rotord/gstvideoloader.h
+++ b/rotord/gstvideoloader.h
@@ -1,6 +1,16 @@
#include <string>
#include <iostream>
+#include <gst/gst.h>
+#include <gst/app/gstappsink.h>
+#include <gst/video/video.h>
+#include <gst/audio/multichannel.h>
+
+#include <glib-object.h>
+#include <glib.h>
+#include <algorithm>
+
+#define GST_DISABLE_DEPRECATED
#include <gst/gstpad.h>
#include "Pixels.h"
#include "ofUtils.h"
diff --git a/rotord/rendercontext.cpp b/rotord/rendercontext.cpp
index 948ef31..d6469bd 100644
--- a/rotord/rendercontext.cpp
+++ b/rotord/rendercontext.cpp
@@ -48,6 +48,11 @@ void Render_context::add_queue(int item) {
Command_response Render_context::session_command(const std::vector<std::string>& command){
//method,id,command1,{command2,}{body}
//here we allow the controlling server to communicate with running tasks
+ string ems="";
+ for (auto i:command) ems=ems+i+":";
+
+ cerr<<"Rotor: session command with "<<command.size()<<" arguments- "<<ems<<endl;
+
Command_response response;
response.status=HTTPResponse::HTTP_BAD_REQUEST;
if (command[2]=="audio") {
@@ -184,18 +189,24 @@ Command_response Render_context::session_command(const std::vector<std::string>&
response.description+="<progress>45.2</progress>\n";
}
if (command[0]=="PUT") { //get vide file location and initiate analysis
- if (command.size()>2) {
+ if (command.size()>4) { //there should be a filename + a destination node
if (state==IDLE) {
//check file exists
- Poco::File f=Poco::File(command[3]);
+ Poco::File f=Poco::File(command[4]);
if (f.exists()) {
+ if (load_video(command[3],command[4])) {
//pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read??
//DUMMY RESPONSE
- response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Starting video analysis: "+command[3]+"</status>\n";
+ response.description="<status context='"+command[1]+"'>Rotor: successfully loaded "+command[4]+" into video node "+command[3]+"</status>\n";
+ }
+ else {
+ response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR;
+ response.description="<status context='"+command[1]+"'>Rotor: could not load "+command[4]+" into video node "+command[3]+"</status>\n";
+ }
}
- else {
+ else {
response.status=HTTPResponse::HTTP_NOT_FOUND;
- response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n";
+ response.description="<status context='"+command[1]+"'>File '"+command[4]+"' not found</status>\n";
}
}
else {
@@ -203,10 +214,14 @@ Command_response Render_context::session_command(const std::vector<std::string>&
response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n";
}
}
+ else {
+ response.status=HTTPResponse::HTTP_BAD_REQUEST;
+ response.description="<status context='"+command[1]+"'>Rotor: bad request</status>\n";
+ }
}
if (command[0]=="DELETE") {
//DUMMY RESPONSE
- response.description="<status>DUMMY RESPONSE 1</status>\n";
+ response.description="<status context='"+command[1]+"'>DUMMY RESPONSE 1</status>\n";
response.status=HTTPResponse::HTTP_OK;
}
@@ -410,8 +425,8 @@ bool Render_context::load_audio(const string &filename,vector<Base_audio_process
return true;
}
-bool Render_context::load_video(string nodeID,string &filename){
- //this is a good standard example of how to find
+bool Render_context::load_video(const string &nodeID,const string &filename){
+ //this is a good standard example of how to find
//a node of a specific type by ID and do something
if (graph.nodes.find(nodeID)!=graph.nodes.end()){
if (graph.nodes[nodeID]->type=="video_input") {
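
The PUT branch now requires five tokens instead of four: the node ID rides at command[3] and the file path at command[4], and both get echoed back in the status body. A small sketch of the up-front validation the handler now performs (names are illustrative; whether command[2] is literally "video" is an assumption, since only the "audio" subcommand is visible in this diff):

    #include <string>
    #include <vector>
    #include "Poco/File.h"

    // Sketch only, not part of rendercontext.cpp: the argument shape the reworked
    // PUT branch expects: method, context id, subcommand, node ID, file path.
    static bool valid_video_put(const std::vector<std::string>& command){
        return command.size()>4                     // nodeID at [3], filename at [4]
            && command[0]=="PUT"
            && Poco::File(command[4]).exists();     // same existence check as the handler
    }
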
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
index 20c6d3a..23e2196 100755
--- a/rotord/rotor.cpp
+++ b/rotord/rotor.cpp
@@ -155,141 +155,7 @@ void Audio_analysis::print_features(){
cerr<<i.second<<" "<<i.first<<endl;
}
}
- /*
- //testing signal routes
- cerr << "Rotor: Signal_output rendering " << duration << " seconds at " << framerate << " frames per second" << endl;
- float step=1.0f/framerate;
- float v=0.0f;
- for (float f=0.0f;f<duration;f+=step) {
- float u=get_output(Time_spec(f,framerate));
- if (!fequal(u,v)) {
- xml_out+=("<signal time='"+ofToString(f)+"'>"+ofToString(u)+"</signal>\n");
- v=u;
- }
- }
- return true;
- */
-/*
-bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
- //render out the network
-
- //set up output context
- //then iterate through frames
- //querying graph at each frame
-
- av_register_all();
-
- AVCodec *codec;
- AVCodecContext *c= NULL;
- int i, out_size, size, x, y, outbuf_size;
- FILE *f;
- AVFrame *picture;
- uint8_t *outbuf, *picture_buf;
-
- cerr << "Rotor: rendering " << output_filename << " , " << duration << " seconds at " << framerate << " frames per second" << endl;
-
- codec = avcodec_find_encoder(AV_CODEC_ID_H264);
- if (!codec) {
- cerr<< "codec not found" << endl;
- return false;
- }
-
- c= avcodec_alloc_context3(codec);
- picture= avcodec_alloc_frame();
-
- // put sample parameters /
- c->bit_rate = 400000;
- // resolution must be a multiple of two /
- c->width = 640;
- c->height = 480;
- // frames per second /
- c->time_base= (AVRational){1,25};
- c->gop_size = 10; // emit one intra frame every ten frames /
- c->max_b_frames=1;
- c->pix_fmt = PIX_FMT_YUV420P; //AV_PIX_FMT_RGB24
-
- AVDictionary *options; //= NULL; causes a forward declaration error!?
- options=NULL;
-
- // open it /
- if (avcodec_open2(c, codec, &options) < 0) {
- cerr << "could not open codec" << endl;
- return false;
- }
-
- f = fopen(output_filename.c_str(), "wb");
- if (!f) {
- cerr << "could not open "<< output_filename<<endl;
- return false;
- }
-
- // alloc image and output buffer/
- outbuf_size = 100000;
- outbuf = malloc(outbuf_size);
- size = c->width * c->height;
- picture_buf = malloc((size * 3) / 2); // size for YUV 420 /
-
- picture->data[0] = picture_buf;
- picture->data[1] = picture->data[0] + size;
- picture->data[2] = picture->data[1] + size / 4;
- picture->linesize[0] = c->width;
- picture->linesize[1] = c->width / 2;
- picture->linesize[2] = c->width / 2;
-
- // encode 1 second of video /
- for(i=0;i<250;i++) {
- fflush(stdout);
- // prepare a dummy image /
- // Y /
- for(y=0;y<c->height;y++) {
- for(x=0;x<c->width;x++) {
- picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
- }
- }
-
- // Cb and Cr /
- for(y=0;y<c->height/2;y++) {
- for(x=0;x<c->width/2;x++) {
- picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
- picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
- }
- }
-
- // encode the image /
- out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
- printf("encoding frame %3d (size=%5d)\n", i, out_size);
- fwrite(outbuf, 1, out_size, f);
- }
-
- // get the delayed frames /
- for(; out_size; i++) {
- fflush(stdout);
-
- out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
- printf("write frame %3d (size=%5d)\n", i, out_size);
- fwrite(outbuf, 1, out_size, f);
- }
-
- // add sequence end code to have a real mpeg file /
- outbuf[0] = 0x00;
- outbuf[1] = 0x00;
- outbuf[2] = 0x01;
- outbuf[3] = 0xb7;
- fwrite(outbuf, 1, 4, f);
- fclose(f);
- free(picture_buf);
- free(outbuf);
-
-
- avcodec_close(c);
- av_free(c);
- av_free(picture);
- printf("\n");
-
- return true;
-}
-*/
bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename){
//
@@ -326,123 +192,22 @@ bool Video_output::render(const float duration, const float framerate,const stri
return false;
}
-//new version from libav examples
-/*
- AVOutputFormat *fmt;
- AVFormatContext *oc;
- AVStream *audio_st, *video_st;
- double audio_pts, video_pts;
- int i;
-
- //Initialize libavcodec, and register all codecs and formats. //
- av_register_all();
- //think about this: when to register and unregister?
-
-
- //Autodetect the output format from the name. default is MPEG. //
- fmt = av_guess_format(NULL, output_filename.c_str(), NULL);
- if (!fmt) {
- printf("Could not deduce output format from file extension: using MPEG.\n");
- fmt = av_guess_format("mpeg", NULL, NULL);
- }
- if (!fmt) {
- cerr << "Rotor: could not find suitable output format" << endl;
- return false;
- }
-
- //Allocate the output media context. //
- oc = avformat_alloc_context();
- if (!oc) {
- cerr <<"Rotor: memory error"<< endl;
- return false;
- }
- oc->oformat = fmt;
- snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
-
- //Add the audio and video streams using the default format codecs
- * and initialize the codecs. //
- video_st = NULL;
- audio_st = NULL;
- if (fmt->video_codec != AV_CODEC_ID_NONE) {
- video_st = add_video_stream(oc, fmt->video_codec);
- }
- if (fmt->audio_codec != AV_CODEC_ID_NONE) {
- audio_st = add_audio_stream(oc, fmt->audio_codec);
- }
-
- //Now that all the parameters are set, we can open the audio and
- * video codecs and allocate the necessary encode buffers. //
- if (video_st)
- open_video(oc, video_st);
- if (audio_st)
- open_audio(oc, audio_st);
-
- av_dump_format(oc, 0, filename, 1);
-
- //open the output file, if needed //
- if (!(fmt->flags & AVFMT_NOFILE)) {
- if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
- cerr <<"Could not open "<<output_filename<<endl;
- return false;
- }
- }
-
- //Write the stream header, if any. //
- avformat_write_header(oc, NULL);
-
- for (;;) {
- //Compute current audio and video time. //
- if (audio_st)
- audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
- else
- audio_pts = 0.0;
-
- if (video_st)
- video_pts = (double)video_st->pts.val * video_st->time_base.num /
- video_st->time_base.den;
- else
- video_pts = 0.0;
-
- if ((!audio_st || audio_pts >= STREAM_DURATION) &&
- (!video_st || video_pts >= STREAM_DURATION))
- break;
-
- //write interleaved audio and video frames //
- if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
- write_audio_frame(oc, audio_st);
- } else {
- write_video_frame(oc, video_st);
- }
- }
-
- //Write the trailer, if any. The trailer must be written before you
- // close the CodecContexts open when you wrote the header; otherwise
- // av_write_trailer() may try to use memory that was freed on
- // av_codec_close(). //
- //av_write_trailer(oc);
-
- //Close each codec. //
- if (video_st)
- close_video(oc, video_st);
- if (audio_st)
- close_audio(oc, audio_st);
-
- //Free the streams. //
- for (i = 0; i < oc->nb_streams; i++) {
- av_freep(&oc->streams[i]->codec);
- av_freep(&oc->streams[i]);
- }
-
- if (!(fmt->flags & AVFMT_NOFILE))
- //Close the output file. //
- avio_close(oc->pb);
-
- //free the stream //
- av_free(oc);
-
- return true;
- */
-
bool Video_input::load(const string &filename){
-
+ if (player->loadMovie(filename)){
+ player->play();
+ player->setPaused(true);
+ cerr<<"loaded "<<filename<<", "<<player->getDuration()<<" seconds, "<<player->getWidth()<<"x"<<player->getHeight()<<endl;
+ return true;
+ }
+ return false;
}
+Image* Video_input::get_output(const Frame_spec &frame){
+ //wonder about the actual mechanism used by gstreamer
+ //have to implement callback when seek is ready?
+ //presume gstreamer caches a loaded frame?
+ //if (player->isLoaded()){
+ // image.setup(player)
+
+ //}
+ return nullptr;
+};
diff --git a/rotord/rotor.h b/rotord/rotor.h
index ef1ae23..f530bc3 100755
--- a/rotord/rotor.h
+++ b/rotord/rotor.h
@@ -201,7 +201,7 @@ namespace Rotor {
class Frame_spec{
public:
Frame_spec(float _time,float _framerate,int _w,int _h){ time=_time; framerate=_framerate; w=_w; h=_h;};
- float time;
+ float time; //this should probably be implemented with a num/denom scheme eventually for accuracy
float framerate;
int h,w;
Frame_spec lastframe(){
@@ -393,7 +393,7 @@ namespace Rotor {
return image;
}
private:
- Image *image;
+ Image *image; //is an image generator
};
class Video_output: public Image_node {
public:
@@ -422,16 +422,16 @@ namespace Rotor {
Video_input(){};
Video_input(map<string,string> &settings) {
base_settings(settings);
- loader=new ofGstVideoUtils();
+ player=new ofGstVideoPlayer();
+ image=new Image();
};
- ~Video_input(){ delete loader; };
+ ~Video_input(){ delete player; delete image;};
bool load(const string &filename);
- Image *get_output(const Frame_spec &frame){
- return nullptr;
- };
+ Image *get_output(const Frame_spec &frame);
Video_input* clone(map<string,string> &_settings) { return new Video_input(_settings);};
private:
- ofGstVideoUtils *loader;
+ ofGstVideoPlayer *player;
+ Image *image;
};
//-------------------------------------------------------------------
class Node_factory{
@@ -514,7 +514,7 @@ namespace Rotor {
int make_preview(int nodeID, float time); //starts a frame preview - returns status code - how to retrieve?
bool load_audio(const string &filename,vector<Base_audio_processor*> processors);
Render_requirements get_requirements();
- bool load_video(string nodeID,string &filename);//can be performance or clip
+ bool load_video(const string &nodeID,const string &filename);//can be performance or clip
private:
int state;
double progress; //for a locking process: audio analysis or rendering
diff --git a/rotord/rotord.cbp b/rotord/rotord.cbp
index 1a45db2..7a975b4 100644
--- a/rotord/rotord.cbp
+++ b/rotord/rotord.cbp
@@ -49,7 +49,12 @@
<Add option="-Wall" />
</Compiler>
<Unit filename="Makefile" />
+ <Unit filename="Pixels.cpp" />
+ <Unit filename="Pixels.h" />
<Unit filename="avCodec.h" />
+ <Unit filename="graph.cpp" />
+ <Unit filename="gstvideoloader.cpp" />
+ <Unit filename="gstvideoloader.h" />
<Unit filename="libavaudioloader.cpp" />
<Unit filename="libavaudioloader.h" />
<Unit filename="libavexporter.cpp" />
@@ -58,6 +63,7 @@
<Unit filename="ofUtils.h" />
<Unit filename="ofxMovieExporter.cpp" />
<Unit filename="ofxMovieExporter.h" />
+ <Unit filename="rendercontext.cpp" />
<Unit filename="rotor.cpp" />
<Unit filename="rotor.h" />
<Unit filename="rotord.cpp" />