diff options
| -rw-r--r-- | rotord/graph.cpp | 74 | ||||
| -rw-r--r-- | rotord/rendercontext.cpp | 414 | ||||
| -rwxr-xr-x | rotord/rotor.cpp | 500 | ||||
| -rwxr-xr-x | rotord/rotor.h | 18 | ||||
| -rw-r--r-- | rotord/styles.xml | 12 |
5 files changed, 520 insertions, 498 deletions
diff --git a/rotord/graph.cpp b/rotord/graph.cpp new file mode 100644 index 0000000..7eeeec3 --- /dev/null +++ b/rotord/graph.cpp @@ -0,0 +1,74 @@ +#include "rotor.h" + +using namespace Rotor; +const string Graph::toString(){ + string xmlgraph; + if (loaded) { + xml.copyXmlToString(xmlgraph); + return xmlgraph; + } + else return ""; +} +bool Graph::load(string &filename){ + loaded=false; + printf("loading graph: %s\n",filename.c_str()); + if(xml.loadFile(filename) ){ + init(xml.getAttribute("patchbay","ID","",0),xml.getValue("patchbay","",0)); + if(xml.pushTag("patchbay")) { + int n1=xml.getNumTags("node"); + for (int i1=0;i1<n1;i1++){ + map<string,string> settings; + vector<string> attrs; + xml.getAttributeNames("node",attrs,i1); + for (auto& attr: attrs) { + settings[attr]=xml.getAttribute("node",attr,"",i1); + //cerr << "Got attribute: " << attr << ":" << xml.getAttribute("node",attr,"",i1) << endl; + } + settings["description"]=xml.getValue("node","",i1); + Node* node=factory.create(settings); + if (node) { + cerr << "Rotor: created '" << xml.getAttribute("node","type","",i1) << "'" << endl; + string nodeID=xml.getAttribute("node","ID","",i1); + nodes[nodeID]=node; + if(xml.pushTag("node",i1)) { + int n2=xml.getNumTags("signal_input"); + for (int i2=0;i2<n2;i2++){ + nodes[nodeID]->create_signal_input(xml.getValue("signal_input","",i2)); + string fromID=xml.getAttribute("signal_input","from","",i2); + if (nodes.find(fromID)!=nodes.end()) { + if (!nodes[nodeID]->inputs[i2]->connect((Signal_node*)nodes[fromID])){ + cerr << "Rotor: graph loader cannot connect input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; + return false; + } + else cerr << "Rotor: linked input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; + } + else cerr << "Rotor: linking input " << i2 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl; + } + int n3=xml.getNumTags("image_input"); + for (int 
i3=0;i3<n3;i3++){ + ((Image_node*)nodes[nodeID])->create_image_input(xml.getValue("image_input","",i3)); + string fromID=xml.getAttribute("image_input","from","",i3); + if (nodes.find(fromID)!=nodes.end()) { + if (!(((Image_node*)nodes[nodeID])->image_inputs[i3]->connect((Image_node*)nodes[fromID]))){ + cerr << "Rotor: graph loader cannot connect image input " << i3 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; + return false; + } + else cerr << "Rotor: linked image input " << i3 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; + } + else cerr << "Rotor: linking image input " << i3 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl; + } + xml.popTag(); + } + } + else { + cerr << "Rotor: graph loader cannot find node '" << xml.getAttribute("node","type","",i1) << "'" << endl; + return false; + } + } + xml.popTag(); + } + loaded=true; + return true; + } + else return false; +}
\ No newline at end of file diff --git a/rotord/rendercontext.cpp b/rotord/rendercontext.cpp new file mode 100644 index 0000000..dfb9021 --- /dev/null +++ b/rotord/rendercontext.cpp @@ -0,0 +1,414 @@ +#include "rotor.h" + +using namespace Rotor; +void Render_context::runTask() { + while (!isCancelled()) { + int cmd=0; + mutex.lock(); + if (work_queue.size()){ + cmd=work_queue[0]; + work_queue.pop_front(); + } + mutex.unlock(); + if(cmd==ANALYSE_AUDIO) { + state=ANALYSING_AUDIO; + vector<Base_audio_processor*> processors; + processors.push_back(audio_thumb); + vector<Node*> analysers=graph.find_nodes("audio_analysis"); + for (auto a: analysers) { + processors.push_back(dynamic_cast<Base_audio_processor*>(a)); + } + if (load_audio(audio_filename,processors)) { + state=AUDIO_READY; + } + else { + //an error occurred: TODO have to clean up allocated data. autoptr? + state=IDLE; + } + } + if(cmd==RENDER) { + state=RENDERING; + if(graph.video_render(output_filename,audio_filename,output_framerate)){ + state=RENDER_READY; + } + else { + //an error occurred: TODO have to clean up allocated data. autoptr? + state=IDLE; + } + } + sleep(100); + } + printf("Rotor: stopping thread\n"); +} +void Render_context::add_queue(int item) { + mutex.lock(); + work_queue.push_back(item); + mutex.unlock(); +} +Command_response Render_context::session_command(const std::vector<std::string>& command){ + //method,id,command1,{command2,}{body} + //here we allow the controlling server to communicate with running tasks + Command_response response; + response.status=HTTPResponse::HTTP_BAD_REQUEST; + if (command[2]=="audio") { + if (command[0]=="PUT") { //get audio file location and initiate analysis + if (command.size()>2) { + if (state==IDLE) { + //check file exists + Poco::File f=Poco::File(command[3]); + if (f.exists()) { + //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read?? 
+ audio_filename=command[3]; //for now, store session variables in memory + add_queue(ANALYSE_AUDIO); + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>Starting audio analysis: "+command[3]+"</status>\n"; + } + else { + response.status=HTTPResponse::HTTP_NOT_FOUND; + response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; + } + + } + else { + response.status=HTTPResponse::HTTP_BAD_REQUEST; + response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; + } + } + } + if (command[0]=="GET") { + if (state==ANALYSING_AUDIO) { + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>Rotor: analysing audio</status>\n"; + char c[20]; + sprintf(c,"%02f",progress); + response.description+="<progress>"+string(c)+"</progress>\n"; + } + if (state==AUDIO_READY) { + //not sure about this-- should this state be retained? + //can the data only be read once? + //for now + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>Rotor: audio ready</status>\n"; + response.description+="<audio>\n"; + response.description+=audio_thumb->print(); + response.description+="</audio>"; + state=IDLE; + } + } + if (command[0]=="DELETE") { + //for now + audio_filename=""; + response.description="<status>1</status>\n"; + response.status=HTTPResponse::HTTP_OK; + } + } + if (command[2]=="graph") { + if (command[0]=="GET") { + if (graph.loaded) { + response.status=HTTPResponse::HTTP_OK; + response.description=graph.toString(); + } + else { + response.description="<status>Rotor: graph not loaded</status>\n"; + } + } + if (command[0]=="PUT") { //get new graph from file + if (command.size()>2) { + //should interrupt whatever is happening? 
+ //before begining to load from xml + if (state==IDLE) { //eventually not like this + Poco::File f=Poco::File(command[3]); + if (f.exists()) { + string graph_filename=command[3]; + if (graph.load(graph_filename)) { + response.status=HTTPResponse::HTTP_OK; + //response.description="<status context='"+command[1]+"'>Rotor: loaded graph "+command[3]+"</status>\n"; + response.description=graph.toString(); + //the graph could actually contain an xml object and we could just print it here? + //or could our nodes even be subclassed from xml nodes? + //the graph or the audio could load first- have to analyse the audio with vamp after the graph is loaded + //for now the graph must load 1st + } + else { + response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; //~/sources/poco-1.4.6-all/Net/include/Poco/Net/HTTPResponse.h + response.description="<status context='"+command[1]+"'>Rotor: could not load graph "+command[3]+"</status>\n"; + } + } + else { + response.status=HTTPResponse::HTTP_NOT_FOUND; + response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; + } + } + } + } + if (command[0]=="DELETE") { + //for now + graph=Graph(); + response.description="<status>1</status>\n"; + response.status=HTTPResponse::HTTP_OK; + } + } + if (command[2]=="signal") { + if (command[0]=="GET") { //generate xml from 1st signal output + if (state==IDLE) { + //direct call for testing + float framerate=25.0f; + if (command.size()>2) { + framerate=ofToFloat(command[3]); + } + string signal_xml; + if (graph.signal_render(signal_xml,framerate)){ + response.status=HTTPResponse::HTTP_OK; + response.description=signal_xml; + } + else { + response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; + response.description="<status context='"+command[1]+"'>Rotor: could not render output signal</status>\n"; + } + } + else { + response.status=HTTPResponse::HTTP_NOT_FOUND; + response.description="<status context='"+command[1]+"'>Signal output not found</status>\n"; + } 
+ } + else { + response.status=HTTPResponse::HTTP_SERVICE_UNAVAILABLE; + response.description="<status context='"+command[1]+"'>Rotor: context busy</status>\n"; + } + } + if (command[2]=="video") { + if (command[0]=="GET") { + //DUMMY RESPONSE + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: analysing video</status>\n"; + response.description+="<progress>45.2</progress>\n"; + } + if (command[0]=="PUT") { //get vide file location and initiate analysis + if (command.size()>2) { + if (state==IDLE) { + //check file exists + Poco::File f=Poco::File(command[3]); + if (f.exists()) { + //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read?? + //DUMMY RESPONSE + response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Starting video analysis: "+command[3]+"</status>\n"; + } + else { + response.status=HTTPResponse::HTTP_NOT_FOUND; + response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; + } + } + else { + response.status=HTTPResponse::HTTP_BAD_REQUEST; + response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; + } + } + } + if (command[0]=="DELETE") { + //DUMMY RESPONSE + response.description="<status>DUMMY RESPONSE 1</status>\n"; + response.status=HTTPResponse::HTTP_OK; + } + + } + if (command[2]=="render") { + if (command[0]=="GET") { + //DUMMY RESPONSE + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: rendering video</status>\n"; + response.description+="<progress>25.2</progress>\n"; + } + if (command[0]=="PUT") { + if (command.size()>2) { + if (state==IDLE) { + output_filename=command[3]; + if (command.size()>3) { +// output_framerate=ofToFloat(command[4]); + } + add_queue(RENDER); + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>Starting render: 
"+command[3]+"</status>\n"; + } + else { + response.status=HTTPResponse::HTTP_BAD_REQUEST; + response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; + } + } + else { + response.status=HTTPResponse::HTTP_BAD_REQUEST; + response.description="<status context='"+command[1]+"'>Rotor: no output file specified</status>\n"; + } + } + if (command[0]=="DELETE") { + //DUMMY RESPONSE + //SHOULD CHECK REQUIREMENTS + response.status=HTTPResponse::HTTP_OK; + response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: cancelling render</status>\n"; + } + } + return response; +} + +//http://blog.tomaka17.com/2012/03/libavcodeclibavformat-tutorial/ +//great to use c++11 features + +bool Render_context::load_audio(const string &filename,vector<Base_audio_processor*> processors){ + + av_register_all(); + + AVFrame* frame = avcodec_alloc_frame(); + if (!frame) + { + std::cout << "Error allocating the frame" << std::endl; + return false; + } + + AVFormatContext* formatContext = NULL; + if (avformat_open_input(&formatContext, filename.c_str(), NULL, NULL) != 0) + { + av_free(frame); + std::cout << "Error opening the file" << std::endl; + return false; + } + + + if (avformat_find_stream_info(formatContext, NULL) < 0) + { + av_free(frame); + avformat_close_input(&formatContext); + std::cout << "Error finding the stream info" << std::endl; + return false; + } + + AVStream* audioStream = NULL; + for (unsigned int i = 0; i < formatContext->nb_streams; ++i) + { + if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) + { + audioStream = formatContext->streams[i]; + break; + } + } + + if (audioStream == NULL) + { + av_free(frame); + avformat_close_input(&formatContext); + std::cout << "Could not find any audio stream in the file" << std::endl; + return false; + } + + AVCodecContext* codecContext = audioStream->codec; + + codecContext->codec = avcodec_find_decoder(codecContext->codec_id); + if (codecContext->codec == NULL) + { + 
av_free(frame); + avformat_close_input(&formatContext); + std::cout << "Couldn't find a proper decoder" << std::endl; + return false; + } + else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0) + { + av_free(frame); + avformat_close_input(&formatContext); + std::cout << "Couldn't open the context with the decoder" << std::endl; + return false; + } + + av_dump_format(formatContext, 0, 0, false); //avformat.h line 1256 + int samples = ((formatContext->duration + 5000)*codecContext->sample_rate)/AV_TIME_BASE; + graph.duration=((float)formatContext->duration)/AV_TIME_BASE; + + std::cout << "This stream has " << codecContext->channels << " channels, a sample rate of " << codecContext->sample_rate << "Hz and "<<samples <<" samples" << std::endl; + std::cout << "The data is in format " <<codecContext->sample_fmt<< " (aka "<< av_get_sample_fmt_name(codecContext->sample_fmt) << ") "<<std::endl; + + for (auto p: processors) { + if(!p->init(codecContext->channels,16,samples,codecContext->sample_rate) ){ + cerr<<"Plugin failed to initialse"<<endl; + return false; + } + } + + AVPacket packet; + av_init_packet(&packet); + int sample_processed=0; + + while (true) + { + int ret=av_read_frame(formatContext, &packet); + if (ret<0) { + cerr << "finished with code "<<ret <<(ret==AVERROR_EOF?" ,EOF":"")<<endl; + break; + } + if (packet.stream_index == audioStream->index) + { + // Try to decode the packet into a frame + int frameFinished = 0; + //int bytes = + avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); + + // Some frames rely on multiple packets, so we have to make sure the frame is finished before + // we can use it + if (frameFinished) + { + // frame now has usable audio data in it. How it's stored in the frame depends on the format of + // the audio. If it's packed audio, all the data will be in frame->data[0]. If it's in planar format, + // the data will be in frame->data and possibly frame->extended_data. 
Look at frame->data, frame->nb_samples, + // frame->linesize, and other related fields on the FFmpeg docs. I don't know how you're actually using + // the audio data, so I won't add any junk here that might confuse you. Typically, if I want to find + // documentation on an FFmpeg structure or function, I just type "<name> doxygen" into google (like + // "AVFrame doxygen" for AVFrame's docs) + + //av_get_channel_layout_string (char *buf, int buf_size, int nb_channels, uint64_t channel_layout) + + + //now we can pass the data to the processor(s) + for (auto p: processors) { + p->process_frame(frame->data[0],frame->nb_samples); + } + sample_processed+=frame->nb_samples; + mutex.lock(); + progress=((double)sample_processed)/samples; + mutex.unlock(); + } + } + // You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory + av_free_packet(&packet); + } + + // Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag + // is set, there can be buffered up frames that need to be flushed, so we'll do that + if (codecContext->codec->capabilities & CODEC_CAP_DELAY) + { + av_init_packet(&packet); + // Decode all the remaining frames in the buffer, until the end is reached + int frameFinished = 0; + int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); + while (bytes >= 0 && frameFinished) + { + for (auto p: processors) { + p->process_frame(frame->data[0],frame->nb_samples); + } + mutex.lock(); + progress=((double)sample_processed)/samples; + mutex.unlock(); + } + } + + cerr << "finished processing: "<<sample_processed << " samples of "<<samples<<", "<<((double)sample_processed*100)/samples<<"%"<< std::endl; + + // Clean up! 
+ for (auto p: processors) { + p->cleanup(); + } + + + av_free(frame); + avcodec_close(codecContext); + avformat_close_input(&formatContext); + + return true; +} +bool Render_context::load_video(string nodeID,string &filename){ +} diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp index da8f811..4249b08 100755 --- a/rotord/rotor.cpp +++ b/rotord/rotor.cpp @@ -9,51 +9,17 @@ bool fequal(const float u,const float v){ using namespace Rotor; - -void Render_context::runTask() { - while (!isCancelled()) { - int cmd=0; - mutex.lock(); - if (work_queue.size()){ - cmd=work_queue[0]; - work_queue.pop_front(); - } - mutex.unlock(); - if(cmd==ANALYSE_AUDIO) { - state=ANALYSING_AUDIO; - vector<Base_audio_processor*> processors; - processors.push_back(audio_thumb); - vector<Node*> analysers=graph.find_nodes("audio_analysis"); - for (auto a: analysers) { - processors.push_back(dynamic_cast<Base_audio_processor*>(a)); - } - if (load_audio(audio_filename,processors)) { - state=AUDIO_READY; - } - else { - //an error occurred: TODO have to clean up allocated data. autoptr? - state=IDLE; - } - } - if(cmd==RENDER) { - state=RENDERING; - if(graph.video_render(output_filename,audio_filename,output_framerate)){ - state=RENDER_READY; - } - else { - //an error occurred: TODO have to clean up allocated data. autoptr? 
- state=IDLE; - } - } - sleep(100); - } - printf("Rotor: stopping thread\n"); -} -void Render_context::add_queue(int item) { - mutex.lock(); - work_queue.push_back(item); - mutex.unlock(); +Node_factory::Node_factory(){ + //for now, statically load prototype map in constructor + add_type("audio_analysis",new Audio_analysis()); + add_type("divide",new Signal_divide()); + add_type("bang",new Is_new_integer()); + add_type("signal_output",new Signal_output()); + add_type("testcard",new Testcard()); + add_type("video_output",new Video_output()); + add_type("video_input",new Video_input()); } + bool Signal_input::connect(Signal_node* source) { if (source->output_type=="signal") { connection=(Node*)source; @@ -83,451 +49,6 @@ bool Signal_output::render(const float duration, const float framerate,string &x return true; } -Command_response Render_context::session_command(const std::vector<std::string>& command){ - //method,id,command1,{command2,}{body} - //here we allow the controlling server to communicate with running tasks - Command_response response; - response.status=HTTPResponse::HTTP_BAD_REQUEST; - if (command[2]=="audio") { - if (command[0]=="PUT") { //get audio file location and initiate analysis - if (command.size()>2) { - if (state==IDLE) { - //check file exists - Poco::File f=Poco::File(command[3]); - if (f.exists()) { - //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read?? 
- audio_filename=command[3]; //for now, store session variables in memory - add_queue(ANALYSE_AUDIO); - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>Starting audio analysis: "+command[3]+"</status>\n"; - } - else { - response.status=HTTPResponse::HTTP_NOT_FOUND; - response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; - } - - } - else { - response.status=HTTPResponse::HTTP_BAD_REQUEST; - response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; - } - } - } - if (command[0]=="GET") { - if (state==ANALYSING_AUDIO) { - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>Rotor: analysing audio</status>\n"; - char c[20]; - sprintf(c,"%02f",progress); - response.description+="<progress>"+string(c)+"</progress>\n"; - } - if (state==AUDIO_READY) { - //not sure about this-- should this state be retained? - //can the data only be read once? - //for now - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>Rotor: audio ready</status>\n"; - response.description+="<audio>\n"; - response.description+=audio_thumb->print(); - response.description+="</audio>"; - state=IDLE; - } - } - if (command[0]=="DELETE") { - //for now - audio_filename=""; - response.description="<status>1</status>\n"; - response.status=HTTPResponse::HTTP_OK; - } - } - if (command[2]=="graph") { - if (command[0]=="GET") { - if (graph.loaded) { - response.status=HTTPResponse::HTTP_OK; - response.description=graph.toString(); - } - else { - response.description="<status>Rotor: graph not loaded</status>\n"; - } - } - if (command[0]=="PUT") { //get new graph from file - if (command.size()>2) { - //should interrupt whatever is happening? 
- //before begining to load from xml - if (state==IDLE) { //eventually not like this - Poco::File f=Poco::File(command[3]); - if (f.exists()) { - string graph_filename=command[3]; - if (graph.load(graph_filename)) { - response.status=HTTPResponse::HTTP_OK; - //response.description="<status context='"+command[1]+"'>Rotor: loaded graph "+command[3]+"</status>\n"; - response.description=graph.toString(); - //the graph could actually contain an xml object and we could just print it here? - //or could our nodes even be subclassed from xml nodes? - //the graph or the audio could load first- have to analyse the audio with vamp after the graph is loaded - //for now the graph must load 1st - } - else { - response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; //~/sources/poco-1.4.6-all/Net/include/Poco/Net/HTTPResponse.h - response.description="<status context='"+command[1]+"'>Rotor: could not load graph "+command[3]+"</status>\n"; - } - } - else { - response.status=HTTPResponse::HTTP_NOT_FOUND; - response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; - } - } - } - } - if (command[0]=="DELETE") { - //for now - graph=Graph(); - response.description="<status>1</status>\n"; - response.status=HTTPResponse::HTTP_OK; - } - } - if (command[2]=="signal") { - if (command[0]=="GET") { //generate xml from 1st signal output - if (state==IDLE) { - //direct call for testing - float framerate=25.0f; - if (command.size()>2) { - framerate=ofToFloat(command[3]); - } - string signal_xml; - if (graph.signal_render(signal_xml,framerate)){ - response.status=HTTPResponse::HTTP_OK; - response.description=signal_xml; - } - else { - response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; - response.description="<status context='"+command[1]+"'>Rotor: could not render output signal</status>\n"; - } - } - else { - response.status=HTTPResponse::HTTP_NOT_FOUND; - response.description="<status context='"+command[1]+"'>Signal output not found</status>\n"; - } 
- } - else { - response.status=HTTPResponse::HTTP_SERVICE_UNAVAILABLE; - response.description="<status context='"+command[1]+"'>Rotor: context busy</status>\n"; - } - } - if (command[2]=="video") { - if (command[0]=="GET") { - //DUMMY RESPONSE - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: analysing video</status>\n"; - response.description+="<progress>45.2</progress>\n"; - } - if (command[0]=="PUT") { //get vide file location and initiate analysis - if (command.size()>2) { - if (state==IDLE) { - //check file exists - Poco::File f=Poco::File(command[3]); - if (f.exists()) { - //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read?? - //DUMMY RESPONSE - response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Starting video analysis: "+command[3]+"</status>\n"; - } - else { - response.status=HTTPResponse::HTTP_NOT_FOUND; - response.description="<status context='"+command[1]+"'>File "+command[3]+" not found</status>\n"; - } - } - else { - response.status=HTTPResponse::HTTP_BAD_REQUEST; - response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; - } - } - } - if (command[0]=="DELETE") { - //DUMMY RESPONSE - response.description="<status>DUMMY RESPONSE 1</status>\n"; - response.status=HTTPResponse::HTTP_OK; - } - - } - if (command[2]=="render") { - if (command[0]=="GET") { - //DUMMY RESPONSE - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: rendering video</status>\n"; - response.description+="<progress>25.2</progress>\n"; - } - if (command[0]=="PUT") { - if (command.size()>2) { - if (state==IDLE) { - output_filename=command[3]; - if (command.size()>3) { -// output_framerate=ofToFloat(command[4]); - } - add_queue(RENDER); - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>Starting render: 
"+command[3]+"</status>\n"; - } - else { - response.status=HTTPResponse::HTTP_BAD_REQUEST; - response.description="<status context='"+command[1]+"'>Rotor: session busy</status>\n"; - } - } - else { - response.status=HTTPResponse::HTTP_BAD_REQUEST; - response.description="<status context='"+command[1]+"'>Rotor: no output file specified</status>\n"; - } - } - if (command[0]=="DELETE") { - //DUMMY RESPONSE - //SHOULD CHECK REQUIREMENTS - response.status=HTTPResponse::HTTP_OK; - response.description="<status context='"+command[1]+"'>DUMMY RESPONSE Rotor: cancelling render</status>\n"; - } - } - return response; -} - -//http://blog.tomaka17.com/2012/03/libavcodeclibavformat-tutorial/ -//great to use c++11 features - -bool Render_context::load_audio(const string &filename,vector<Base_audio_processor*> processors){ - - av_register_all(); - - AVFrame* frame = avcodec_alloc_frame(); - if (!frame) - { - std::cout << "Error allocating the frame" << std::endl; - return false; - } - - AVFormatContext* formatContext = NULL; - if (avformat_open_input(&formatContext, filename.c_str(), NULL, NULL) != 0) - { - av_free(frame); - std::cout << "Error opening the file" << std::endl; - return false; - } - - - if (avformat_find_stream_info(formatContext, NULL) < 0) - { - av_free(frame); - avformat_close_input(&formatContext); - std::cout << "Error finding the stream info" << std::endl; - return false; - } - - AVStream* audioStream = NULL; - for (unsigned int i = 0; i < formatContext->nb_streams; ++i) - { - if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) - { - audioStream = formatContext->streams[i]; - break; - } - } - - if (audioStream == NULL) - { - av_free(frame); - avformat_close_input(&formatContext); - std::cout << "Could not find any audio stream in the file" << std::endl; - return false; - } - - AVCodecContext* codecContext = audioStream->codec; - - codecContext->codec = avcodec_find_decoder(codecContext->codec_id); - if (codecContext->codec == NULL) - { - 
av_free(frame); - avformat_close_input(&formatContext); - std::cout << "Couldn't find a proper decoder" << std::endl; - return false; - } - else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0) - { - av_free(frame); - avformat_close_input(&formatContext); - std::cout << "Couldn't open the context with the decoder" << std::endl; - return false; - } - - av_dump_format(formatContext, 0, 0, false); //avformat.h line 1256 - int samples = ((formatContext->duration + 5000)*codecContext->sample_rate)/AV_TIME_BASE; - graph.duration=((float)formatContext->duration)/AV_TIME_BASE; - - std::cout << "This stream has " << codecContext->channels << " channels, a sample rate of " << codecContext->sample_rate << "Hz and "<<samples <<" samples" << std::endl; - std::cout << "The data is in format " <<codecContext->sample_fmt<< " (aka "<< av_get_sample_fmt_name(codecContext->sample_fmt) << ") "<<std::endl; - - for (auto p: processors) { - if(!p->init(codecContext->channels,16,samples,codecContext->sample_rate) ){ - cerr<<"Plugin failed to initialse"<<endl; - return false; - } - } - - AVPacket packet; - av_init_packet(&packet); - int sample_processed=0; - - while (true) - { - int ret=av_read_frame(formatContext, &packet); - if (ret<0) { - cerr << "finished with code "<<ret <<(ret==AVERROR_EOF?" ,EOF":"")<<endl; - break; - } - if (packet.stream_index == audioStream->index) - { - // Try to decode the packet into a frame - int frameFinished = 0; - //int bytes = - avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); - - // Some frames rely on multiple packets, so we have to make sure the frame is finished before - // we can use it - if (frameFinished) - { - // frame now has usable audio data in it. How it's stored in the frame depends on the format of - // the audio. If it's packed audio, all the data will be in frame->data[0]. If it's in planar format, - // the data will be in frame->data and possibly frame->extended_data. 
Look at frame->data, frame->nb_samples, - // frame->linesize, and other related fields on the FFmpeg docs. I don't know how you're actually using - // the audio data, so I won't add any junk here that might confuse you. Typically, if I want to find - // documentation on an FFmpeg structure or function, I just type "<name> doxygen" into google (like - // "AVFrame doxygen" for AVFrame's docs) - - //av_get_channel_layout_string (char *buf, int buf_size, int nb_channels, uint64_t channel_layout) - - - //now we can pass the data to the processor(s) - for (auto p: processors) { - p->process_frame(frame->data[0],frame->nb_samples); - } - sample_processed+=frame->nb_samples; - mutex.lock(); - progress=((double)sample_processed)/samples; - mutex.unlock(); - } - } - // You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory - av_free_packet(&packet); - } - - // Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag - // is set, there can be buffered up frames that need to be flushed, so we'll do that - if (codecContext->codec->capabilities & CODEC_CAP_DELAY) - { - av_init_packet(&packet); - // Decode all the remaining frames in the buffer, until the end is reached - int frameFinished = 0; - int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); - while (bytes >= 0 && frameFinished) - { - for (auto p: processors) { - p->process_frame(frame->data[0],frame->nb_samples); - } - mutex.lock(); - progress=((double)sample_processed)/samples; - mutex.unlock(); - } - } - - cerr << "finished processing: "<<sample_processed << " samples of "<<samples<<", "<<((double)sample_processed*100)/samples<<"%"<< std::endl; - - // Clean up! 
- for (auto p: processors) { - p->cleanup(); - } - - - av_free(frame); - avcodec_close(codecContext); - avformat_close_input(&formatContext); - - return true; -} -const string Graph::toString(){ - string xmlgraph; - if (loaded) { - xml.copyXmlToString(xmlgraph); - return xmlgraph; - } - else return ""; -} -bool Graph::load(string &filename){ - loaded=false; - printf("loading graph: %s\n",filename.c_str()); - if(xml.loadFile(filename) ){ - init(xml.getAttribute("patchbay","ID","",0),xml.getValue("patchbay","",0)); - if(xml.pushTag("patchbay")) { - int n1=xml.getNumTags("node"); - for (int i1=0;i1<n1;i1++){ - map<string,string> settings; - vector<string> attrs; - xml.getAttributeNames("node",attrs,i1); - for (auto& attr: attrs) { - settings[attr]=xml.getAttribute("node",attr,"",i1); - //cerr << "Got attribute: " << attr << ":" << xml.getAttribute("node",attr,"",i1) << endl; - } - settings["description"]=xml.getValue("node","",i1); - Node* node=factory.create(settings); - if (node) { - cerr << "Rotor: created '" << xml.getAttribute("node","type","",i1) << "'" << endl; - string nodeID=xml.getAttribute("node","ID","",i1); - nodes[nodeID]=node; - if(xml.pushTag("node",i1)) { - int n2=xml.getNumTags("signal_input"); - for (int i2=0;i2<n2;i2++){ - nodes[nodeID]->create_signal_input(xml.getValue("signal_input","",i2)); - string fromID=xml.getAttribute("signal_input","from","",i2); - if (nodes.find(fromID)!=nodes.end()) { - if (!nodes[nodeID]->inputs[i2]->connect((Signal_node*)nodes[fromID])){ - cerr << "Rotor: graph loader cannot connect input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; - return false; - } - else cerr << "Rotor: linked input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; - } - else cerr << "Rotor: linking input " << i2 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl; - } - int n3=xml.getNumTags("image_input"); - for (int i3=0;i3<n3;i3++){ - 
((Image_node*)nodes[nodeID])->create_image_input(xml.getValue("image_input","",i3)); - string fromID=xml.getAttribute("image_input","from","",i3); - if (nodes.find(fromID)!=nodes.end()) { - if (!(((Image_node*)nodes[nodeID])->image_inputs[i3]->connect((Image_node*)nodes[fromID]))){ - cerr << "Rotor: graph loader cannot connect image input " << i3 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; - return false; - } - else cerr << "Rotor: linked image input " << i3 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; - } - else cerr << "Rotor: linking image input " << i3 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl; - } - xml.popTag(); - } - } - else { - cerr << "Rotor: graph loader cannot find node '" << xml.getAttribute("node","type","",i1) << "'" << endl; - return false; - } - } - xml.popTag(); - } - loaded=true; - return true; - } - else return false; -} -Node_factory::Node_factory(){ - //for now, statically load prototype map in constructor - add_type("audio_analysis",new Audio_analysis()); - add_type("divide",new Signal_divide()); - add_type("bang",new Is_new_integer()); - add_type("signal_output",new Signal_output()); - add_type("testcard",new Testcard()); - add_type("video_output",new Video_output()); -} bool Audio_thumbnailer::init(int _channels,int _bits,int _samples,int _rate) { //base_audio_processor::init(_channels,_bits,_samples); channels=_channels; @@ -923,4 +444,3 @@ bool Video_output::render(const float duration, const float framerate,const stri */ - diff --git a/rotord/rotor.h b/rotord/rotor.h index 6ade885..55beb60 100755 --- a/rotord/rotor.h +++ b/rotord/rotor.h @@ -417,6 +417,21 @@ namespace Rotor { libav::Exporter *exporter; libav::Audioloader audioloader; }; + class Video_input: public Image_node { + public: + Video_input(){}; + Video_input(map<string,string> &settings) { + base_settings(settings); + loader=new ofGstVideoUtils(); + }; + ~Video_input(){ delete loader; }; + 
Image *get_output(const Frame_spec &frame){ + return nullptr; + }; + Video_input* clone(map<string,string> &_settings) { return new Video_input(_settings);}; + private: + ofGstVideoUtils *loader; + }; //------------------------------------------------------------------- class Node_factory{ public: @@ -521,8 +536,7 @@ namespace Rotor { int make_preview(int nodeID, float time); //starts a frame preview - returns status code - how to retrieve? bool load_audio(const string &filename,vector<Base_audio_processor*> processors); Render_requirements get_requirements(); - int load_video(int num,string &filename); //can be performance or clip - + bool load_video(string nodeID,string &filename);//can be performance or clip private: int state; double progress; //for a locking process: audio analysis or rendering diff --git a/rotord/styles.xml b/rotord/styles.xml index 4826567..78087a7 100644 --- a/rotord/styles.xml +++ b/rotord/styles.xml @@ -1,9 +1,9 @@ <styles> - <style ID="0f7aa258-7c2f-11e2-abbd-133252267708" thumbnail="01_thumbnail.jpg" style="01.xml">No Offs - <info>A quick cutting style that cuts the beat and energy of the tune. We recommend this style for a performance based video, which requires two full track length takes of a singer and/or other members of the act or a single take of another action. 
A minimum of seven other videos should be uploaded to get the most out of animated zooming overlays.</info> - <slot minvideos="1" maxvideos="1" ID="1">This should be a video of a full performance of the song</slot> - <slot minvideos="1" maxvideos="1" ID="2">This should be a video of a full performance of the song</slot> - <slot minvideos="5" maxvideos="-1" ID="10">These should be a sequence of video cutaways</slot> - <slot minvideos="5" maxvideos="-1" ID="11">These should be a sequence of video cutaways</slot> + <style ID="0f7aa258-7c2f-11e2-abbd-133252267708" thumbnail="01_thumbnail.jpg" style="01.xml">On &amp; Off + <info>A quick cutting style that cuts to the beat and energy of the tune. ROTOR created this style for a performance based video and we recommend using the Rotor mobile app to shoot a synced performance on your camera device. It requires at least one video running the entire length of the track. If you can’t shoot a synced performance track, then shoot something else. A minimum of seven other mid length videos should be uploaded to get the most out of animated zooming overlays.</info> + <slot minvideos="1" maxvideos="1" ID="1" thumbnail="01_1_thumbnail.jpg">This should be a video of a full performance of the song</slot> + <slot minvideos="1" maxvideos="1" ID="2" thumbnail="01_2_thumbnail.jpg">This should be a video of a full performance of the song</slot> + <slot minvideos="5" maxvideos="-1" ID="10" thumbnail="01_10_thumbnail.jpg">These should be a sequence of video cutaways</slot> + <slot minvideos="5" maxvideos="-1" ID="11" thumbnail="01_11_thumbnail.jpg" >These should be a sequence of video cutaways</slot> + </style> </styles> |
