#include "rotor.h"

// Absolute-tolerance float equality: true when |u - v| < 0.001.
// FIX: the original called abs(), which selects the int overload for float
// arguments when <cstdlib> is in scope and silently truncates the difference;
// std::fabs is the correct call.
bool fequal(const float u, const float v) {
    return std::fabs(u - v) < 0.001f;
}

using namespace Rotor;

/*
string soname="qm-vamp-plugins";
string id="qm-tempotracker";
string myname="";
string output="";
int outputNo=0;
*/

// Worker-thread entry point: polls the work queue and dispatches queued
// commands until the task is cancelled.
void Render_context::runTask() {
    while (!isCancelled()) {
        int cmd = 0;
        // Pop the next queued command (if any) under the queue mutex.
        mutex.lock();
        if (work_queue.size()) {
            cmd = work_queue[0];
            work_queue.pop_front();
        }
        mutex.unlock();
        if (cmd == ANALYSE_AUDIO) {
            state = ANALYSING_AUDIO;
            //audio_analyser.process(audio_filename);
            //vampHost::runPlugin("","qm-vamp-plugins","qm-tempotracker", "",0, audio_filename, cerr,true);
            // NOTE(review): the element type of this vector was lost in the
            // source we received (template arguments were stripped). It is the
            // processor list handed to load_audio() — TODO confirm the exact
            // type against rotor.h.
            vector<Audio_processor*> proc;
            if (load_audio(audio_filename, proc)) {
                state = AUDIO_READY;
            } else state = IDLE;
        }
        sleep(100); // poll interval
    }
    printf("Rotor: stopping thread\n");
}

// Thread-safe append of a command code to the worker queue.
void Render_context::add_queue(int item) {
    mutex.lock();
    work_queue.push_back(item);
    mutex.unlock();
}

// Connect this input to a source node. Only nodes whose output_type is
// "signal" are accepted; returns false (and leaves the connection unchanged)
// otherwise.
bool Signal_input::connect(Signal_node* source) {
    if (source->output_type == "signal") {
        connection = (Node*)source;
        return true;
    } else return false;
}

// Render this output as XML for |duration| seconds at |framerate| frames per
// second, appending to |xml_out|.
// NOTE(review): the loop body was garbled in the source we received (all text
// between '<' and '>' was stripped), so the per-frame value expression and the
// XML element name below are reconstructed guesses — TODO confirm against
// version control. The surviving fragments show: a for loop over f, a value u
// emitted via ofToString(u) inside an inner conditional, and v=u afterwards,
// i.e. "emit only when the value changed", which fequal() above supports.
bool Signal_output::render(const float duration, const float framerate, string &xml_out) {
    cerr << "Rotor: Signal_output rendering " << duration << " seconds at "
         << framerate << " frames per second" << endl;
    float step = 1.0f / framerate;
    float v = 0.0f;
    for (float f = 0.0f; f < duration; f += step) {
        float u = value(f); // TODO confirm: original expression stripped from source
        if (!fequal(u, v)) {
            xml_out += ("<value>" + ofToString(u) + "</value>\n"); // TODO confirm tag name
            v = u;
        }
    }
    return true;
}

// Handle one session command: {method, id, resource, [arg], [body]}.
// Here we allow the controlling server to communicate with running tasks.
// Returns a Command_response carrying an HTTP status plus body text.
// NOTE(review): several response bodies originally contained XML markup that
// was stripped from the source we received; element names below are
// reconstructed guesses — TODO confirm against version control.
Command_response Render_context::session_command(const std::vector<std::string>& command) {
    Command_response response;
    response.status = HTTPResponse::HTTP_BAD_REQUEST;

    // FIX: command[0] and command[2] are read unconditionally below; reject
    // short command vectors instead of indexing out of range.
    if (command.size() < 3) {
        response.description = "Rotor: malformed command\n";
        return response;
    }

    if (command[2] == "audio") {
        if (command[0] == "PUT") {
            // Get audio file location and initiate analysis.
            // FIX: command[3] is read below, so at least 4 elements are
            // required (the original tested size()>2, permitting an
            // out-of-range access).
            if (command.size() > 3) {
                if (state == IDLE) {
                    // check the file exists
                    Poco::File f = Poco::File(command[3]);
                    //std::auto_ptr pStr(URIStreamOpener::defaultOpener().open(command[3]));
                    if (f.exists()) {
                        // pass to worker thread ??if engine is ready??
                        // ??what if engine has finished but results aren't read??
                        audio_filename = command[3]; // for now, store session variables in memory
                        add_queue(ANALYSE_AUDIO);
                        // NOTE(review): status intentionally (?) left at
                        // BAD_REQUEST on this success path in the original;
                        // preserved — confirm whether HTTP_OK was intended.
                        response.description = "Starting audio analysis: " + command[3] + "\n";
                    } else {
                        response.status = HTTPResponse::HTTP_NOT_FOUND;
                        response.description = "File " + command[3] + " not found\n";
                    }
                } else {
                    response.status = HTTPResponse::HTTP_BAD_REQUEST;
                    response.description = "Rotor: session busy\n";
                }
            }
        }
        if (command[0] == "GET") {
            if (state == ANALYSING_AUDIO) {
                response.status = HTTPResponse::HTTP_OK;
                response.description = "Rotor: analysing audio\n";
                char c[20];
                sprintf(c, "%02f", audio_analyser.get_progress());
                // TODO confirm tag name — markup stripped in received source
                response.description += "<progress>" + string(c) + "</progress>\n";
            }
            if (state == AUDIO_READY) {
                // not sure about this-- should this state be retained?
                // can the data only be read once?
                // for now
                response.status = HTTPResponse::HTTP_OK;
                response.description = "Rotor: audio ready\n";
                response.description += "<beats>"; // TODO confirm tag name
                for (auto& i : audio_analyser.beats) {
                    // is actually giving no data?
                    char c[20];
                    sprintf(c, "%02f", i);
                    response.description += "<beat>" + string(c) + "</beat>"; // TODO confirm tag name
                }
                response.description += "</beats>\n";
                state = IDLE;
            }
        }
        if (command[0] == "DELETE") {
            // for now
            audio_filename = "";
            response.description = "1\n";
            response.status = HTTPResponse::HTTP_OK;
        }
    }

    if (command[2] == "graph") {
        if (command[0] == "GET") {
            if (xml.bDocLoaded) {
                response.status = HTTPResponse::HTTP_OK;
                xml.copyXmlToString(response.description);
            } else {
                response.description = "Rotor: graph not loaded\n";
            }
        }
        if (command[0] == "PUT") {
            // get new graph from file
            // FIX: need 4 elements before reading command[3] (was size()>2)
            if (command.size() > 3) {
                // should interrupt whatever is happening?
                // before beginning to load from xml
                if (state == IDLE) { // eventually not like this
                    Poco::File f = Poco::File(command[3]);
                    if (f.exists()) {
                        string graph_filename = command[3];
                        if (load_graph(graph_filename)) {
                            response.status = HTTPResponse::HTTP_OK;
                            //response.description="Rotor: loaded graph "+command[3]+"\n";
                            string xmlstring;
                            xml.copyXmlToString(xmlstring);
                            response.description = xmlstring;
                            // the graph could actually contain an xml object and we could just print it here?
                            // or could our nodes even be subclassed from xml nodes?
                        } else {
                            response.status = HTTPResponse::HTTP_INTERNAL_SERVER_ERROR;
                            //~/sources/poco-1.4.6-all/Net/include/Poco/Net/HTTPResponse.h
                            response.description = "Rotor: could not load graph " + command[3] + "\n";
                        }
                    } else {
                        response.status = HTTPResponse::HTTP_NOT_FOUND;
                        response.description = "File " + command[3] + " not found\n";
                    }
                }
            }
        }
        if (command[0] == "DELETE") {
            // for now
            graph = Graph();
            response.description = "1\n";
            response.status = HTTPResponse::HTTP_OK;
        }
    }

    if (command[2] == "signal") {
        if (command[0] == "GET") {
            // generate xml from 1st signal output
            if (state == IDLE) {
                // direct call for testing
                float framerate = 0.0f;
                // FIX: need 4 elements before reading command[3] (was size()>2)
                if (command.size() > 3) {
                    framerate = ofToFloat(command[3]);
                }
                string signal_xml;
                if (graph.signal_render(framerate, signal_xml)) {
                    response.status = HTTPResponse::HTTP_OK;
                    response.description = signal_xml;
                } else {
                    response.status = HTTPResponse::HTTP_INTERNAL_SERVER_ERROR;
                    response.description = "Rotor: could not render output signal\n";
                }
            } else {
                // NOTE(review): a busy context answers 404 "not found" here
                // while a non-GET method answers 503 "busy" below — these look
                // swapped, but the original behaviour is preserved.
                response.status = HTTPResponse::HTTP_NOT_FOUND;
                response.description = "Signal output not found\n";
            }
        } else {
            response.status = HTTPResponse::HTTP_SERVICE_UNAVAILABLE;
            response.description = "Rotor: context busy\n";
        }
    }

    if (command[2] == "video") {
        if (command[0] == "GET") {
            // DUMMY RESPONSE
            response.status = HTTPResponse::HTTP_OK;
            response.description = "DUMMY RESPONSE Rotor: analysing video\n";
            // TODO confirm tag name — markup stripped in received source
            response.description += "<progress>45.2</progress>\n";
        }
        if (command[0] == "PUT") {
            // get video file location and initiate analysis
            // FIX: need 4 elements before reading command[3] (was size()>2)
            if (command.size() > 3) {
                if (state == IDLE) {
                    // check the file exists
                    Poco::File f = Poco::File(command[3]);
                    if (f.exists()) {
                        // pass to worker thread ??if engine is ready??
                        // ??what if engine has finished but results aren't read??
                        // DUMMY RESPONSE
                        response.description = "DUMMY RESPONSE Starting video analysis: " + command[3] + "\n";
                    } else {
                        response.status = HTTPResponse::HTTP_NOT_FOUND;
                        response.description = "File " + command[3] + " not found\n";
                    }
                } else {
                    response.status = HTTPResponse::HTTP_BAD_REQUEST;
                    response.description = "Rotor: session busy\n";
                }
            }
        }
        if (command[0] == "DELETE") {
            // DUMMY RESPONSE
            response.description = "DUMMY RESPONSE 1\n";
            response.status = HTTPResponse::HTTP_OK;
        }
    }

    if (command[2] == "render") {
        if (command[0] == "GET") {
            // DUMMY RESPONSE
            response.status = HTTPResponse::HTTP_OK;
            response.description = "DUMMY RESPONSE Rotor: rendering video\n";
            // TODO confirm tag name — markup stripped in received source
            response.description += "<progress>25.2</progress>\n";
        }
        if (command[0] == "PUT") {
            // DUMMY RESPONSE
            // SHOULD CHECK REQUIREMENTS
            response.status = HTTPResponse::HTTP_OK;
            response.description = "DUMMY RESPONSE Rotor: starting render\n";
        }
        if (command[0] == "DELETE") {
            // DUMMY RESPONSE
            // SHOULD CHECK REQUIREMENTS
            response.status = HTTPResponse::HTTP_OK;
            response.description = "DUMMY RESPONSE Rotor: cancelling render\n";
        }
    }
    return response;
}

//http://blog.tomaka17.com/2012/03/libavcodeclibavformat-tutorial/
//great to use c++11 features

// Load audio data from |filename| via libavformat/libavcodec, intended to feed
// each decoded chunk to the supplied |processors|.
// Original design notes: the model is background processing with a progress
// bar; could pass a function pointer per chunk, or a vector of objects that
// subclass a base audio-processor class (audio thumbnail, audio analysis, ...).
// 1st get parameters and initialise the processors, then run the data loop,
// locking the progress variable after each frame.
// NOTE(review): the template arguments of the smart pointers and of the
// |processors| parameter were stripped from the source we received and have
// been reconstructed — TODO confirm against rotor.h. The ffmpeg example this
// follows is hard-coded to AV_CODEC_ID_MP2 and loads a naked stream; this
// version opens containers through libavformat instead.
bool Render_context::load_audio(const string &filename, vector<Audio_processor*> processors) {
    av_register_all();

    // Container context, released via avformat_free_context on last use.
    std::shared_ptr<AVFormatContext> avFormat(avformat_alloc_context(), &avformat_free_context);
    auto avFormatPtr = avFormat.get();
    if (avformat_open_input(&avFormatPtr, filename.c_str(), nullptr, nullptr) != 0) {
        cerr << "Rotor: Error while calling avformat_open_input (probably invalid file format)" << endl;
        return false;
    }
    if (avformat_find_stream_info(avFormat.get(), nullptr) < 0) {
        cerr << "Rotor: Error while calling avformat_find_stream_info" << endl;
        return false;
    }
    av_dump_format(avFormat.get(), 0, 0, false); //avformat.h line 1256

    // Locate the first audio stream in the container.
    AVStream* stream = nullptr;
    for (unsigned int i = 0; i < avFormat->nb_streams; ++i) {
        if (avFormat->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            stream = avFormat->streams[i];
            break;
        }
    }
    if (!stream) {
        cerr << "Rotor: Didn't find any audio stream in the file" << endl;
        return false;
    }

    // getting the required codec structure
    const auto codec = avcodec_find_decoder(stream->codec->codec_id); //returns AVCodec*
    if (codec == nullptr) {
        cerr << "Rotor: Audio codec not available" << endl;
        return false;
    }

    //AVCodecContext?? avFormat->streams[i]->codec
    // allocating a structure; the deleter closes the codec before freeing it
    std::shared_ptr<AVCodecContext> audioCodec(avcodec_alloc_context3(codec),
        [](AVCodecContext* c) { avcodec_close(c); av_free(c); });

    /* extradata???
    // we need to make a copy of videoStream->codec->extradata and give it to the context
    // make sure that this vector exists as long as the avVideoCodec exists
    std::vector codecContextExtraData(stream->codec->extradata,
        stream->codec->extradata + stream->codec->extradata_size);
    audioCodec->extradata = reinterpret_cast(codecContextExtraData.data());
    audioCodec->extradata_size = codecContextExtraData.size();
    // initializing the structure by opening the codec
    if (avcodec_open2(avVideoCodec.get(), codec, nullptr) < 0) {
        cerr <<"Rotor: Could not open codec"<< endl;
        return false;
    }
    */

    //avcodec.h line 1026
    Packet packet(avFormat.get());
    if (packet.packet.data == nullptr) {
        //done
        return true;
    }
    cerr << "audio codec context - sample rate: " << audioCodec->sample_rate
         << ", channels: " << audioCodec->channels
         << ", sample format: " << audioCodec->sample_fmt << endl;
    // NOTE(review): the statements between the diagnostic above and the frame
    // allocation were stripped from the received source; this packet-skipping
    // do/while is reconstructed from the surviving copy inside the decode
    // loop below — TODO confirm.
    do {
        if (packet.packet.stream_index != stream->index)
            continue; // the packet is not about the stream we want, jump again to the start of the loop
    } while (0);

    // allocating an AVFrame
    std::shared_ptr<AVFrame> avFrame(avcodec_alloc_frame(), &av_free);

    // the current packet of data
    //Packet packet;
    // data in the packet of data already processed
    size_t offsetInData = 0;
    bool foundPacket = false;

    // the decoding loop, running until EOF
    while (true) {
        // reading a packet using libavformat
        if (offsetInData >= (size_t)packet.packet.size) {
            do {
                packet.reset(avFormat.get());
                if (packet.packet.stream_index != stream->index)
                    continue;
            } while (0);
            // NOTE(review): without an EOF exit this loop cannot terminate;
            // the original exit was in a stripped region — TODO confirm the
            // condition. Packet.data == nullptr mirrors the EOF test above.
            if (packet.packet.data == nullptr)
                break;
            if (!foundPacket) {
                cerr << "audio codec context - sample rate: " << audioCodec->sample_rate
                     << ", channels: " << audioCodec->channels
                     << ", sample format: " << audioCodec->sample_fmt << endl;
                foundPacket = true;
            }
        }
        // NOTE(review): the decode/dispatch body (avcodec_decode_audio4 and the
        // hand-off of decoded frames to |processors|) was stripped from the
        // received source — TODO restore from version control.
        /*
        // video-timing sleep from the tutorial this file follows:
        const auto msToWait = avVideoContext->ticks_per_frame * 1000
            * avVideoContext->time_base.num / avVideoContext->time_base.den;
        std::this_thread::sleep(std::chrono::milliseconds(msToWait));
        */
    }
    return true;
}

bool Render_context::load_graph(string &filename) {
    printf("loading %s\n", filename.c_str());
    if (xml.loadFile(filename)) {
        graph = Graph(xml.getAttribute("patchbay", "ID", "", 0), xml.getValue("patchbay", "", 0));
        if (xml.pushTag("patchbay")) {
            int
n1=xml.getNumTags("node"); for (int i1=0;i1 settings; vector attrs; xml.getAttributeNames("node",attrs,i1); for (auto& attr: attrs) { settings[attr]=xml.getAttribute("node",attr,"",i1); //cerr << "Got attribute: " << attr << ":" << xml.getAttribute("node",attr,"",i1) << endl; } settings["description"]=xml.getValue("node","",i1); Node* node=factory.create(settings); if (node) { cerr << "Rotor: created '" << xml.getAttribute("node","type","",i1) << "'" << endl; string nodeID=xml.getAttribute("node","ID","",i1); graph.nodes[nodeID]=node; if(xml.pushTag("node",i1)) { int n2=xml.getNumTags("signal_input"); for (int i2=0;i2create_signal_input(xml.getValue("signal_input","",i2)); string fromID=xml.getAttribute("signal_input","from","",i2); if (graph.nodes.find(fromID)!=graph.nodes.end()) { if (!graph.nodes[nodeID]->inputs[i2]->connect((Signal_node*)graph.nodes[fromID])){ cerr << "Rotor: graph loader cannot connect input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; return false; } else cerr << "Rotor: linked input " << i2 << " of node '" << nodeID << "' to node '" << fromID << "'" << endl; } else cerr << "Rotor: linking input " << i2 << " of node: '" << nodeID << "', cannot find target '" << fromID << "'" << endl; } xml.popTag(); } } else { cerr << "Rotor: graph loader cannot find node '" << xml.getAttribute("node","type","",i1) << "'" << endl; return false; } } xml.popTag(); } /* model_name=XML.getAttribute("map4","model","none",0); model_x=ofToFloat(XML.getAttribute("map4","x","none",0)); model_y=ofToFloat(XML.getAttribute("map4","y","none",0)); model_z=ofToFloat(XML.getAttribute("map4","z","none",0)); if(XML.pushTag("map4")) { numViews=XML.getNumTags("view"); if(numViews) { views=new viewpoint[numViews]; for (int i=0;ikeys; XML.getAttributeNames("settings", keys, 0); mapsettings; for (int k=0;k