From bee668c819c4d83c7492e296afb3a90ea1cd6d06 Mon Sep 17 00:00:00 2001
From: Tim Redfern
Date: Thu, 25 Apr 2013 15:43:19 +0100
Subject: file reorganisation

---
 rotord/rendercontext.cpp | 414 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 414 insertions(+)
 create mode 100644 rotord/rendercontext.cpp

diff --git a/rotord/rendercontext.cpp b/rotord/rendercontext.cpp
new file mode 100644
index 0000000..dfb9021
--- /dev/null
+++ b/rotord/rendercontext.cpp
@@ -0,0 +1,414 @@
+#include "rotor.h"
+
+using namespace Rotor;
+void Render_context::runTask() {
+    while (!isCancelled()) {
+        int cmd=0;
+        mutex.lock();
+        if (work_queue.size()){
+            cmd=work_queue[0];
+            work_queue.pop_front();
+        }
+        mutex.unlock();
+        if(cmd==ANALYSE_AUDIO) {
+            state=ANALYSING_AUDIO;
+            vector<Audio_processor*> processors; //element type assumed: the template argument was lost in the HTML capture
+            processors.push_back(audio_thumb);
+            vector<Node*> analysers=graph.find_nodes("audio_analysis"); //Node* assumed for the same reason
+            for (auto a: analysers) {
+                processors.push_back(dynamic_cast<Audio_processor*>(a)); //cast target assumed
+            }
+            if (load_audio(audio_filename,processors)) {
+                state=AUDIO_READY;
+            }
+            else {
+                //an error occurred: TODO have to clean up allocated data. autoptr?
+                state=IDLE;
+            }
+        }
+        if(cmd==RENDER) {
+            state=RENDERING;
+            if(graph.video_render(output_filename,audio_filename,output_framerate)){
+                state=RENDER_READY;
+            }
+            else {
+                //an error occurred: TODO have to clean up allocated data. autoptr?
+                state=IDLE;
+            }
+        }
+        sleep(100);
+    }
+    printf("Rotor: stopping thread\n");
+}
+void Render_context::add_queue(int item) {
+    mutex.lock();
+    work_queue.push_back(item);
+    mutex.unlock();
+}
+Command_response Render_context::session_command(const std::vector<std::string>& command){
+    //method,id,command1,{command2,}{body}
+    //here we allow the controlling server to communicate with running tasks
+    Command_response response;
+    response.status=HTTPResponse::HTTP_BAD_REQUEST;
+    if (command.size()<3) return response; //guard: a command needs at least method, id and command1
+    if (command[2]=="audio") {
+        if (command[0]=="PUT") { //get audio file location and initiate analysis
+            if (command.size()>3) {
+                if (state==IDLE) {
+                    //check file exists
+                    Poco::File f=Poco::File(command[3]);
+                    if (f.exists()) {
+                        //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read??
+                        audio_filename=command[3]; //for now, store session variables in memory
+                        add_queue(ANALYSE_AUDIO);
+                        response.status=HTTPResponse::HTTP_OK;
+                        response.description="Starting audio analysis: "+command[3]+"\n";
+                    }
+                    else {
+                        response.status=HTTPResponse::HTTP_NOT_FOUND;
+                        response.description="File "+command[3]+" not found\n";
+                    }
+                }
+                else {
+                    response.status=HTTPResponse::HTTP_BAD_REQUEST;
+                    response.description="Rotor: session busy\n";
+                }
+            }
+        }
+        if (command[0]=="GET") {
+            if (state==ANALYSING_AUDIO) {
+                response.status=HTTPResponse::HTTP_OK;
+                response.description="Rotor: analysing audio\n";
+                char c[20];
+                sprintf(c,"%.2f",progress);
+                response.description+=""+string(c)+"\n";
+            }
+            if (state==AUDIO_READY) {
+                //not sure about this-- should this state be retained?
+                //can the data only be read once?
+                //for now
+                response.status=HTTPResponse::HTTP_OK;
+                response.description="Rotor: audio ready\n";
+                response.description+="";
+                state=IDLE;
+            }
+        }
+        if (command[0]=="DELETE") {
+            //for now
+            audio_filename="";
+            response.description="1\n";
+            response.status=HTTPResponse::HTTP_OK;
+        }
+    }
+    if (command[2]=="graph") {
+        if (command[0]=="GET") {
+            if (graph.loaded) {
+                response.status=HTTPResponse::HTTP_OK;
+                response.description=graph.toString();
+            }
+            else {
+                response.description="Rotor: graph not loaded\n";
+            }
+        }
+        if (command[0]=="PUT") { //get new graph from file
+            if (command.size()>3) {
+                //should interrupt whatever is happening?
+                //before beginning to load from xml
+                if (state==IDLE) { //eventually not like this
+                    Poco::File f=Poco::File(command[3]);
+                    if (f.exists()) {
+                        string graph_filename=command[3];
+                        if (graph.load(graph_filename)) {
+                            response.status=HTTPResponse::HTTP_OK;
+                            //response.description="Rotor: loaded graph "+command[3]+"\n";
+                            response.description=graph.toString();
+                            //the graph could actually contain an xml object and we could just print it here?
+                            //or could our nodes even be subclassed from xml nodes?
+                            //the graph or the audio could load first- have to analyse the audio with vamp after the graph is loaded
+                            //for now the graph must load 1st
+                        }
+                        else {
+                            response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR; //~/sources/poco-1.4.6-all/Net/include/Poco/Net/HTTPResponse.h
+                            response.description="Rotor: could not load graph "+command[3]+"\n";
+                        }
+                    }
+                    else {
+                        response.status=HTTPResponse::HTTP_NOT_FOUND;
+                        response.description="File "+command[3]+" not found\n";
+                    }
+                }
+            }
+        }
+        if (command[0]=="DELETE") {
+            //for now
+            graph=Graph();
+            response.description="1\n";
+            response.status=HTTPResponse::HTTP_OK;
+        }
+    }
+    if (command[2]=="signal") {
+        if (command[0]=="GET") { //generate xml from 1st signal output
+            if (state==IDLE) {
+                //direct call for testing
+                float framerate=25.0f;
+                if (command.size()>3) {
+                    framerate=ofToFloat(command[3]);
+                }
+                string signal_xml;
+                if (graph.signal_render(signal_xml,framerate)){
+                    response.status=HTTPResponse::HTTP_OK;
+                    response.description=signal_xml;
+                }
+                else {
+                    response.status=HTTPResponse::HTTP_INTERNAL_SERVER_ERROR;
+                    response.description="Rotor: could not render output signal\n";
+                }
+            }
+            else {
+                response.status=HTTPResponse::HTTP_NOT_FOUND;
+                response.description="Signal output not found\n";
+            }
+        }
+        else {
+            response.status=HTTPResponse::HTTP_SERVICE_UNAVAILABLE;
+            response.description="Rotor: context busy\n";
+        }
+    }
+    if (command[2]=="video") {
+        if (command[0]=="GET") {
+            //DUMMY RESPONSE
+            response.status=HTTPResponse::HTTP_OK;
+            response.description="DUMMY RESPONSE Rotor: analysing video\n";
+            response.description+="45.2\n";
+        }
+        if (command[0]=="PUT") { //get video file location and initiate analysis
+            if (command.size()>3) {
+                if (state==IDLE) {
+                    //check file exists
+                    Poco::File f=Poco::File(command[3]);
+                    if (f.exists()) {
+                        //pass to worker thread ??if engine is ready?? ??what if engine has finished but results aren't read??
+                        //DUMMY RESPONSE
+                        response.description="DUMMY RESPONSE Starting video analysis: "+command[3]+"\n";
+                    }
+                    else {
+                        response.status=HTTPResponse::HTTP_NOT_FOUND;
+                        response.description="File "+command[3]+" not found\n";
+                    }
+                }
+                else {
+                    response.status=HTTPResponse::HTTP_BAD_REQUEST;
+                    response.description="Rotor: session busy\n";
+                }
+            }
+        }
+        if (command[0]=="DELETE") {
+            //DUMMY RESPONSE
+            response.description="DUMMY RESPONSE 1\n";
+            response.status=HTTPResponse::HTTP_OK;
+        }
+
+    }
+    if (command[2]=="render") {
+        if (command[0]=="GET") {
+            //DUMMY RESPONSE
+            response.status=HTTPResponse::HTTP_OK;
+            response.description="DUMMY RESPONSE Rotor: rendering video\n";
+            response.description+="25.2\n";
+        }
+        if (command[0]=="PUT") {
+            if (command.size()>3) {
+                if (state==IDLE) {
+                    output_filename=command[3];
+                    if (command.size()>4) {
+//                        output_framerate=ofToFloat(command[4]);
+                    }
+                    add_queue(RENDER);
+                    response.status=HTTPResponse::HTTP_OK;
+                    response.description="Starting render: "+command[3]+"\n";
+                }
+                else {
+                    response.status=HTTPResponse::HTTP_BAD_REQUEST;
+                    response.description="Rotor: session busy\n";
+                }
+            }
+            else {
+                response.status=HTTPResponse::HTTP_BAD_REQUEST;
+                response.description="Rotor: no output file specified\n";
+            }
+        }
+        if (command[0]=="DELETE") {
+            //DUMMY RESPONSE
+            //SHOULD CHECK REQUIREMENTS
+            response.status=HTTPResponse::HTTP_OK;
+            response.description="DUMMY RESPONSE Rotor: cancelling render\n";
+        }
+    }
+    return response;
+}
+
+//http://blog.tomaka17.com/2012/03/libavcodeclibavformat-tutorial/
+//great to use c++11 features
+
+bool Render_context::load_audio(const string &filename,vector<Audio_processor*> processors){ //element type assumed: lost in the HTML capture
+
+    av_register_all();
+
+    AVFrame* frame = avcodec_alloc_frame();
+    if (!frame)
+    {
+        std::cout << "Error allocating the frame" << std::endl;
+        return false;
+    }
+
+    AVFormatContext* formatContext = NULL;
+    if (avformat_open_input(&formatContext, filename.c_str(), NULL, NULL) != 0)
+    {
+        av_free(frame);
+        std::cout << "Error opening the file" << std::endl;
+        return false;
+    }
+
+    if (avformat_find_stream_info(formatContext, NULL) < 0)
+    {
+        av_free(frame);
+        avformat_close_input(&formatContext);
+        std::cout << "Error finding the stream info" << std::endl;
+        return false;
+    }
+
+    AVStream* audioStream = NULL;
+    for (unsigned int i = 0; i < formatContext->nb_streams; ++i)
+    {
+        if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+        {
+            audioStream = formatContext->streams[i];
+            break;
+        }
+    }
+
+    if (audioStream == NULL)
+    {
+        av_free(frame);
+        avformat_close_input(&formatContext);
+        std::cout << "Could not find any audio stream in the file" << std::endl;
+        return false;
+    }
+
+    AVCodecContext* codecContext = audioStream->codec;
+
+    codecContext->codec = avcodec_find_decoder(codecContext->codec_id);
+    if (codecContext->codec == NULL)
+    {
+        av_free(frame);
+        avformat_close_input(&formatContext);
+        std::cout << "Couldn't find a proper decoder" << std::endl;
+        return false;
+    }
+    else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
+    {
+        av_free(frame);
+        avformat_close_input(&formatContext);
+        std::cout << "Couldn't open the context with the decoder" << std::endl;
+        return false;
+    }
+
+    av_dump_format(formatContext, 0, 0, false); //avformat.h line 1256
+    int samples = ((formatContext->duration + 5000)*codecContext->sample_rate)/AV_TIME_BASE;
+    graph.duration=((float)formatContext->duration)/AV_TIME_BASE;
+
+    std::cout << "This stream has " << codecContext->channels << " channels, a sample rate of " << codecContext->sample_rate <<
"Hz and "<sample_fmt<< " (aka "<< av_get_sample_fmt_name(codecContext->sample_fmt) << ") "<init(codecContext->channels,16,samples,codecContext->sample_rate) ){ + cerr<<"Plugin failed to initialse"<index) + { + // Try to decode the packet into a frame + int frameFinished = 0; + //int bytes = + avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); + + // Some frames rely on multiple packets, so we have to make sure the frame is finished before + // we can use it + if (frameFinished) + { + // frame now has usable audio data in it. How it's stored in the frame depends on the format of + // the audio. If it's packed audio, all the data will be in frame->data[0]. If it's in planar format, + // the data will be in frame->data and possibly frame->extended_data. Look at frame->data, frame->nb_samples, + // frame->linesize, and other related fields on the FFmpeg docs. I don't know how you're actually using + // the audio data, so I won't add any junk here that might confuse you. Typically, if I want to find + // documentation on an FFmpeg structure or function, I just type " doxygen" into google (like + // "AVFrame doxygen" for AVFrame's docs) + + //av_get_channel_layout_string (char *buf, int buf_size, int nb_channels, uint64_t channel_layout) + + + //now we can pass the data to the processor(s) + for (auto p: processors) { + p->process_frame(frame->data[0],frame->nb_samples); + } + sample_processed+=frame->nb_samples; + mutex.lock(); + progress=((double)sample_processed)/samples; + mutex.unlock(); + } + } + // You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory + av_free_packet(&packet); + } + + // Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag + // is set, there can be buffered up frames that need to be flushed, so we'll do that + if (codecContext->codec->capabilities & CODEC_CAP_DELAY) + { + av_init_packet(&packet); + // Decode all the remaining frames in the buffer, until the end is reached + int frameFinished = 0; + int bytes = avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet); + while (bytes >= 0 && frameFinished) + { + for (auto p: processors) { + p->process_frame(frame->data[0],frame->nb_samples); + } + mutex.lock(); + progress=((double)sample_processed)/samples; + mutex.unlock(); + } + } + + cerr << "finished processing: "<cleanup(); + } + + + av_free(frame); + avcodec_close(codecContext); + avformat_close_input(&formatContext); + + return true; +} +bool Render_context::load_video(string nodeID,string &filename){ +} -- cgit v1.2.3