| field | value | date |
|---|---|---|
| author | Comment <tim@gray.(none)> | 2013-07-26 22:46:00 +0100 |
| committer | Comment <tim@gray.(none)> | 2013-07-26 22:46:00 +0100 |
| commit | 3d7eea02aa7a155b84c8c74ecbfd55a1941a9297 (patch) | |
| tree | d49d6ac97a0df08f5ea7e6c6c291acca0f65cd12 /rotord/rotor.cpp | |
| parent | 7092eaaae3e844a68804b8a6b6825381e9a81443 (diff) | |
tidy files
Diffstat (limited to 'rotord/rotor.cpp')
| mode | file | lines changed |
|---|---|---|
| -rwxr-xr-x | rotord/rotor.cpp | 361 |
1 file changed, 0 insertions, 361 deletions
```diff
diff --git a/rotord/rotor.cpp b/rotord/rotor.cpp
deleted file mode 100755
index 8b72c50..0000000
--- a/rotord/rotor.cpp
+++ /dev/null
@@ -1,361 +0,0 @@
-#include "rotor.h"
-#include "nodes_audio_analysis.h"
-#include "nodes_drawing.h"
-
-using namespace Rotor;
-Node_factory::Node_factory(){
-    //for now, statically load prototype map in constructor
-    add_type("audio_analysis",new Audio_analysis());
-    add_type("divide",new Signal_divide());
-    add_type("bang",new Is_new_integer());
-    add_type("signal_output",new Signal_output());
-    add_type("testcard",new Testcard());
-    add_type("video_output",new Video_output());
-    add_type("video_loader",new Video_loader());
-    add_type("on_off",new On_off());
-    add_type("invert",new Invert());
-    add_type("video_cycler",new Video_cycler());
-    add_type("luma_levels",new Luma_levels());
-    add_type("echo_trails",new Echo_trails());
-    add_type("time",new Time());
-    add_type("track_time",new Track_time());
-    add_type("comparison",new Comparison()); //TODO: alias to symbols
-    add_type("arithmetic",new Arithmetic()); //TODO: alias to symbols
-    add_type("signal_colour",new Signal_colour());
-    add_type("signal_greyscale",new Signal_greyscale());
-    add_type("image_arithmetic",new Image_arithmetic());
-    add_type("random",new Random());
-    add_type("blend",new Blend());
-    add_type("mirror",new Mirror());
-    add_type("monochrome",new Monochrome());
-    add_type("transform",new Transform());
-    add_type("alpha_merge",new Alpha_merge());
-    add_type("draw",new Draw());
-}
-
-bool Signal_input::connect(Signal_node* source) {
-    if (source->output_type=="signal") {
-        connection=(Node*)source;
-        return true;
-    }
-    else return false;
-}
-void Parameter_input::update(const Time_spec& time){ //gets input and updates variable
-    if (receiver){
-        *receiver=((Signal_node*)connection)->get_output(time);
-    }
-}
-bool Image_input::connect(Image_node* source) {
-    if (source->output_type=="image") {
-        connection=(Node*)source;
-        return true;
-    }
-    else return false;
-}
-void Node::update_params(const Time_spec& time){ //compute connected parameters
-    for (auto p:parameter_inputs){
-        p->update(time);
-    }
-}
-bool Signal_output::render(const float duration, const float framerate,string &xml_out){
-    //testing signal routes
-    cerr << "Rotor: Signal_output rendering " << duration << " seconds at " << framerate << " frames per second" << endl;
-    float step=1.0f/framerate;
-    float v=0.0f;
-    float min=10000000.0f;
-    float max=-10000000.0f;
-    for (float f=0.0f;f<duration;f+=step) {
-        float u=get_output(Time_spec(f,framerate,duration));
-        if (!fequal(u,v)) {
-            xml_out+=("<signal time='"+ofToString(f)+"'>"+ofToString(u)+"</signal>\n");
-            v=u;
-            if (v>max) max=v;
-            if (v<min) min=v;
-        }
-    }
-    xml_out+=("<signal_finished min='"+ofToString(min)+"' max='"+ofToString(max)+"'/>\n");
-    return true;
-}
-
-bool Audio_thumbnailer::init(int _channels,int _bits,int _samples,int _rate) {
-    //base_audio_processor::init(_channels,_bits,_samples);
-    channels=_channels;
-    bits=_bits;
-    samples=_samples;
-    samples_per_column=samples/width;
-    column=0; //point thumbnail bitmap
-    out_sample=0; //sample in whole track
-    offset=0x1<<(bits-1); //signed audio
-    scale=1.0/offset;
-    sample=0;
-    samples=0;
-    accum=0.0;
-    return true;
-}
-int Audio_thumbnailer::process_frame(uint8_t *_data,int samples_in_frame){
-    //begin by processing remaining samples
-    //samples per column could be larger than a frame! (probably is)
-    //but all we are doing is averaging
-    int bytes=(bits>>3);
-    int stride=channels*bytes;
-    int in_sample=0;
-    while (in_sample<samples_in_frame&&column<width) {
-        //continue the column
-        while (sample<samples_per_column&&in_sample<samples_in_frame) {
-            //accumulate samples for this column until we run out of samples
-            for (int i=0;i<channels;i++) {
-                unsigned int this_val=0;
-                for (int j=0;j<bytes;j++) {
-                    this_val+=_data[(in_sample*stride)+(i*bytes)+j]<<(j*8);
-                }
-                //convert from integer data format - i.e s16p - to audio signal in -1..1 range
-                //presume 16 bits for now...
-                double val=((double)((int16_t)this_val))*scale;
-                accum+=val*val;
-                samples++;
-            }
-            in_sample++;
-            sample++;
-            out_sample++;
-        }
-        if (sample==samples_per_column) { //finished a column
-            //get root-mean
-            double mean=pow(accum/samples,0.5);
-            //if (column==0) {
-            //    cerr << "first column total: "<< accum << " in " << samples << " samples, average " << (accum/samples)<<endl;
-            //}
-            int colheight=height*mean*0.5;
-            int hh=height>>1;
-            for (int i=0;i<height;i++) {
-                data[i*width+column]=abs(i-hh)<colheight?0xff:0x00;
-            }
-            column++;
-            sample=0;
-            samples=0;
-            accum=0.0;
-        }
-    }
-    return out_sample;
-}
-string Audio_thumbnailer::print(){
-    //base64 encode the image data output it
-
-    stringstream output;
-    Poco::Base64Encoder *enc=new Poco::Base64Encoder(output);
-
-    enc->write((char*)data,width*height);
-    //tring output;
-    /*
-    for (int j=0;j<height;j++) {
-        for (int i=0;i<width;i++) {
-            output+=data[j*width+i]<0x7f?"0":"1";
-        }
-        output +="\n";
-    }
-    */
-    enc->close();
-    delete enc;
-    return output.str();
-}
-bool Audio_analysis::init(int _channels,int _bits,int _samples, int _rate) {
-    //need these to make sense of data
-    channels=_channels;
-    bits=_bits;
-    samples=_samples;
-
-    return analyser.init(soname,id,_channels,_bits,_samples,_rate,outputNo,params);
-
-
-    //attempt to load vamp plugin and prepare to receive frames of data
-    //should the audio analysis contain a vamphost or should it inherit?
-    //maybe neater to contain it in terms of headers etc
-
-}
-int Audio_analysis::process_frame(uint8_t *data,int samples_in_frame) {
-    analyser.process_frame(data,samples_in_frame);
-    return 1;
-}
-void Audio_analysis::cleanup() {
-    analyser.cleanup();
-    //print_features();
-}
-void Audio_analysis::print_features(){
-    for (auto i: analyser.features) {
-        cerr<<" ["<<i.second<<":"<<i.first<<"]";
-    }
-    cerr<<endl;
-}
-
-bool Video_output::render(const float duration, const float framerate,const string &output_filename,const string &audio_filename,float& progress,int outW,int outH){
-
-    //
-    //setup defaults
-    int bitRate=5000000;
-    AVCodecID codecId=AV_CODEC_ID_H264; //MPEG4;
-    std::string container ="mp4";
-
-
-    //at the moment it crashes if you render before audio is loaded and also on 2nd render
-    libav::exporter exporter;
-
-    float spct=100.0f/duration;
-
-    if (exporter.setup(outW,outH,bitRate,framerate,container)) { //codecId,
-        if (exporter.record(output_filename)) {
-
-            libav::audioloader audioloader;
-
-            bool usingaudio=audioloader.setup(audio_filename);
-
-            cerr << "Rotor: Video_output rendering " << duration << " seconds at " << framerate << " fps, audio frame size: " << exporter.get_audio_framesize()<<endl;
-            //25fps video and 43.06640625fps audio? hmm
-            //how to get the timecodes correct for the interleaved files
-
-            struct timeval start, end;
-
-            gettimeofday(&start, NULL);
-
-
-            float vstep=1.0f/framerate;
-            float v=0.0f;
-            float vf=0.0f;
-            float af=0.0f;
-            while (vf<duration){ //-vstep) {
-                if (usingaudio) {
-                    while (!fless(af,vf)) {
-                        //insert audio frames until we are ahead of the video
-                        exporter.encodeFrame(audioloader.get_samples(exporter.get_audio_framesize()));
-                        af+=exporter.get_audio_step();
-
-                    }
-                }
-
-
-                //[mp3 @ 0x7fffe40330e0] max_analyze_duration 5000000 reached at 5015510 microseconds
-                //[mp3 @ 0x7fffe4033ec0] Insufficient thread locking around avcodec_open/close()
-                //[mp3 @ 0x7fffe40330e0] Estimating duration from bitrate, this may be inaccurate
-                //[libx264 @ 0x7fffe8003940] using cpu capabilities: MMX2 SSE2Fast SSSE3 FastShuffle SSE4.2
-                //[libx264 @ 0x7fffe8003940] profile High, level 3.0
-                //[libx264 @ 0x7fffe8003940] 264 - core 123 r2189 35cf912 - H.264/MPEG-4 AVC codec - Copyleft 2003-2012 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=10 keyint_min=1 scenecut=40 intra_refresh=0 rc_lookahead=10 rc=abr mbtree=1 bitrate=400 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
-                //Assertion ff_avcodec_locked failed at libavcodec/utils.c:2967
-
-                //cerr<<"videoloader: "<<vf<<" seconds, vstep "<<vstep<<" ,asking for frame "<<((int)((vf*framerate)+0.5))<<endl;
-
-                Image* i=get_output(Frame_spec(vf,framerate,duration,outW,outH));
-                if (i) {
-                    exporter.encodeFrame(i->RGBdata);
-
-                }
-                vf+=vstep;
-                progress=vf/duration;
-            }
-
-            exporter.finishRecord();
-
-            gettimeofday(&end, NULL);
-
-            float mtime = ((end.tv_sec-start.tv_sec) + (end.tv_usec-start.tv_usec)/1000000.0) + 0.5;
-
-            printf("Rotor Video_output: rendered in %02f seconds\n", mtime);
-
-            if (usingaudio) audioloader.close();
-
-            return true;
-        }
-    }
-
-    return false;
-}
-
-bool Video_loader::load(const string &filename){
-    /*
-    //gstreamer needs absolute paths ALWAYS
-    //string uri="file:///home/tim/workspace/rotor/rotord/"+filename;
-    Poco::Path path;
-    string uri="file://"+path.current()+filename;
-    //cerr << "video input: loading "<<uri<<endl;
-    if (player->loadMovie(uri)){
-        player->play();
-        player->setPaused(true);
-        player->setFrameByFrame(true);
-        player->update();
-        cerr<<"Rotor::Video_loader: "<<filename<<", "<<player->getDuration()<<" seconds "<<", "<<player->getWidth()<<"x"<<player->getHeight()<<endl;
-        image->setup_fromRGB(player->getWidth(),player->getHeight(),(uint8_t*) player->getPixels());
-        return true;
-    }
-    */
-    if (isLoaded) {
-        player.cleanup(); ///should be in decoder class?
-        isLoaded=false;
-    }
-    Poco::Path path;
-    string uri="file://"+filename;
-    isLoaded=player.open(uri);
-    if (isLoaded){
-        cerr<<"Rotor::Video_loader: "<<filename<<", "<<player.getNumberOfFrames()<<" frames "<<", "<<player.getWidth()<<"x"<<player.getHeight()<<endl;
-        return true;
-    }
-    cerr<<"Rotor::Video_loader: failed to load "<<filename<<endl;
-    return false;
-}
-Image* Video_loader::output(const Frame_spec &frame){
-    //wonder about the actual mechanism used by gstreamer
-    //have to implment callback when seek is ready?
-    //presume gstreamer caches a loaded frame?
-
-
-    //deal with reolution: swscale from avcodec or put scaler in pipeline?
-    //can image node point to buffer in gst rather than copying the pixels?
-
-    //to test using fp time to seek: need a short movie with synced audio
-
-    //fix actual duration and audio file
-    //trace frame that is being read
-    /*
-    if (player->isLoaded()){
-        //player->setPosition(frame.time);
-        int wanted=((int) (frame.time*frame.framerate))%(player->getTotalNumFrames()-2); //-2??
-        player->setFrame(wanted);
-        //while (player->getCurrentFrame()!=wanted){
-        //    cerr << "seeking to "<<wanted<<" :"<<player->getCurrentFrame()<<endl;
-            //player->setFrame(wanted);
-            //player->update();
-        //    sleep(.001);
-        //}
-        player->update();
-        image->RGBdata=player->getPixels(); //don't really know why this is needed every frame
-        //cerr<<"Video_loader: retrieving frame "<<((int) (frame.time*frame.framerate))<<endl;
-        return image;
-    }
-    */
-
-    if (isLoaded){
-        int wanted=(((int) ((frame.time*frame.framerate)+0.5))%(player.getNumberOfFrames())); //+1 is necessary because 1st frame in a video is number 1?
-
-
-        //if (wanted==99){
-        //    cerr<<"videoloader: near the end"<<endl;
-        //}
-
-        //cerr<<"videoloader: requesting frame "<<wanted<<endl;
-        //if (wanted==68) {
-        //    int nothing=0;
-        //}
-
-        if (!player.fetchFrame(frame.w,frame.h,wanted)) { //seek fail
-            cerr<<"Rotor: failed to seek frame"<<endl;
-            if (image.w>0) return &image; //just return the previous frame if possible
-            else return nullptr;
-        };
-        //cerr<<"Video_loader: setting up frame: lineoffset="<<(player.pFrameRGB->linesize[0]-(frame.w*3))<<endl;
-        image.setup_fromRGB(frame.w,frame.h,player.pFrameRGB->data[0],player.pFrameRGB->linesize[0]-(frame.w*3));
-        return &image;
-    }
-
-    //confusingly, crashes with files not made with short files?
-    //seems to be on last frame? - returns nullptr - still tries to clone?
-    //can't really return 1st frame instead, should get # of frames right 1st?
-    //think about what echo trails does on the last frame
-
-    return nullptr;
-};
```