#include "rotor.h" #include "nodes_audio_analysis.h" #include "nodes_drawing.h" #include "nodes_maths.h" #include "nodes_filters.h" using namespace Rotor; Node_factory::Node_factory(){ //for now, statically load prototype map in constructor add_type("audio_analysis",new Audio_analysis()); add_type("divide",new Signal_divide()); add_type("bang",new Is_new_integer()); add_type("signal_output",new Signal_output()); add_type("testcard",new Testcard()); add_type("video_output",new Video_output()); add_type("video_loader",new Video_loader()); add_type("on_off",new On_off()); add_type("invert",new Invert()); add_type("video_cycler",new Video_cycler()); add_type("luma_levels",new Luma_levels()); add_type("echo_trails",new Echo_trails()); add_type("time",new Time()); add_type("track_time",new Track_time()); add_type("comparison",new Comparison()); //TODO: alias to symbols add_type("arithmetic",new Arithmetic()); //TODO: alias to symbols add_type("signal_colour",new Signal_colour()); add_type("signal_greyscale",new Signal_greyscale()); add_type("image_arithmetic",new Image_arithmetic()); add_type("random",new Random()); add_type("blend",new Blend()); add_type("mirror",new Mirror()); add_type("monochrome",new Monochrome()); add_type("transform",new Transform()); add_type("alpha_merge",new Alpha_merge()); add_type("shape",new Shape()); add_type("blur",new Blur()); } bool Signal_input::connect(Signal_node* source) { if (source->output_type=="signal") { connection=(Node*)source; return true; } else return false; } void Parameter_input::update(const Time_spec& time){ //gets input and updates variable if (receiver){ *receiver=((Signal_node*)connection)->get_output(time); } } bool Image_input::connect(Image_node* source) { if (source->output_type=="image") { connection=(Node*)source; return true; } else return false; } void Node::update_params(const Time_spec& time){ //compute connected parameters for (auto p:parameter_inputs){ p->update(time); } } bool Signal_output::render(const float duration, const float framerate,string &xml_out){ //testing signal routes Logger& logger = Logger::get("Rotor"); logger.information("SIgnal_output rendering "+ofToString(duration)+" seconds at "+ofToString(framerate)+" fps"); float step=1.0f/framerate; float v=0.0f; float min=10000000.0f; float max=-10000000.0f; for (float f=0.0f;f"+ofToString(u)+"\n"); v=u; if (v>max) max=v; if (v\n"); return true; } bool Audio_thumbnailer::init(int _channels,int _bits,int _samples,int _rate) { //base_audio_processor::init(_channels,_bits,_samples); channels=_channels; bits=_bits; samples=_samples; samples_per_column=samples/width; column=0; //point thumbnail bitmap out_sample=0; //sample in whole track offset=0x1<<(bits-1); //signed audio scale=1.0/offset; sample=0; samples=0; accum=0.0; return true; } int Audio_thumbnailer::process_frame(uint8_t *_data,int samples_in_frame){ //begin by processing remaining samples //samples per column could be larger than a frame! 
int Audio_thumbnailer::process_frame(uint8_t *_data,int samples_in_frame){
	//begin by processing remaining samples
	//samples per column could be larger than a frame! (probably is)
	//but all we are doing is averaging
	int bytes=(bits>>3);
	int stride=channels*bytes;
	int in_sample=0;
	while (in_sample<samples_in_frame){
		//NOTE: the original loop body was lost in this copy; from the members prepared in
		//init() it accumulated each normalised sample into accum and, after every
		//samples_per_column samples, wrote the running average into the current thumbnail
		//column before moving on. A minimal sketch of that shape, assuming 16-bit signed
		//samples and using only the first channel:
		int16_t s=*((int16_t*)(_data+in_sample*stride));
		accum+=scale*(s<0?-s:s);	//normalised magnitude
		sample++;
		out_sample++;
		in_sample++;
		if (sample==samples_per_column){
			//(the original write of the column average into the thumbnail was lost)
			column++;
			sample=0;
			accum=0.0;
		}
	}
	return 1;	//assumed return value, matching Audio_analysis::process_frame()
}

//NOTE: the declaration and most of the body of this next method were lost in this copy.
//From the statements that survive it streams the width x height thumbnail bitmap in
//'data' through an encoder 'enc' into a string stream 'output' and returns the result;
//the method name, the stream declaration and the encoder type below are placeholders.
string Audio_thumbnailer::encode(){	//hypothetical name
	int mid=height>>1;	//reconstruction: only ">1;" survives of this statement
	for (int i=0;i<width;i++){
		//(lost: presumably draws column i of the waveform into the bitmap around 'mid')
	}
	ostringstream output;	//assumed: 'output' must be a string stream, it is returned via .str()
	Poco::Base64Encoder* enc=new Poco::Base64Encoder(output);	//assumed encoder type
	enc->write((char*)data,width*height);
	//string output;
	/* for (int j=0;j<height;j++){ ... } */	//remnant of a commented-out debug loop
	enc->close();
	delete enc;
	return output.str();
}

bool Audio_analysis::init(int _channels,int _bits,int _samples, int _rate) {
	//need these to make sense of the data
	channels=_channels;
	bits=_bits;
	samples=_samples;
	//attempt to load the vamp plugin and prepare to receive frames of data
	//should the audio analysis contain a vamp host or should it inherit from one?
	//maybe neater to contain it, in terms of headers etc
	return analyser.init(soname,id,_channels,_bits,_samples,_rate,outputNo,params);
}

int Audio_analysis::process_frame(uint8_t *data,int samples_in_frame) {
	analyser.process_frame(data,samples_in_frame);
	return 1;
}

void Audio_analysis::cleanup() {
	analyser.cleanup();
	//print_features();
}

void Audio_analysis::print_features(){
	for (auto i: analyser.features) {
		cerr<<" ["<<i.first<<"]"<<endl;	//reconstruction: only 'cerr<<" ["<<' survives of this line; assumes features is keyed by name
	}
}

//NOTE: everything between print_features() above and the tail of the following method was
//lost in this copy. The surviving tail clearly belongs to Video_output's render routine
//(it finishes the exporter, logs the elapsed time and closes the audio loader); the
//signature, the guards and the exporter set-up call below are reconstructions.
bool Video_output::render(const float duration, const float framerate){	//assumed signature
	Logger& logger = Logger::get("Rotor");
	if (input.connection){	//assumed guard: the surviving tail closes two nested blocks before 'return false'
		struct timeval start,end;
		gettimeofday(&start, NULL);
		if (exporter.init(output_filename)){	//assumed call; only exporter.finishRecord() survives below
			float vstep=1.0f/framerate;
			float vf=0.0f;
			while (vf<duration){
				//(lost: the frame for time vf is fetched from the connected image node and
				// its pixels handed to the exporter; the surviving fragment ends "...->RGBdata);")
				vf+=vstep;
				progress=vf/duration;
			}
			exporter.finishRecord();
			gettimeofday(&end, NULL);
			float mtime = ((end.tv_sec-start.tv_sec) + (end.tv_usec-start.tv_usec)/1000000.0) + 0.5;
			logger.information("Video_output: rendered "+output_filename+": in "+ofToString(mtime)+" seconds");
			if (usingaudio) audioloader.close();
			return true;
		}
	}
	return false;
}

bool Video_loader::load(const string &_filename){
	Logger& logger = Logger::get("Rotor");
	if (isLoaded) {
		player.cleanup();	///should this be in the decoder class?
		isLoaded=false;
	}
	Poco::Path path;
	string uri="file://"+_filename;
	isLoaded=player.open(uri);
	if (isLoaded){
		logger.information("Video_loader loaded "+filename+": "+ofToString(player.getNumberOfFrames())+" frames, "+ofToString(player.getFrameRate())+" fps, "+ofToString(player.getWidth())+"x"+ofToString(player.getHeight()));
		return true;
	}
	logger.error("Video_loader failed to load "+filename);
	return false;
}

Image* Video_loader::output(const Frame_spec &frame){
	if (isLoaded){
		//this approach runs into the inability to seek when the requested playback speed is > 1.
		//need to cache frames so as to avoid asking for a frame other than the next one.
		//need an algorithm to find the previous keyframe and seek forward
		float clipframerate=(framerate==0.0f?player.getFrameRate():framerate);
		float clipspeed=(clipframerate/frame.framerate)*speed;
		int wanted=(((int) ((frame.time*frame.framerate*clipspeed)+0.5))%(player.getNumberOfFrames()-1))+1;	//+1 is necessary because the 1st frame in a video is number 1?
		if (wanted!=lastframe){
			if (!player.fetchFrame(frame.w,frame.h,wanted)) {
				//seek failed
				Logger& logger = Logger::get("Rotor");
				logger.error("Video_loader failed to seek frame "+ofToString(wanted)+" of "+filename);
				if (image.w>0) return &image;	//just return the previous frame if possible
				else return nullptr;
			}
			image.setup_fromRGB(frame.w,frame.h,player.pFrameRGB->data[0],player.pFrameRGB->linesize[0]-(frame.w*3));
			lastframe=wanted;
		}
		return &image;
	}
	return nullptr;
}

/*
bool CVideo_loader::load(const string &filename){
	Poco::Path path;
	string uri="file://"+filename;
	isLoaded=player.open(filename);
	if (isLoaded){
		cerr<<"Rotor::Video_loader: "<