#include "rotor.h" using namespace Rotor; Node_factory::Node_factory(){ //for now, statically load prototype map in constructor add_type("audio_analysis",new Audio_analysis()); add_type("divide",new Signal_divide()); add_type("bang",new Is_new_integer()); add_type("signal_output",new Signal_output()); add_type("testcard",new Testcard()); add_type("video_output",new Video_output()); add_type("video_input",new Video_input()); } bool Signal_input::connect(Signal_node* source) { if (source->output_type=="signal") { connection=(Node*)source; return true; } else return false; } bool Image_input::connect(Image_node* source) { if (source->output_type=="image") { connection=(Node*)source; return true; } else return false; } bool Signal_output::render(const float duration, const float framerate,string &xml_out){ //testing signal routes cerr << "Rotor: Signal_output rendering " << duration << " seconds at " << framerate << " frames per second" << endl; float step=1.0f/framerate; float v=0.0f; for (float f=0.0f;f"+ofToString(u)+"\n"); v=u; } } return true; } bool Audio_thumbnailer::init(int _channels,int _bits,int _samples,int _rate) { //base_audio_processor::init(_channels,_bits,_samples); channels=_channels; bits=_bits; samples=_samples; samples_per_column=samples/width; column=0; //point thumbnail bitmap out_sample=0; //sample in whole track offset=0x1<<(bits-1); //signed audio scale=1.0/offset; sample=0; samples=0; accum=0.0; return true; } int Audio_thumbnailer::process_frame(uint8_t *_data,int samples_in_frame){ //begin by processing remaining samples //samples per column could be larger than a frame! (probably is) //but all we are doing is averaging int bytes=(bits>>3); int stride=channels*bytes; int in_sample=0; while (in_sample>1; for (int i=0;iwrite((char*)data,width*height); //tring output; /* for (int j=0;jclose(); delete enc; return output.str(); } bool Audio_analysis::init(int _channels,int _bits,int _samples, int _rate) { //need these to make sense of data channels=_channels; bits=_bits; samples=_samples; return analyser.init(soname,id,_channels,_bits,_samples,_rate); //attempt to load vamp plugin and prepare to receive frames of data //should the audio analysis contain a vamphost or should it inherit? 
bool Audio_analysis::init(int _channels,int _bits,int _samples, int _rate) {
	//need these to make sense of data
	channels=_channels;
	bits=_bits;
	samples=_samples;
	return analyser.init(soname,id,_channels,_bits,_samples,_rate);
	//attempt to load vamp plugin and prepare to receive frames of data
	//should the audio analysis contain a vamphost or should it inherit?
	//maybe neater to contain it in terms of headers etc
}

int Audio_analysis::process_frame(uint8_t *data,int samples_in_frame) {
	analyser.process_frame(data,samples_in_frame);
	return 1;
}

void Audio_analysis::cleanup() {
	analyser.cleanup();
	//print_features();
}

void Audio_analysis::print_features(){
	for (auto i: analyser.features) {
		//[lost in source: the cerr<< printout of each feature]
	}
}

/*[lost in source: the signature and head of Video_output::render — the text
   resumes inside it after the exporter, output size and rates are set up]*/
	if (exporter->setup(outW,outH,bitRate,frameRate,container)) { //codecId,
		if (exporter->record(output_filename)) {
			cerr << "Rotor: Video_output rendering " << duration << " seconds at "
				<< framerate << " fps, audio frame size: " << exporter->get_audio_framesize() << endl;
			//[reconstructed: the clock declarations and outer frame loop were lost in the source]
			float vstep=1.0f/framerate;
			float vf=0.0f;
			float af=0.0f;
			while (vf<duration) {
				//audio->pts comes from video
				while (flessorequal(af,vf)) { //insert audio frames until we are ahead of the video
					//[the fragment reads flessorequal(vf,af), which would never let the
					// audio stop once it got ahead; the arguments are swapped here]
					uint16_t* s=audioloader.get_samples(exporter->get_audio_framesize());
					exporter->encodeFrame(s);
					af+=exporter->get_audio_step();
				}
				Image* i=get_output(Frame_spec(vf,framerate,outW,outH));
				exporter->encodeFrame(i->RGBdata);
				vf+=vstep;
				/*
				if (!exporter->encodeFrame(i->RGBdata)){
				//if (!exporter->encodeFrame(get_output(Frame_spec(f,framerate,outW,outH))->RGBdata,audioloader.get_packet())){
					cerr << "Rotor: video output failed"<<endl; //[tail of this dead block lost in source]
				}
				*/
				if (100.0f*vf/duration>99.5){ //[left side reconstructed from the surviving ">99.5"; looks like a progress check used as a debug hook]
					cerr<<"stop here"<<endl;
				}
			}
			exporter->finishRecord();
			cerr << "Rotor: Video_output finished "<< endl;
			return true;
		}
	}
	return false;
}

bool Video_input::load(const string &filename){
	//gstreamer needs absolute paths ALWAYS
	//string uri="file:///home/tim/workspace/rotor/rotord/"+filename;
	Poco::Path path;
	string uri="file://"+path.current()+filename;
	//cerr << "video input: loading "<<uri<<endl;
	if (player->loadMovie(uri)){
		player->play();
		player->setPaused(true);
		player->setFrameByFrame(true);
		player->update();
		cerr<<"Rotor::Video_input loaded "<<player->getDuration()<<" seconds"<<", "<<player->getWidth()<<"x"<<player->getHeight()<<endl;
		image->setup_fromRGB(player->getWidth(),player->getHeight(),(uint8_t*) player->getPixels());
		return true;
	}
	return false;
}

Image* Video_input::get_output(const Frame_spec &frame){
	//wonder about the actual mechanism used by gstreamer
	//have to implement callback when seek is ready?
	//presume gstreamer caches a loaded frame?
	//if (player->isLoaded()){
	//	image.setup(player)
	//}
	//deal with resolution: swscale from avcodec or put scaler in pipeline?
	//can image node point to buffer in gst rather than copying the pixels?
	//to test using fp time to seek: need a short movie with synced audio
	//fix actual duration and audio file
	//trace frame that is being read
	if (player->isLoaded()){
		//player->setPosition(frame.time);
		int wanted=((int) (frame.time*frame.framerate))%(player->getTotalNumFrames()-1); //wrap so times past the clip loop
		player->setFrame(wanted);
		//while (player->getCurrentFrame()!=wanted){
		//	cerr << "seeking to "<<player->getCurrentFrame()<<endl;
		//	player->setFrame(wanted);
		//	player->update();
		//	sleep(.001);
		//}
		player->update();
		image->RGBdata=player->getPixels(); //don't really know why this is needed every frame
		//cerr<<"Video_input: retrieving frame "<<((int) (frame.time*frame.framerate))<<endl;
	}
	return image; //[tail reconstructed: the source breaks off in the commented trace line above]
}
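//--------------------------------------------------------------
// [illustrative sketch, not original code] flessorequal() is called in
// Video_output::render above but not defined in this file. A minimal version
// of the comparison it needs, with an epsilon so pts clocks built up by
// repeated float addition still compare as equal; the tolerance and the
// _sketch name are assumptions, not the originals.
static inline bool flessorequal_sketch(float a,float b){
	return a<=b+1e-6f; //treat near-equal clocks as equal to absorb float drift
}
// Usage: mux by keeping the audio clock just ahead of the video clock.
//   while (vf<duration){
//       while (flessorequal_sketch(af,vf)){ encode_audio(); af+=audio_step; }
//       encode_video(vf); vf+=1.0f/framerate;
//   }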