#ifndef ROTOR_TRANSFORMS
#define ROTOR_TRANSFORMS

#include "rotor.h"

namespace Rotor {

#define TRANSFORM_nearest 1
#define TRANSFORM_linear 2
#define TRANSFORM_area 3
#define TRANSFORM_cubic 4
#define TRANSFORM_lanczos 5

class Transformer: public Image_node {
    //base class for nodes that transform an image
    //what is the best coordinate system to use?
    //  origin: corner or centre
    //  units: pixel or fractional
    //  aspect: scaled or homogeneous
public:
    Transformer(){
        create_parameter("transformX","number","X transformation","Transform X",0.0f);
        create_parameter("transformY","number","Y transformation","Transform Y",0.0f);
        create_parameter("originX","number","X transformation origin","Origin X",0.5f);
        create_parameter("originY","number","Y transformation origin","Origin Y",0.5f);
        create_parameter("rotation","number","Rotation about origin","Rotation",0.0f);
        create_parameter("scale","number","Scale about origin","Scale",1.0f);
        create_attribute("filter","Filtering mode","Filter mode","linear",{"nearest","linear","area","cubic","lanczos"});
    };

    Image *transform(Image *in){
        if (in){
            //INTER_NEAREST  - nearest-neighbour interpolation
            //INTER_LINEAR   - bilinear interpolation (OpenCV's default)
            //INTER_AREA     - resampling using pixel area relation; preferred for image decimation
            //                 as it gives moiré-free results, but behaves like INTER_NEAREST when zooming
            //INTER_CUBIC    - bicubic interpolation over a 4x4 pixel neighbourhood
            //INTER_LANCZOS4 - Lanczos interpolation over an 8x8 pixel neighbourhood
            int filtmode=cv::INTER_LINEAR; //default in case the attribute value is out of range
            switch(attributes["filter"]->intVal){
                case TRANSFORM_nearest: filtmode=cv::INTER_NEAREST; break;
                case TRANSFORM_linear: filtmode=cv::INTER_LINEAR; break;
                case TRANSFORM_area: filtmode=cv::INTER_AREA; break;
                case TRANSFORM_cubic: filtmode=cv::INTER_CUBIC; break;
                case TRANSFORM_lanczos: filtmode=cv::INTER_LANCZOS4; break;
            }
            float tX=parameters["transformX"]->value;
            float tY=parameters["transformY"]->value;
            float oX=parameters["originX"]->value;
            float oY=parameters["originY"]->value;
            float r=(parameters["rotation"]->value/180)*3.1415926f; //degrees to radians
            float s=parameters["scale"]->value;

            //do the OpenCV transform
            cv::Point2f srcTri[3], dstTri[3];
            cv::Mat rot_mat(2,3,CV_32FC1);
            cv::Mat trans_mat(2,3,CV_32FC1);
            cv::Mat out_mat(3,3,CV_32FC1);

            //normally a scale of 1 will place the image on screen at pixel size;
            //it should be that a scale of 1 places it at width w
            //how to scale around the centre?
            cv::Mat inter;
            //if (s<1){
            //    if (s<.01) s=.01;
            //    float scalefac=((float)image.w/in->w)*s;
            //    cv::resize(in->rgb,inter,cv::Size(in->w*scalefac,in->h*scalefac),s,s); //double fx=0, double fy=0, int interpolation=INTER_LINEAR
            //    s=1.0f;
            //}
            //else {
                inter=in->rgb;
                s=((float)image.w/in->w)*s;
            //}

            // Compute the matrix by creating a triangle and transforming it
            //is there a better way - combine the 2?
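            //One possible answer (untested sketch, left commented out like the alternatives below):
            //cv::getRotationMatrix2D() already combines the rotation and scale about a centre point
            //into a single 2x3 matrix, so the translation could be folded into its last column instead
            //of building a triangle. Note it expects the angle in degrees, and its angle sign convention
            //may need flipping to match the manual rotation used below.
            //cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(oX*image.w, oY*image.w),
            //                                    parameters["rotation"]->value, s);
            ////the translation is applied before the rotate/scale about the origin,
            ////so it has to be pushed through the linear part: offset += M_linear * (tX,tY)*w
            //M.at<double>(0,2) += M.at<double>(0,0)*(tX*image.w) + M.at<double>(0,1)*(tY*image.w);
            //M.at<double>(1,2) += M.at<double>(1,0)*(tX*image.w) + M.at<double>(1,1)*(tY*image.w);
            //warpAffine(in->rgb, image.rgb, M, image.rgb.size(), filtmode, cv::BORDER_WRAP);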
            //Just a bit of geometry
            srcTri[0].x=0;
            srcTri[0].y=0;
            srcTri[1].x=image.w-1;
            srcTri[1].y=0;
            srcTri[2].x=0;
            srcTri[2].y=image.h-1;
            for (int i=0;i<3;i++){
                dstTri[i].x=srcTri[i].x+(tX*image.w);
                dstTri[i].y=srcTri[i].y+(tY*image.w); //use w for equivalent coords on both axes
                //rotate and scale around the centre:
                //translate so the origin point sits at (0,0)
                dstTri[i].x-=(oX*image.w);
                dstTri[i].y-=(oY*image.w);
                dstTri[i].x*=s;
                dstTri[i].y*=s;
                float dx=(dstTri[i].x*cos(r))-(dstTri[i].y*sin(r));
                float dy=(dstTri[i].x*sin(r))+(dstTri[i].y*cos(r));
                dstTri[i].x=dx;
                dstTri[i].y=dy;
                //translate back
                dstTri[i].x+=(oX*image.w);
                dstTri[i].y+=(oY*image.w);
            }
            trans_mat=getAffineTransform( srcTri, dstTri );
            warpAffine( in->rgb, image.rgb, trans_mat, image.rgb.size(), filtmode, cv::BORDER_WRAP);

            // Compute rotation matrix
            //cv::Point centre = cv::Point( oX*image.w, oY*image.h );
            //rot_mat = getRotationMatrix2D( centre, r, s );
            // Do the transformation
            //warpAffine( inter.rgb, image.rgb, rot_mat, image.rgb.size(), filtmode, cv::BORDER_WRAP); //BORDER_WRAP
            //trans_mat.resize(3);
            //rot_mat.resize(3);
            //trans_mat.data[8]=1.0f;
            //rot_mat.data[8]=1.0f;
            //out_mat=rot_mat*trans_mat;
            //out_mat.resize(2);
            //warpAffine( inter, image.rgb, out_mat, image.rgb.size(), filtmode, cv::BORDER_WRAP);

            return &image;
        }
        return nullptr;
    }
private:
};

class Transform: public Transformer {
public:
    Transform(){
        create_image_input("image input","Image input");
        title="Transform";
        description="Apply 2D transformation";
        UID="c798586c-2d0a-11e3-a736-6f81df06fd1b";
    };
    Transform(map &settings):Transform() {
        base_settings(settings);
    };
    ~Transform(){
    };
    Transform* clone(map &_settings) { return new Transform(_settings);};

    Image *output(const Frame_spec &frame){
        return transform(image_inputs[0]->get(frame));
    }
private:
};

class Still_image: public Transformer {
public:
    Still_image(){
        create_attribute("filename","name of image file to load","File name","");
        title="Still image";
        description="Load a still image and apply 2D transformation";
        UID="d464b0d6-2d0a-11e3-acb7-6be231762445";
    };
    Still_image(map &settings):Still_image() {
        base_settings(settings);
        if (attributes["filename"]->value!=""){
            string filewithpath=find_setting(settings,"media_path","")+attributes["filename"]->value;
            if (still.read_file(filewithpath)) {
                cerr<<"Still_image: loaded "<<filewithpath<<endl;
            }
        }
    };
    Still_image* clone(map &_settings) { return new Still_image(_settings);};

    Image *output(const Frame_spec &frame){
        if (!still.rgb.empty()){
            return transform(&still);
        }
        else return nullptr;
    }
private:
    Image still;
};

} //namespace Rotor

#endif
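/*
Note on the commented-out rot_mat*trans_mat composition inside Transformer::transform():
a hedged sketch of how two 2x3 affine matrices could be composed by promoting them to 3x3.
(cv::Mat::resize() only changes the row count, and .data[8] indexes raw bytes rather than
floats, so the original attempt would not build a valid homogeneous matrix.) Untested;
centre, angle_deg, scale, src and dst are placeholder names, the rest comes from the file.

    cv::Mat A = cv::getAffineTransform(srcTri, dstTri);            // 2x3, CV_64F
    cv::Mat B = cv::getRotationMatrix2D(centre, angle_deg, scale); // 2x3, CV_64F
    cv::Mat A3 = cv::Mat::eye(3, 3, CV_64F), B3 = cv::Mat::eye(3, 3, CV_64F);
    A.copyTo(A3(cv::Rect(0, 0, 3, 2)));       // copy into the top two rows
    B.copyTo(B3(cv::Rect(0, 0, 3, 2)));
    cv::Mat C3 = B3 * A3;                     // full 3x3 product; B applied after A
    cv::Mat C  = C3(cv::Rect(0, 0, 3, 2));    // back to 2x3 for warpAffine
    cv::warpAffine(src, dst, C, dst.size(), filtmode, cv::BORDER_WRAP);
*/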