Diffstat (limited to 'rotord/src')
-rw-r--r--   rotord/src/cvimage.cpp          11
-rw-r--r--   rotord/src/cvimage.h             7
-rw-r--r--   rotord/src/nodes_channels.h    588
-rw-r--r--   rotord/src/nodes_signals.h     103
-rw-r--r--   rotord/src/nodes_source.h       74
-rw-r--r--   rotord/src/nodes_transform.h     5
-rw-r--r--   rotord/src/rotor.h              23
7 files changed, 811 insertions, 0 deletions
diff --git a/rotord/src/cvimage.cpp b/rotord/src/cvimage.cpp
index 0bf30a6..74a21dd 100644
--- a/rotord/src/cvimage.cpp
+++ b/rotord/src/cvimage.cpp
@@ -200,4 +200,15 @@ namespace Rotor {
other->rgb=rgb/amount;
return other;
}
+ cv::Mat& Image::get_mipmap(int level){
+ if (mipmaps.find(level)!=mipmaps.end()) return mipmaps[level];
+ //levels start at 1
+ int nw=max(1.0,w/pow(2,level));
+ int nh=max(1.0,h/pow(2,level));
+ cv::Mat mip;
+ cv::resize(rgb,mip,cv::Size(nw,nh),0,0,cv::INTER_AREA );
+ mipmaps[level]=mip;
+ return mipmaps[level];
+
+ }
}
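
get_mipmap() memoises progressively smaller copies of rgb: level 1 halves both dimensions, level 2 quarters them, and so on, clamped at 1x1. A minimal caller-side sketch for picking a level from a target width; the helper name and the level-0 convention are illustrative, not part of this commit:

    #include "cvimage.h"

    // Pick the lowest-resolution cached level that still covers wanted_w,
    // given that level n is roughly w / 2^n pixels wide (0 = full size).
    int level_for_width(Rotor::Image &img, int wanted_w) {
        if (wanted_w < 1) wanted_w = 1;
        int level = 0;
        while ((img.w >> (level + 1)) >= wanted_w) ++level;
        return level;
    }

    // usage: cv::Mat &src = (level > 0) ? img.get_mipmap(level) : img.rgb;
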
diff --git a/rotord/src/cvimage.h b/rotord/src/cvimage.h
index ebf6f4c..8a6fdae 100644
--- a/rotord/src/cvimage.h
+++ b/rotord/src/cvimage.h
@@ -4,6 +4,7 @@
#include <math.h>
#include <cv.h>
#include <highgui.h>
+#include <unordered_map>
//converting to use a cv image...
//cv::Mat supports most of what we want here
@@ -219,6 +220,12 @@ namespace Rotor {
cv::Mat rgb;
cv::Mat alpha;
+
+ //store a library of mipmaps
+ std::unordered_map<int,cv::Mat> mipmaps;
+
+ cv::Mat& get_mipmap(int level);
+
};
}
diff --git a/rotord/src/nodes_channels.h b/rotord/src/nodes_channels.h
new file mode 100644
index 0000000..8e7d2a0
--- /dev/null
+++ b/rotord/src/nodes_channels.h
@@ -0,0 +1,588 @@
+#ifndef ROTOR_NODES_CHANNELS
+#define ROTOR_NODES_CHANNELS
+
+#include "rotor.h"
+
+namespace Rotor {
+ class Invert: public Image_node {
+ public:
+ Invert(){
+ create_image_input("Image to invert","Image input");
+ create_parameter("invert","number","Invert when greater than 0.0","Negative",1.0f,0.0f,1.0f);
+ title="Negative";
+ description="Inverts the input picture";
+ UID="8676c25c-2d09-11e3-80a7-db36c774523c";
+ };
+ Invert(map<string,string> &settings) :Invert() {
+ base_settings(settings);
+ };
+ ~Invert(){};
+ Invert* clone(map<string,string> &_settings) { return new Invert(_settings);};
+ Image *output(const Frame_spec &frame){
+ Image *in=image_inputs[0]->get(frame);
+ if (in) {
+ if (parameters["invert"]->value>0.0f){
+ for (int i=0;i<in->w*in->h*3;i++) {
+ image.RGBdata[i]=255-in->RGBdata[i];
+ }
+ return &image;
+ }
+ return in;
+ }
+ return nullptr;
+ }
+ private:
+ };
+ class Monochrome: public Image_node {
+ public:
+ Monochrome(){
+ create_image_input("image input","Image input");
+ title="Monochrome";
+ description="Render video greyscale";
+ UID="2c3cb12e-2d0a-11e3-a46b-a34e44493cef";
+ };
+ Monochrome(map<string,string> &settings):Monochrome() {
+ base_settings(settings);
+ };
+ ~Monochrome(){
+ };
+ Monochrome* clone(map<string,string> &_settings) { return new Monochrome(_settings);};
+ Image *output(const Frame_spec &frame){
+ Image *in=image_inputs[0]->get(frame);
+ if (in){
+ for (int i=0;i<image.w;i++){
+ for (int j=0;j<image.h;j++){
+ uint8_t luma=0;
+ for (int l=0;l<3;l++) luma+=pixels.mono_weights[l][in->RGBdata[(((j*image.w)+i)*3)+l]];
+ for (int k=0;k<3;k++) image.RGBdata[(((j*image.w)+i)*3)+k]=luma;
+ }
+ }
+ return &image;
+ }
+ return nullptr;
+ }
+ private:
+ };
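
The mono_weights table consulted above looks like a 3x256 lookup of channel values pre-multiplied by a luma coefficient. A rough sketch of how such a table could be built; the Rec.601 weights and the table shape are assumptions, not taken from this commit:

    #include <cstdint>

    // Assumed layout: mono_weights[channel][value] == value * coefficient.
    static uint8_t mono_weights[3][256];

    void build_mono_weights() {
        const float coeff[3] = {0.299f, 0.587f, 0.114f};   // R, G, B (Rec.601)
        for (int c = 0; c < 3; ++c)
            for (int v = 0; v < 256; ++v)
                mono_weights[c][v] = (uint8_t)(v * coeff[c] + 0.5f);
    }
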
+#define BLEND_blend 1
+#define BLEND_screen 2
+#define BLEND_multiply 3
+#define BLEND_alpha 4
+#define BLEND_wrap 5
+#define BLEND_xor 6
+ class Blend: public Image_node {
+ public:
+ Blend(){
+ create_image_input("image input 1","Image input 1");
+ create_image_input("image input 2","Image input 2");
+ create_parameter("amount","number","amount to blend input 2","Blend amount",0.5f,0.0f,1.0f);
+ create_attribute("mode","Blend mode","Blend mode","blend",{"blend","screen","multiply","alpha","wrap","xor"});
+ title ="Blend";
+ description="Blend images in various modes";
+ UID="12ed7af0-2d0a-11e3-ae32-2b44203b93c9";
+ };
+ Blend(map<string,string> &settings):Blend() {
+ base_settings(settings);
+ };
+ ~Blend(){};
+ Blend* clone(map<string,string> &_settings) { return new Blend(_settings);};
+ Image *output(const Frame_spec &frame){
+ Image *in1=image_inputs[0]->get(frame);
+ if (in1){
+ Image *in2=image_inputs[1]->get(frame);
+ if (in2) {
+ image=*(in1);
+ switch(attributes["mode"]->intVal){
+ case BLEND_screen:
+ image+=(*in2);
+ break;
+ case BLEND_multiply:
+ image*=(*in2);
+ break;
+ case BLEND_xor:
+ image^=(*in2);
+ break;
+ case BLEND_alpha:
+ image=image.alpha_blend(*in2);
+ break;
+ case BLEND_wrap:
+ image=image.add_wrap(*in2);
+ break;
+ case BLEND_blend: //has to be last because of the initialiser of *in? go figure
+
+ image*=(1.0f-parameters["amount"]->value);
+ /* //problem here with leak
+ //opencv handles not being released
+ Image *in=(*in2)*parameters["amount"]->value;
+ image+=(*in);
+ delete in;
+ */
+ in=(*in2); //removed allocator
+ in*=parameters["amount"]->value;
+ image+=in;
+ break;
+ }
+ return &image;
+ }
+ //if there aren't 2 image inputs connected just return the first
+ return in1;
+ }
+ return nullptr;
+ }
+ private:
+ Image in;
+ };
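
The BLEND_blend branch is a plain linear cross-fade, computed above with whole-image operators; per channel it reduces to the following (a standalone restatement; the rounding term is added here):

    #include <cstdint>

    // amount == 0 keeps input 1 unchanged, amount == 1 shows only input 2.
    inline uint8_t cross_fade(uint8_t in1, uint8_t in2, float amount) {
        return (uint8_t)(in1 * (1.0f - amount) + in2 * amount + 0.5f);
    }
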
+ #define ARITHMETIC_plus 1
+ #define ARITHMETIC_minus 2
+ #define ARITHMETIC_multiply 3
+ #define ARITHMETIC_divide 4
+ #define ARITHMETIC_modulo 5
+ class Image_arithmetic: public Image_node {
+ public:
+ Image_arithmetic(){
+ create_image_input("image input","Image input");
+ create_parameter("value","number","Value or signal for operation","Value",1.0f);
+ create_attribute("operator","operator for image","Operator","+",{"+","-","*","/"});
+ title="Image arithmetic";
+ description="Performs arithmetic on an image with a signal or value";
+ UID="bc3b633e-2d09-11e3-86b2-7fbba3d71604";
+ };
+ Image_arithmetic(map<string,string> &settings):Image_arithmetic() {
+ base_settings(settings);
+ }
+ ~Image_arithmetic(){};
+ Image *output(const Frame_spec &frame){
+ Image *in=image_inputs[0]->get(frame);
+ if (in){
+ switch (attributes["operator"]->intVal) {
+ case ARITHMETIC_plus:
+ image=(*in); //could be poss without copy?
+ image+=parameters["value"]->value;
+ break;
+ case ARITHMETIC_minus:
+ image=(*in);
+ image-=parameters["value"]->value;
+ break;
+ case ARITHMETIC_multiply:
+ image=(*in);
+ image*=parameters["value"]->value;
+ break;
+ case ARITHMETIC_divide:
+ image=(*in);
+ image/=parameters["value"]->value;
+ break;
+ }
+ }
+ return &image;
+ }
+ Image_arithmetic* clone(map<string,string> &_settings) { return new Image_arithmetic(_settings);};
+ private:
+ };
+ class Alpha_merge: public Image_node {
+ public:
+ Alpha_merge(){
+ create_image_input("image input","Image input");
+ create_image_input("alpha input","Alpha input");
+ title="Alpha merge";
+ description="Alpha merge two images";
+ UID="3f5e3eee-2d0a-11e3-8679-1374154a9fa8";
+ };
+ Alpha_merge(map<string,string> &settings):Alpha_merge() {
+ base_settings(settings);
+ };
+ ~Alpha_merge(){};
+ Alpha_merge* clone(map<string,string> &_settings) { return new Alpha_merge(_settings);};
+ Image *output(const Frame_spec &frame){
+ Image *in1=image_inputs[0]->get(frame);
+ if (in1){
+ //copy incoming image **writable
+ Image *in2=image_inputs[1]->get(frame);
+ if (in2) {
+ image=(*in1);
+ image.alpha_merge(*in2);
+ return &image;
+ }
+ //if there aren't 2 image inputs connected just return the first
+ return in1;
+ }
+ return nullptr;
+ }
+ private:
+ };
+ class Difference_matte: public Image_node {
+ public:
+ Difference_matte(){
+ create_image_input("image input","Image input");
+ create_image_input("background input","Background input");
+ create_parameter("threshold","number","Difference threshold","Threshold",0.2f,0.0f,1.0f);
+ create_parameter("feather","number","Feather width","Feather",0.1f,0.0f,1.0f);
+ create_parameter("weight_h","number","H component weight","Weight H",0.5f,0.0f,1.0f);
+ create_parameter("weight_s","number","S component weight","Weight S",0.5f,0.0f,1.0f);
+ create_parameter("weight_v","number","V component weight","Weight V",0.5f,0.0f,1.0f);
+ create_parameter("blursize","number","Blur size","Blur size",2.0f,0.0f,10.0f);
+ create_attribute("mode","Output {image|alpha}","output mode","alpha",{"image","alpha"});
+ title="Difference matte";
+ description="Create an alpha channel using a background reference picture";
+ LUT=nullptr;
+ UID="4db4d2c8-2d0a-11e3-b08b-7fb00f8c562a";
+ };
+ Difference_matte(map<string,string> &settings):Difference_matte() {
+ base_settings(settings);
+ };
+ ~Difference_matte(){if (LUT) delete[] LUT;};
+ Difference_matte* clone(map<string,string> &_settings) { return new Difference_matte(_settings);};
+ Image *output(const Frame_spec &frame){
+ Image *in1=image_inputs[0]->get(frame);
+ if (in1){
+ Image *in2=image_inputs[1]->get(frame);
+ if (in2) {
+ generate_LUT();
+
+ /*
+ cv::cvtColor(in1->rgb,greyfg,CV_RGB2GRAY);
+ cv::cvtColor(in2->rgb,greybg,CV_RGB2GRAY);
+ cv::absdiff(greyfg,greybg,greyDiff);
+
+ //parameters["threshold"]->value
+ cv::threshold(greyDiff,mask,parameters["threshold"]->value,255,CV_THRESH_BINARY); //int block_size=3, double param1=5); //int blockSize, int offset=0,bool invert=false, bool gauss=false);
+
+ //cv::adaptiveThreshold(greyDiff,mask,255,CV_ADAPTIVE_THRESH_GAUSSIAN_C,CV_THRESH_BINARY, 3,5); //int block_size=3, double param1=5); //int blockSize, int offset=0,bool invert=false, bool gauss=false);
+ */
+
+ cv::cvtColor(in1->rgb, hsv1, CV_RGB2HSV);
+ cv::cvtColor(in2->rgb, hsv2, CV_RGB2HSV);
+
+ mask.create(frame.h,frame.w,CV_8UC1);
+ lutmask.create(frame.h,frame.w,CV_8UC1);
+
+ //get euclidean distance in HSV space
+ int dist,d;
+ uint8_t m;
+
+ float weights[3] = {parameters["weight_h"]->value,parameters["weight_s"]->value,parameters["weight_v"]->value};
+ float weight_total=255.0f/pow(pow(weights[0]*255,2)+pow(weights[1]*255,2)+pow(weights[2]*255,2),0.5);
+
+ for (int i=0;i<frame.w*frame.h;i++){
+ dist=0;
+ for (int j=0;j<3;j++){
+ d=((int)hsv1.data[i*3+j])-((int)hsv2.data[i*3+j]);
+ dist+=(d*d)*weights[j];
+ }
+ uint8_t id=(uint8_t)(sqrt((float)dist)*weight_total);
+ mask.data[i]=id;
+ }
+
+ /*
+
+ for (int i=0;i<frame.w*frame.h;i++){
+ dist=0;
+ for (int j=0;j<3;j++){
+ d=((int)hsv1.data[i*3+j])-((int)hsv2.data[i*3+j]);
+ dist+=(abs(d))*weights[j];
+ }
+ uint8_t id=(uint8_t)(((float)dist)/weight_total);
+ m=LUT[id];
+ mask.data[i]=m;
+ }
+ */
+
+ //cv::bilateralFilter(mask,filtmask, 4,8,2 );
+ //cv::GaussianBlur(mask,filtmask,cv::Size( 4, 4 ), 2, 2);
+
+ int ksize=max((ceil(parameters["blursize"]->value/2.0)*2)+1,1.0);
+ //nb this doesn't do the intended: create 'continuously variable' blur
+ cv::GaussianBlur(mask,filtmask,cvSize(ksize,ksize),parameters["blursize"]->value);
+
+
+ for (int i=0;i<frame.w*frame.h;i++){
+ lutmask.data[i]=LUT[filtmask.data[i]];
+ }
+
+
+ image=(*in1);
+ if (attributes["mode"]->value=="image"){
+ cv::cvtColor(lutmask, image.rgb, CV_GRAY2RGB);
+ }
+ else image.alpha_from_cv(lutmask);
+ return &image;
+
+
+
+ }
+ //if there aren't 2 image inputs connected just return the first
+ return in1;
+ }
+ return nullptr;
+ }
+ void generate_LUT(){
+ //can check here if anything has changed
+ //cerr<<"generating LUT: threshold "<<parameters["threshold"]->value<<", feather "<<parameters["feather"]->value<<endl;
+ if (LUT) delete[] LUT;
+ LUT=new uint8_t[256];
+ float fltmax=(255.0f/256.0f);
+ float minf=max(0.0f,parameters["threshold"]->value-(parameters["feather"]->value*0.5f));
+ float maxf=min(1.0f,parameters["threshold"]->value+(parameters["feather"]->value*0.5f));
+ for (int i=0;i<256;i++){
+ LUT[i]=(uint8_t)(min(1.0f,max(0.0f,((((float)i)/255.0f)-minf)/(maxf-minf)))*255.0f);
+ // cerr<<((int)LUT[i])<<" ";
+ }
+ //cerr<<endl;
+ }
+ private:
+ cv::Mat greyfg,greybg,greyDiff,mask,filtmask,lutmask;
+ cv::Mat hsv1,hsv2;
+ uint8_t *LUT;
+ };
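
The matte response built by generate_LUT() is a feathered threshold: distances below threshold - feather/2 map to 0, distances above threshold + feather/2 map to 255, with a linear ramp in between (e.g. threshold 0.2 and feather 0.1 ramp from roughly 38 to 64 on the 0..255 scale). A standalone restatement of the same curve:

    #include <algorithm>
    #include <cstdint>

    uint8_t feathered_alpha(uint8_t dist, float threshold, float feather) {
        float minf = std::max(0.0f, threshold - feather * 0.5f);
        float maxf = std::min(1.0f, threshold + feather * 0.5f);
        float t = ((dist / 255.0f) - minf) / (maxf - minf);
        return (uint8_t)(std::min(1.0f, std::max(0.0f, t)) * 255.0f);
    }
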
+ class Luma_levels: public Image_node {
+ public:
+ Luma_levels(){
+ create_image_input("image input","Image input");
+ create_parameter("black_in","number","input black point","Input black point",0.0f,0.0f,1.0f);
+ create_parameter("white_in","number","input white point","Input white point",1.0f,0.0f,1.0f);
+ create_parameter("gamma","number","gamma level","Gamma",1.0f,0.0f,10.0f);
+ create_parameter("black_out","number","output black point","Output black point",0.0f,0.0f,1.0f);
+ create_parameter("white_out","number","output white point","Output white point",1.0f,0.0f,1.0f);
+ title="Luma levels";
+ description="Remap luma values of image";
+ LUT=nullptr;
+ UID="4e500576-2d0b-11e3-b234-cf74b6a122e4";
+ };
+ Luma_levels(map<string,string> &settings):Luma_levels() {
+ base_settings(settings);
+ }
+ ~Luma_levels(){if (LUT) { delete[] LUT;} };
+ void generate_LUT(){
+ //can check here if anything has changed
+ if (LUT) delete[] LUT;
+ LUT=new unsigned char[256];
+ float fltmax=(255.0f/256.0f);
+ for (int i=0;i<256;i++){
+ LUT[i]=(unsigned char)(((pow(min(fltmax,max(0.0f,(((((float)i)/256.0f)-parameters["black_in"]->value)/(parameters["white_in"]->value-parameters["black_in"]->value)))),(1.0/parameters["gamma"]->value))*(parameters["white_out"]->value-parameters["black_out"]->value))+parameters["black_out"]->value)*255.0f);
+ }
+ }
+ void apply_LUT(const Image& in){
+ apply_LUT(in,image);
+ }
+ void apply_LUT(const Image& in,Image &out){ //facility to apply to other images for inherited classes
+ out.setup(in.w,in.h);
+ for (int i=0;i<out.w*out.h*3;i++){
+ out.RGBdata[i]=LUT[in.RGBdata[i]];
+ }
+ }
+ Image *output(const Frame_spec &frame){
+ Image *in=image_inputs[0]->get(frame);
+ if (in){
+ generate_LUT();
+ apply_LUT(*in);
+ }
+ return &image;
+ }
+ Luma_levels* clone(map<string,string> &_settings) { return new Luma_levels(_settings);};
+ protected:
+ unsigned char *LUT;
+ };
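
The single-expression LUT in generate_LUT() packs a standard levels remap into one line; unpacked, the same arithmetic reads as below (the helper is illustrative; the node bakes it into a 256-entry table, and RGB_levels further down applies the identical curve independently per channel):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t levels(int i, float black_in, float white_in, float gamma,
                   float black_out, float white_out) {
        const float fltmax = 255.0f / 256.0f;
        float x = (i / 256.0f - black_in) / (white_in - black_in); // input remap
        x = std::min(fltmax, std::max(0.0f, x));                   // clamp
        x = std::pow(x, 1.0f / gamma);                             // gamma
        x = x * (white_out - black_out) + black_out;               // output remap
        return (uint8_t)(x * 255.0f);
    }
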
+ class Echo_trails: public Luma_levels {
+ //draw trail frames additively that fade off over time
+ //the hard thing here is how to cache frames, if it's done cleverly it could have no impact when
+ //used linearly
+ //Image needs to overload operator+
+ //need a clever data structure to cache frames - maybe a map of Image pointers
+
+ //we know the frames we want to overlay as offsets ie -25,-20,-15,-10,-5
+ //do we keep 25 frames loaded in order to benefit? 25 PAL frames is 60MB so probably so
+ //OK so:
+ //make a new set of pointers
+ //identify if any of the new pointers can inherit old frames
+ //delete unneeded old frames
+ //load new frames
+ //do the calculations
+
+ //new set of pointers? or track frames by absolute frame number?
+ //with relative pointers and switching frames, could use auto_ptr?
+
+ //this cache mechanism should maybe be inheritable too?
+
+ //it could be hugely beneficial to only do the LUT once?
+ //although maybe the way to do the fading is to have a LUT for each frame?
+
+ //or is it actually best to use alpha keying after all!
+ public:
+ Echo_trails(){
+ //calls base class constructor first
+ create_parameter("number","number","number of echoes","Number echoes",25.0f);
+ create_parameter("fadeto","number","amount that echoes fade out","Fadeout amount",1.0f,0.0f,1.0f);
+ create_attribute("mode","blend mode for echoes","Blend mode","screen",{"screen","wrap"});
+ title="Echo trails";
+ description="Draw trail frames additively that fade off over time";
+ UID="5b1ab684-2d0b-11e3-8fa2-970be8c360dd";
+ };
+ Echo_trails(map<string,string> &settings):Echo_trails() {
+ base_settings(settings);
+ lastframe=-1;
+ }
+ ~Echo_trails(){
+ for (auto i:images) delete i.second;
+ };
+ Image *output(const Frame_spec &frame){
+ //check if cache is valid
+ if (images.size()){
+ if (frame.w!=image.w||frame.h!=image.h){ //or framerate changed?
+ //clear cache and start over, freeing any cached frames first
+ for (auto i:images) delete i.second;
+ images.clear();
+ lastframe=-1;
+ //calculate frame interval
+ //interval=(int)(((duration/number)*frame.framerate)+0.5);
+ //total=interval*number;
+ }
+ }
+ int thisframe=frame.frame();
+ //iterate cache and throw out any obsolete frames
+ auto i = std::begin(images);
+ while (i != std::end(images)) {
+ // check if the image is in the range we need
+ if (thisframe-(*i).first>(int)parameters["number"]->value||thisframe-(*i).first<0) {
+ delete (*i).second;
+ i = images.erase(i);
+ }
+ else
+ ++i;
+ }
+ //if frame has already been calculated just return it
+ if (thisframe!=lastframe) {
+ Image *in=image_inputs[0]->get(frame);
+ if (in) {
+ generate_LUT();
+ //need a better strategy here, should be able to get each image once
+ //copy incoming image **writable
+ image=*(in);
+ images[thisframe]=new Image(frame.w,frame.h);
+ apply_LUT(image,*(images[thisframe]));
+ for (int i=1;i<(int)parameters["number"]->value;i++){
+ //check echo frame isn't at negative time
+ int absframe=thisframe-i;
+ if (absframe>-1){
+ //check if image is in the cache
+ if (images.find(absframe)==images.end()){
+ images[absframe]=new Image(frame.w,frame.h);
+ Frame_spec wanted=Frame_spec(absframe,frame.framerate,frame.duration,frame.w,frame.h);
+ apply_LUT(*(((Image_node*)image_inputs[0]->connection)->get_image_output(wanted)),*(images[absframe]));
+ }
+ if (fless(1.0f,parameters["fadeto"]->value)){
+ float amount=(((parameters["number"]->value-i)/parameters["number"]->value)*(1.0f-parameters["fadeto"]->value))+(1.0f-parameters["fadeto"]->value);
+ Image *temp=*images[absframe]*amount;
+ if (attributes["mode"]->value=="screen") {
+ image+=*temp;
+ }
+ else {
+ image.add_wrap(*temp);
+ }
+ delete temp;
+ }
+ else {
+ if (attributes["mode"]->value=="screen") image+=*(images[absframe]);
+ else image=image.add_wrap(*(images[absframe]));
+ }
+ }
+ }
+ //for (int i=0;i<frame.w*frame.h*3;i++){
+ // image->RGBdata[i]=LUT[in->RGBdata[i]];
+ //}
+ lastframe=thisframe;
+ }
+ }
+ return &image;
+ }
+ Echo_trails* clone(map<string,string> &_settings) { return new Echo_trails(_settings);};
+ protected:
+ int interval,total,lastframe; //number of frames between displayed echoes
+ unordered_map<int,Image*> images;
+ };
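
The caching question raised in the comment block at the top of Echo_trails is answered by the unordered_map<int,Image*> member: frames are keyed by absolute frame number and dropped once they fall outside the echo window. The eviction step from output(), pulled out as a standalone restatement:

    #include <unordered_map>
    #include "rotor.h"   // for Rotor::Image

    // A cached frame survives only while current - key is in [0, echoes].
    void evict_stale(std::unordered_map<int, Rotor::Image*> &cache,
                     int current, int echoes) {
        for (auto it = cache.begin(); it != cache.end(); ) {
            int age = current - it->first;
            if (age < 0 || age > echoes) { delete it->second; it = cache.erase(it); }
            else ++it;
        }
    }
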
+ class RGB_levels: public Image_node {
+ public:
+ RGB_levels(){
+ create_image_input("image input","Image input");
+ create_parameter("red_black_in","number","Red input black-point","Red input black-point",0.0f,0.0f,1.0f);
+ create_parameter("red_white_in","number","Red input white-point","Red input white-point",1.0f,0.0f,1.0f);
+ create_parameter("red_gamma","number","Red gamma level","Red gamma",1.0f,0.01f,10.0f);
+ create_parameter("red_black_out","number","Red output black point","Red output black point",0.0f,0.0f,1.0f);
+ create_parameter("red_white_out","number","Red output white point","Red output white point",1.0f,0.0f,1.0f);
+ create_parameter("green_black_in","number","Green input black point","Green input black point",0.0f,0.0f,1.0f);
+ create_parameter("green_white_in","number","Green input white point","Green input white point",1.0f,0.0f,1.0f);
+ create_parameter("green_gamma","number","Green gamma level","Green gamma",1.0f,0.01f,10.0f);
+ create_parameter("green_black_out","number","Green output black point","Green output black point",0.0f,0.0f,1.0f);
+ create_parameter("green_white_out","number","Green output white point","Green output white point",1.0f,0.0f,1.0f);
+ create_parameter("blue_black_in","number","Blue input black point","Blue input black point",0.0f,0.0f,1.0f);
+ create_parameter("blue_white_in","number","Blue input white point","Blue input white point",1.0f,0.0f,1.0f);
+ create_parameter("blue_gamma","number","Blue gamma level","Blue gamma",1.0f,0.01f,10.0f);
+ create_parameter("blue_black_out","number","Blue output black point","Blue output black point",0.0f,0.0f,1.0f);
+ create_parameter("blue_white_out","number","Blue output white point","Blue output white point",1.0f,0.0f,1.0f);
+ title="RGB levels";
+ description="Remap RGB values of image";
+ LUT=nullptr;
+ UID="68522cba-2d0b-11e3-8767-8f3c605e9bed";
+ };
+ RGB_levels(map<string,string> &settings):RGB_levels() {
+ base_settings(settings);
+ }
+ ~RGB_levels(){
+ if (LUT) {
+ for (int i=0;i<3;i++) {
+ delete[] LUT[i];
+ }
+ delete[] LUT;
+ }
+ };
+ void generate_LUT(){
+ //can check here if anything has changed
+ if (LUT) {
+ for (int i=0;i<3;i++) {
+ delete[] LUT[i];
+ }
+ delete[] LUT;
+ }
+ LUT=new unsigned char*[3];
+ for (int i=0;i<3;i++){
+ LUT[i]=new unsigned char[256];
+ }
+ float fltmax=(255.0f/256.0f);
+ for (int i=0;i<256;i++){
+ LUT[0][i]=(unsigned char)(((\
+ pow(min(fltmax,max(0.0f,(((((float)i)/256.0f)-parameters["red_black_in"]->value)/(parameters["red_white_in"]->value-parameters["red_black_in"]->value))))\
+ ,(1.0/parameters["red_gamma"]->value))\
+ *(parameters["red_white_out"]->value-parameters["red_black_out"]->value))+parameters["red_black_out"]->value)*255.0f);
+ LUT[1][i]=(unsigned char)(((\
+ pow(min(fltmax,max(0.0f,(((((float)i)/256.0f)-parameters["green_black_in"]->value)/(parameters["green_white_in"]->value-parameters["green_black_in"]->value))))\
+ ,(1.0/parameters["green_gamma"]->value))\
+ *(parameters["green_white_out"]->value-parameters["green_black_out"]->value))+parameters["green_black_out"]->value)*255.0f);
+ LUT[2][i]=(unsigned char)(((\
+ pow(min(fltmax,max(0.0f,(((((float)i)/256.0f)-parameters["blue_black_in"]->value)/(parameters["blue_white_in"]->value-parameters["blue_black_in"]->value))))\
+ ,(1.0/parameters["blue_gamma"]->value))\
+ *(parameters["blue_white_out"]->value-parameters["blue_black_out"]->value))+parameters["blue_black_out"]->value)*255.0f);
+ }
+ }
+ void apply_LUT(const Image& in){
+ apply_LUT(in,image);
+ }
+ void apply_LUT(const Image& in,Image &out){ //facility to apply to other images for inherited classes
+ for (int i=0;i<out.w*out.h;i++){
+ out.RGBdata[i*3]=LUT[0][in.RGBdata[i*3]];
+ out.RGBdata[i*3+1]=LUT[1][in.RGBdata[i*3+1]];
+ out.RGBdata[i*3+2]=LUT[2][in.RGBdata[i*3+2]];
+ }
+ if (!in.alpha.empty()){
+ out.alpha=in.alpha;
+ }
+ }
+ Image *output(const Frame_spec &frame){
+ Image *in=image_inputs[0]->get(frame);
+ if (in){
+ generate_LUT();
+ apply_LUT(*in);
+ }
+ return &image;
+ }
+ RGB_levels* clone(map<string,string> &_settings) { return new RGB_levels(_settings);};
+ protected:
+ unsigned char **LUT;
+ };
+}
+#endif
diff --git a/rotord/src/nodes_signals.h b/rotord/src/nodes_signals.h
new file mode 100644
index 0000000..bcc2769
--- /dev/null
+++ b/rotord/src/nodes_signals.h
@@ -0,0 +1,103 @@
+#ifndef ROTOR_NODES_SIGNALS
+#define ROTOR_NODES_SIGNALS
+
+#include "rotor.h"
+
+namespace Rotor {
+ class Time: public Signal_node {
+ public:
+ Time(){
+ title="Time";
+ description="Outputs the time in seconds as a signal";
+ UID="432b0d1e-2d09-11e3-a8b9-e3affcfd2b31";
+ };
+ Time(map<string,string> &settings): Time() {
+ base_settings(settings);
+ };
+ Time* clone(map<string,string> &_settings) { return new Time(_settings);};
+ const float output(const Time_spec &time) {
+ return time.time;
+ }
+ };
+ class Track_time: public Signal_node {
+ public:
+ Track_time(){
+ title="Track time";
+ description="Outputs the fraction of the track as a signal";
+ UID="5892933e-2d09-11e3-8f2e-47c1defdb1d7";
+ };
+ Track_time(map<string,string> &settings): Track_time() {
+ base_settings(settings);
+ };
+ Track_time* clone(map<string,string> &_settings) { return new Track_time(_settings);};
+ const float output(const Time_spec &time) {
+ return time.time/time.duration;
+ }
+ };
+ class At_track_time: public Signal_node {
+ public:
+ At_track_time(){
+ create_signal_input("signal","Signal Input");
+ create_parameter("time","number","Track time to evaluate","Time",0.0f);
+ title="@Track time";
+ description="Gets input from a different point in the track";
+ UID="6a3edb9c-2d09-11e3-975c-df9df6d19f0a";
+ };
+ At_track_time(map<string,string> &settings): At_track_time() {
+ base_settings(settings);
+ };
+ At_track_time* clone(map<string,string> &_settings) { return new At_track_time(_settings);};
+ const float output(const Time_spec &time) {
+ Time_spec t=Time_spec(parameters["time"]->value*time.duration,time.framerate,time.duration);
+ return inputs[0]->get(t);
+ }
+ };
+ class Signal_output: public Signal_node {
+ public:
+ Signal_output(){
+ create_signal_input("signal","Signal Input");
+ title="Signal output";
+ description="Outputs a signal to xml for testing";
+ UID="74773190-2d09-11e3-ae26-7f2bb9af632c";
+ };
+ Signal_output(map<string,string> &settings): Signal_output() {
+ base_settings(settings);
+ };
+ Signal_output* clone(map<string,string> &_settings) { return new Signal_output(_settings);};
+ bool render(const float duration, const float framerate,string &xml_out){
+ //lost this somewhere
+ return true;
+ }
+ const float output(const Time_spec &time) {
+ return inputs[0]->get(time);
+ }
+ };
+ class Testcard: public Image_node {
+ public:
+ Testcard(){
+ //internal testing node only
+ };
+ Testcard(map<string,string> &settings): Testcard() {
+ base_settings(settings);
+ };
+ ~Testcard(){};
+ Testcard* clone(map<string,string> &_settings) { return new Testcard(_settings);};
+ Image *output(const Frame_spec &frame){
+ float hs=(255.0f/frame.h);
+ for (int i=0;i<frame.h;i++){
+ for (int j=0;j<frame.w;j++){
+ image.RGBdata[(i*frame.w+j)*3]=(uint8_t)((int)((i+(frame.time*25.0f)*hs))%255);
+ image.RGBdata[((i*frame.w+j)*3)+1]=(uint8_t)((int)((j+(frame.time*100.0f)*hs))%255);
+ image.RGBdata[((i*frame.w+j)*3)+2]=(uint8_t)(0);
+ //image->Adata[i*frame.w+j]=(uint8_t)255;
+ //image->Zdata[i*frame.w+j]=(uint16_t)512; //1.0 in fixed point 8.8 bits
+ }
+ }
+ return &image;
+ }
+ private:
+
+ };
+}
+
+#endif
diff --git a/rotord/src/nodes_source.h b/rotord/src/nodes_source.h
new file mode 100644
index 0000000..3ce9fdd
--- /dev/null
+++ b/rotord/src/nodes_source.h
@@ -0,0 +1,74 @@
+#ifndef ROTOR_NODES_SOURCE
+#define ROTOR_NODES_SOURCE
+
+#include "rotor.h"
+
+namespace Rotor {
+ class Signal_colour: public Image_node {
+ public:
+ Signal_colour(){
+ create_signal_input("Selector","Selector input");
+ create_attribute("palette","palette list of web colours","Colour palette","000000");
+ title="Signal colour";
+ description="Cycles through a palette of background colours according to selector signal";
+ UID="a2183fe0-2d09-11e3-9a64-538ee2cf40bc";
+ };
+ Signal_colour(map<string,string> &settings):Signal_colour() {
+ base_settings(settings);
+ for (int i=0;i<attributes["palette"]->value.size()/6;i++){
+ palette.push_back(Colour(attributes["palette"]->value.substr(i*6,6)));
+ }
+ prevcol=-1;
+ };
+ ~Signal_colour(){};
+ Image *output(const Frame_spec &frame){
+ if (palette.size()) {
+ int col=((int)inputs[0]->get((Time_spec)frame))%palette.size();
+ //if (col!=prevcol){ //how about when starting a new render?
+ for (int i=0;i<image.w*image.h;i++){
+ image.RGBdata[i*3]=palette[col].r;
+ image.RGBdata[i*3+1]=palette[col].g;
+ image.RGBdata[i*3+2]=palette[col].b;
+ }
+ prevcol=col;
+ //}
+ return &image;
+ }
+ return nullptr;
+ }
+ Signal_colour* clone(map<string,string> &_settings) { return new Signal_colour(_settings);};
+ private:
+ vector<Colour> palette;
+ int prevcol;
+ };
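
The palette attribute is a run of six-character web colours concatenated with no separator; the constructor slices it in order, and the selector signal, truncated to an int, indexes the result modulo the palette size. A stand-alone restatement of the slicing (Colour is the framework's own type, so a plain string vector stands in for it here):

    #include <string>
    #include <vector>

    // "ff000000ff000000ff" -> {"ff0000", "00ff00", "0000ff"}
    std::vector<std::string> split_palette(const std::string &hex) {
        std::vector<std::string> out;
        for (size_t i = 0; i + 6 <= hex.size(); i += 6)
            out.push_back(hex.substr(i, 6));
        return out;
    }
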
+ class Signal_greyscale: public Image_node {
+ //Draws signal bars in greyscale
+ public:
+ Signal_greyscale(){
+ create_signal_input("Signal","Signal input");
+ title="Signal greyscale";
+ description="Renders signal level as greyscale background";
+ UID="ae91b8a0-2d09-11e3-aa7d-8b7f1ef1a439";
+ };
+ Signal_greyscale(map<string,string> &settings):Signal_greyscale() {
+ base_settings(settings);
+ prevcol=-1;
+ };
+ ~Signal_greyscale(){};
+ Image *output(const Frame_spec &frame){
+ uint8_t col=((uint8_t)(inputs[0]->get((Time_spec)frame)*255.0f));
+ if (col!=prevcol){ //how about when starting a new render?
+ for (int i=0;i<image.w*image.h*3;i++){
+ image.RGBdata[i]=col;
+ }
+ prevcol=col;
+ }
+ return &image;
+
+ }
+ Signal_greyscale* clone(map<string,string> &_settings) { return new Signal_greyscale(_settings);};
+ private:
+ uint8_t prevcol;
+ };
+}
+#endif
diff --git a/rotord/src/nodes_transform.h b/rotord/src/nodes_transform.h
index 99f553e..cced1b0 100644
--- a/rotord/src/nodes_transform.h
+++ b/rotord/src/nodes_transform.h
@@ -67,7 +67,12 @@ namespace Rotor {
//normally a scale of 1 will place the image on screen at pixel size
//it should be that a scale of 1 places it at width w
//how to scale around the centre
+
+ //using mipmaps:
cv::Mat inter;
+ int level=(int)pow(2, ceil(log(1.0f/(s*((float)in.w/(float)image.w)))/log(2)));
+
+
//if (s<1){
// if (s<.01) s=.01;
// float scalefac=((float)image.w/in->w)*s;
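
The level expression added above rounds the reciprocal of the source's effective on-screen scale up to the next power of two, i.e. it asks how many times the source could be halved and still cover the output. A restatement with a worked example; the helper name is illustrative:

    #include <cmath>

    // Effective scale 0.3 -> 1/0.3 ~= 3.33 -> ceil(log2) = 2 -> factor 4,
    // so a quarter-size source would still cover the output; a result of
    // 1 or less means the full-resolution source already suffices.
    int reduction_factor(float s, int in_w, int out_w) {
        float effective = s * ((float)in_w / (float)out_w);
        return (int)std::pow(2.0f, std::ceil(std::log2(1.0f / effective)));
    }
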
diff --git a/rotord/src/rotor.h b/rotord/src/rotor.h
index 0a81b36..49c6b77 100644
--- a/rotord/src/rotor.h
+++ b/rotord/src/rotor.h
@@ -41,6 +41,29 @@ Requirements
or deliver segment information with a signal
+
+-thoughts on styles
+
+ we can define the properties of a successful style
+
+ 1. it has to work with a selection of music in 1 or more genres
+ 2. it has to work as an edited video
+ 3. it must not 'look like the same video' with different inputs
+
+ the styles we are working on so far are probably trying to be too original.
+
+ we are in the business of selling commercial art. it's about giving people what they know
+ they like, not about expanding their consciousness.
+
+ can we make a decent techno style that creates something that young/aspiring techno musicians will relate to?
+
+ can we make a style themed around a fashion, e.g. goth/emo
+
+ can we make a style about a place or a thing with an image, e.g. americana or poolside
+
+ this is why I think Sasha's video is the most successful attempt so far: it takes a
+ recognisable style that people understand and deconstructs it.
+
-------------------------*/