#include "testApp.h"

//--------------------------------------------------------------
//units ~ 10cm
//
/* Can use a floating point image or array to accumulate the screen and
   generate an averaged background? Is this too much work for every frame?
   Should it be put in a separate thread? (See the BgAccumulator sketch at
   the end of this file.) */
void testApp::setup(){

	printf("setup: %ix%i on screen %ix%i\n", ofGetWidth(), ofGetHeight(), ofGetScreenWidth(), ofGetScreenHeight());

	int windowMode = ofGetWindowMode();
	if(windowMode == OF_FULLSCREEN){
		windowWidth = ofGetScreenWidth();
		windowHeight = ofGetScreenHeight();
	} else if(windowMode == OF_WINDOW){
		windowWidth = ofGetWidth();
		windowHeight = ofGetHeight();
	}

	vidPlayer.loadMovie("cam-grass-01.mov"); //camoutput3.mov");
	// footage/camera needs to be the same res as the opencv planes and output
	vidPlayer.setLoopState(OF_LOOP_NORMAL);
	vidPlayer.play();

	currentFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	background.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	grayFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	grayBg.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	grayDiff.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	mogoutput.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);

	learningRate = 0.1f;
	bFirstFrame = true;
	threshold = 10.0;
	mogf = 0.008;

	/*
	fg = new ofxOpenCvUtilsForeground();
	//method(ACCUMULATE_WEIGHTED)
	//,learningRate(0.01)
	//,threshold(10)
	//,blockSize(6)
	//,blur(0)
	//,backgroundSet(false));
	fg->threshold = 10.0f;        //threshold;
	fg->learningRate = 0.01f;     //learningRate;
	fg->blockSize = 5.0f;         //blockSize;
	fg->blur = 1.0f;              //blur;
	fg->shadowThreshold = 10.0f;  //shadowThreshold;

	meanShift = new ofxOpenCvUtilsMeanShift();

	resultFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	blobFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	trackFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
	selectEnabled = false;
	*/

	ofSetFrameRate(30);
	frameno = 0;
	diffchannel = chan_V;

	hsvback = cvCreateImage(cvGetSize(currentFrame.getCvImage()), currentFrame.getCvImage()->depth, currentFrame.getCvImage()->nChannels);
	//outchan = cvCreateImage(cvGetSize(currentFrame.getCvImage()), 8, 1);
}

//--------------------------------------------------------------
void testApp::update(){

	bool bNewFrame = false;

	vidPlayer.idleMovie();
	bNewFrame = vidPlayer.isFrameNew();

	if (bNewFrame) {
		currentFrame.setFromPixels(vidPlayer.getPixels(), CAM_WIDTH_FG, CAM_HEIGHT_FG);

		if (frameno % 10 == 0) {
			//I THINK THIS APPROACH IS OK
			//cv::Rect roi(0, 0, 32, 32);
			//doesn't seem that easy to apply the ROI weighted and you still have to convert a whole image each frame?
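			// A possible answer, as an untested sketch: cv::accumulateWeighted()
			// accepts 8-bit sources directly and writes through cv::Mat views, so
			// a sub-rect of the model could be updated without converting the
			// whole frame first:
			//   cv::Rect roi(0, 0, 32, 32);
			//   cv::Mat accRoi = accumulator(roi);
			//   cv::accumulateWeighted(img(roi), accRoi, learningRate);
			// The trade-off is that only the ROI of the background stays current.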
			cv::Mat img = currentFrame.getCvImage(); //cv::Mat imgroi = img(roi);

			if (bFirstFrame) {
				img.convertTo(accumulator, CV_32FC3);
				bFirstFrame = false;
			}
			cv::Mat im3;
			img.convertTo(im3, CV_32FC3); //accumulator;
			accumulateWeighted(im3, accumulator, max(1.0f/(ofGetElapsedTimef()*30.0f), learningRate));
			accumulator.convertTo(outmat, CV_8UC3);

			// wrap outmat in a stack IplImage header (no copy, nothing leaked per frame)
			IplImage tmp = outmat;
			background = &tmp;
			//printf("tmp: %ix%i channels: %i depth:%i\n", tmp.width, tmp.height, tmp.nChannels, tmp.depth);

			//get correct channel into outchan
			vector<cv::Mat> chans;
			if (diffchannel > chan_B)
				cvtColor(outmat, hsvback, CV_RGB2HSV); // ofxCvColorImage pixels are RGB, not BGR
			switch (diffchannel) {
				case chan_R:
					split(outmat, chans);
					chans[0].copyTo(outchan);
					break;
				case chan_G:
					split(outmat, chans);
					chans[1].copyTo(outchan);
					break;
				case chan_B:
					split(outmat, chans);
					chans[2].copyTo(outchan);
					break;
				case chan_H:
					split(hsvback, chans);
					chans[0].copyTo(outchan);
					break;
				case chan_S:
					split(hsvback, chans);
					chans[1].copyTo(outchan);
					break;
				case chan_V:
					split(hsvback, chans);
					chans[2].copyTo(outchan);
					break;
			}
			IplImage tmpChan = outchan;
			grayBg = &tmpChan;
			//printf("grayBg: %ix%i channels: %i depth:%i\n", grayBg.getCvImage()->width, grayBg.getCvImage()->height, grayBg.getCvImage()->nChannels, grayBg.getCvImage()->depth);
		}
		grayFrame = currentFrame;

		// take the abs value of the difference between background and incoming and then threshold:
		grayDiff.absDiff(grayBg, grayFrame);
		grayDiff.threshold(threshold);
		//grayFrame.adaptiveThreshold(threshold, 10, false, true); //int blockSize, int offset=0, bool invert=false, bool gauss=false);
		//grayDiff.erode_3x3();
		//grayDiff.resize(windowWidth, windowHeight);

		/*
		//MOG
		mog(img, outmat, mogf);
		// Complement the image
		//cv::threshold(outmat, output, threshold, 255, cv::THRESH_BINARY_INV);
		IplImage* tmp1 = new IplImage(outmat);
		//printf("tmp: %ix%i channels: %i depth:%i\n", tmp->width, tmp->height, tmp->nChannels, tmp->depth);
		//printf("grayDiff: %ix%i channels: %i depth:%i\n", grayDiff.getCvImage()->width, grayDiff.getCvImage()->height, grayDiff.getCvImage()->nChannels, grayDiff.getCvImage()->depth);
		grayDiff = tmp1; //copy to ofx
		*/

		contourFinder.findContours(grayDiff, 200, (640*480)/3, 20, false); // min area, max area, nConsidered, bFindHoles = false

		/*
		if (!fg->backgroundSet)
			fg->setBackground(currentFrame.getCvImage());

		if(isTracking) {
			// define ROI
			cv::Mat img = currentFrame.getCvImage();
			cv::Mat img2;
			IplImage imgf = fg->update(currentFrame.getCvImage());
			cvNot(&imgf, &imgf);
			resultFrame = &imgf;
			blobFrame = resultFrame;
			cv::Mat mask = blobFrame.getCvImage();
			cv::threshold(mask, mask, 128, 255, cv::THRESH_BINARY_INV);
			cv::add(mask, img, img2);
			IplImage* tmp = new IplImage(img2);
			trackFrame = tmp;
			meanShift->update(tmp);
			return;
		}

		if(bNewFrame && fg->backgroundSet) {
			if((int)blur % 2 == 0) {
				blur++;
			}
			fg->blur = blur;

			if(useMoG) {
				fg->setMethod(MIXTURE_OF_GUASSIAN);
			} else {
				fg->setMethod(ACCUMULATE_WEIGHTED);
			}

			// Update the fg
			IplImage img = fg->update(currentFrame.getCvImage());
			cvNot(&img, &img);
			resultFrame = &img;
			blobFrame = resultFrame;
		}
		*/
	}
}

//--------------------------------------------------------------
void testApp::draw(){
	frameno++;
	ofSetHexColor(0xffffff);
	background.draw(0, 0);
	grayDiff.draw(640, 0);
	//mogoutput.draw(640, 0);

	/*
	if(fg->backgroundSet) {
		resultFrame.draw(0, 0);
		//if(isTracking) {
		//	trackFrame.draw(320, 240);
		//} else {
			blobFrame.draw(640, 0);
		//}
	}
	*/

	for(int i = 0; i < contourFinder.nBlobs; i++){
		contourFinder.blobs[i].draw(0, 0);
	}
}
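//--------------------------------------------------------------
// Illustrative sketch, not called by the app: accumulateWeighted() in
// update() maintains a running average of the scene,
//   acc = (1 - alpha) * acc + alpha * frame,
// where alpha is max(1/(elapsed*30), learningRate), i.e. high right after
// startup so the model converges quickly, decaying toward the steady-state
// learningRate. The same update written out for a single float value:
static inline float runningAverage(float acc, float sample, float alpha){
	return (1.0f - alpha) * acc + alpha * sample;
}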
//--------------------------------------------------------------
void testApp::keyPressed(int key){
	switch (key){
		case '+':
			threshold++;
			mogf += .001;
			if (threshold > 255) threshold = 255;
			break;
		case '-':
			threshold--;
			mogf -= .001;
			if (threshold < 0) threshold = 0;
			break;
		case '1':
			diffchannel = chan_R;
			break;
		case '2':
			diffchannel = chan_G;
			break;
		case '3':
			diffchannel = chan_B;
			break;
		case '4':
			diffchannel = chan_H;
			break;
		case '5':
			diffchannel = chan_S;
			break;
		case '6':
			diffchannel = chan_V;
			break;
	}
}

//--------------------------------------------------------------
void testApp::keyReleased(int key){

}

//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y ){

}

//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){

}

//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){

}

//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){

}

//--------------------------------------------------------------
void testApp::windowResized(int w, int h){

}

//--------------------------------------------------------------
void testApp::gotMessage(ofMessage msg){

}

//--------------------------------------------------------------
void testApp::dragEvent(ofDragInfo dragInfo){

}
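//--------------------------------------------------------------
// Sketch only, answering the question at the top of this file about moving
// the background accumulation off the main thread. Hypothetical class built
// on ofThread; the name BgAccumulator and the submit() API are invented here,
// and nothing in the app uses it yet.
/*
class BgAccumulator : public ofThread {
public:
	cv::Mat accumulator;
	float learningRate;

	BgAccumulator() : learningRate(0.1f), hasWork(false) {}

	// called from update(): hand the worker a copy of the latest frame
	void submit(const cv::Mat& frame){
		lock();
		frame.copyTo(pending);
		hasWork = true;
		unlock();
	}

	void threadedFunction(){
		while(isThreadRunning()){
			lock();
			if(hasWork){
				if(accumulator.empty())
					pending.convertTo(accumulator, CV_32FC3);
				cv::accumulateWeighted(pending, accumulator, learningRate);
				hasWork = false;
			}
			unlock();
			ofSleepMillis(1);
		}
	}

private:
	cv::Mat pending;
	bool hasWork;
};
*/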