summaryrefslogtreecommitdiff
path: root/cvtest/src/testApp.cpp
diff options
context:
space:
mode:
authorTim Redfern <tim@eclectronics.org>2012-05-17 15:51:07 +0100
committerTim Redfern <tim@eclectronics.org>2012-05-17 15:51:07 +0100
commit8bc09d4264575d2752374413a180bf9dc1b3b035 (patch)
treeae362434a29b48aa66d1811ab1dfd6146f65b536 /cvtest/src/testApp.cpp
parentdbf9692b03ac2485f771993184222f7170e71cf2 (diff)
keying options
Diffstat (limited to 'cvtest/src/testApp.cpp')
-rw-r--r--cvtest/src/testApp.cpp119
1 files changed, 102 insertions, 17 deletions
diff --git a/cvtest/src/testApp.cpp b/cvtest/src/testApp.cpp
index a05cc49..cf5061d 100644
--- a/cvtest/src/testApp.cpp
+++ b/cvtest/src/testApp.cpp
@@ -25,7 +25,7 @@ void testApp::setup(){
}
- vidPlayer.loadMovie("camoutput3.mov"); //cam-grass-01.mov"); //footage/ camera needs to be the same res as opencv planes and output
+ vidPlayer.loadMovie("cam-grass-01.mov"); //camoutput3.mov"); // //footage/ camera needs to be the same res as opencv planes and output
vidPlayer.setLoopState(OF_LOOP_NORMAL);
vidPlayer.play();
@@ -39,7 +39,7 @@ void testApp::setup(){
mogoutput.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
- learningRate = 0.001f;
+ learningRate = 0.1f;
bFirstFrame=true;
threshold=10.0;
@@ -76,6 +76,11 @@ void testApp::setup(){
*/
ofSetFrameRate(30);
+ frameno=0;
+
+ diffchannel=chan_V;
+ hsvback = cvCreateImage(cvGetSize(currentFrame.getCvImage()), currentFrame.getCvImage()->depth, currentFrame.getCvImage()->nChannels);
+ //outchan = cvCreateImage(cvGetSize(currentFrame.getCvImage()), 8, 1);
}
@@ -92,38 +97,88 @@ void testApp::update(){
if (bNewFrame) {
currentFrame.setFromPixels(vidPlayer.getPixels(), CAM_WIDTH_FG, CAM_HEIGHT_FG);
- cv::Mat img = currentFrame.getCvImage();
- cv::Mat im3;
- img.convertTo(im3, CV_32FC3);
-
+
+ if (frameno%10==0) { //I THINK THIS APPROACH IS OK
+
+ //cv::Rect roi(0, 0, 32, 32); //doesn't seem that easy to apply the ROI weighted and you still have to convert a whole image each frame?
+ cv::Mat img = currentFrame.getCvImage();
+ //cv::Mat imgroi = img(roi);
+
if (bFirstFrame) {
img.convertTo(accumulator, CV_32FC3);
bFirstFrame=false;
}
+
+ cv::Mat im3;
+ img.convertTo(im3, CV_32FC3);
+
+
//accumulator;
accumulateWeighted(im3, accumulator, max(1.0f/(ofGetElapsedTimef()*30.0f),learningRate));
-
accumulator.convertTo(outmat,CV_8UC3);
+
IplImage* tmp = new IplImage(outmat);
- //printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
- //printf("output: %ix%i channels: %i depth:%i\n",output.getCvImage()->width,output.getCvImage()->height,output.getCvImage()->nChannels,output.getCvImage()->depth);
background=tmp;
+
+ //printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
+
+
+ //get correct channel into outchan
+
+ vector<cv::Mat> chans;
+
+ if (diffchannel>chan_B) cvtColor(outmat, hsvback, CV_BGR2HSV);
+ switch (diffchannel) {
+ case chan_R:
+ split(outmat,chans);
+ chans[0].copyTo(outchan);
+ break;
+ case chan_G:
+ split(outmat,chans);
+ chans[1].copyTo(outchan);
+ break;
+ case chan_B:
+ split(outmat,chans);
+ chans[2].copyTo(outchan);
+ break;
+ case chan_H:
+ split(hsvback,chans);
+ chans[0].copyTo(outchan);
+ break;
+ case chan_S:
+ split(hsvback,chans);
+ chans[1].copyTo(outchan);
+ break;
+ case chan_V:
+ split(hsvback,chans);
+ chans[2].copyTo(outchan);
+ break;
+ }
+
- //grayBg = background;
- //grayFrame = currentFrame;
+ tmp = new IplImage(outchan);
+ //printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
+ //printf("grayBg: %ix%i channels: %i depth:%i\n",grayBg.getCvImage()->width,grayBg.getCvImage()->height,grayBg.getCvImage()->nChannels,grayBg.getCvImage()->depth);
+
+
+
+ grayBg = tmp;
+
+ }
+ grayFrame = currentFrame;
// take the abs value of the difference between background and incoming and then threshold:
- //grayDiff.absDiff(grayBg, grayFrame);
- //grayDiff.threshold(threshold);
- //grayDiff.adaptiveThreshold( threshold,1,false,true); //int blockSize, int offset=0,bool invert=false, bool gauss=false);
+ grayDiff.absDiff(grayBg, grayFrame);
+ grayDiff.threshold(threshold);
+ //grayFrame.adaptiveThreshold( threshold,10,false,true); //int blockSize, int offset=0,bool invert=false, bool gauss=false);
//grayDiff.erode_3x3();
//grayDiff.resize(windowWidth,windowHeight);
- //contourFinder.findContours(grayDiff, 200, (640*480)/3, 20, false); // don't find holes
+/*
//MOG
mog(img, outmat, mogf);
@@ -133,6 +188,8 @@ void testApp::update(){
//printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
//printf("grayDiff: %ix%i channels: %i depth:%i\n",grayDiff.getCvImage()->width,grayDiff.getCvImage()->height,grayDiff.getCvImage()->nChannels,grayDiff.getCvImage()->depth);
grayDiff=tmp1; //copy to ofx
+*/
+ contourFinder.findContours(grayDiff, 200, (640*480)/3, 20, false); // don't find holes (bFindHoles=false)
@@ -194,6 +251,7 @@ void testApp::update(){
//--------------------------------------------------------------
void testApp::draw(){
+ frameno++;
ofSetHexColor(0xffffff);
@@ -213,10 +271,19 @@ void testApp::draw(){
//}
}
*/
+
+ for(int i=0;i<contourFinder.blobs.size();i++){
+
+ ofxCvBlob blob = contourFinder.blobs[i];
+ blob.draw(0,0);
+
+ }
+
+
ofSetHexColor(0xffffff);
char reportStr[1024];
- //sprintf(reportStr, "fps: %f\nthreshold: %f", ofGetFrameRate(),threshold);
- sprintf(reportStr, "fps: %f\nmog: %f", ofGetFrameRate(),mogf);
+ sprintf(reportStr, "fps: %f\nthreshold: %f", ofGetFrameRate(),threshold);
+ //sprintf(reportStr, "fps: %f\nmog: %f", ofGetFrameRate(),mogf);
ofDrawBitmapString(reportStr, 1100, 440);
@@ -235,6 +302,24 @@ void testApp::keyPressed(int key){
mogf-=.001;
if (threshold < 0) threshold = 0;
break;
+ case '1':
+ diffchannel = chan_R;
+ break;
+ case '2':
+ diffchannel = chan_G;
+ break;
+ case '3':
+ diffchannel = chan_B;
+ break;
+ case '4':
+ diffchannel = chan_H;
+ break;
+ case '5':
+ diffchannel = chan_S;
+ break;
+ case '6':
+ diffchannel = chan_V;
+ break;
}
}