From 58f154e844aed7ba06df3ac570521930de66b744 Mon Sep 17 00:00:00 2001
From: Tim Redfern
Date: Thu, 14 Jun 2012 02:04:16 +0100
Subject: mostly good

---
 gaunt01/bin/data/settings.xml |   2 +-
 gaunt01/src/bird.cpp          |  13 +-
 gaunt01/src/bird.h            |   2 +
 gaunt01/src/main.cpp          |   2 +-
 gaunt01/src/testApp.cpp       | 271 ++++++++++++++++++++++++++++++++++++++----
 gaunt01/src/testApp.h         |  38 +++++-
 6 files changed, 297 insertions(+), 31 deletions(-)

(limited to 'gaunt01')

diff --git a/gaunt01/bin/data/settings.xml b/gaunt01/bin/data/settings.xml
index f482957..db07922 100644
--- a/gaunt01/bin/data/settings.xml
+++ b/gaunt01/bin/data/settings.xml
@@ -1,5 +1,5 @@
-
+
 
diff --git a/gaunt01/src/bird.cpp b/gaunt01/src/bird.cpp
index da246d1..2c4d5f2 100644
--- a/gaunt01/src/bird.cpp
+++ b/gaunt01/src/bird.cpp
@@ -178,7 +178,7 @@ void bird::update(map& players, float angle){
 	if (position.z<-ofGetHeight()/40) {
 		if (diveRate<2.0f) {
 			//increase dive rate
-			diveRate+=0.2f;
+			diveRate+=0.1f;
 		}
 	}
 	else {
@@ -212,6 +212,7 @@ void bird::update(map& players, float angle){
 	//do the documentation
 	//go back to the bird - watch out for dead players (from trap doors) - bird is going out of play - just retrieve it for now?
 	//flap faster while climbing + swoop
+	//bird noises
 	
 	heading=heading+(turnRate*timeSeg);
 	while (heading>180) heading=heading-360;
@@ -255,10 +256,12 @@ void bird::drawDebug(){
 	ofSphere(edgepoint,2.0f);
 	*/
 	
-	ofSetHexColor(0xff00ff);
-	char numStr[64];
-	sprintf(numStr, "dive: %4.2f\nheight: %4.2f", diveRate,position.z);
-	ofDrawBitmapString(numStr,10,10);
+	if (DEBUG) {
+		ofSetHexColor(0xff00ff);
+		char numStr[64];
+		sprintf(numStr, "dive: %4.2f\nheight: %4.2f", diveRate,position.z);
+		ofDrawBitmapString(numStr,10,10);
+	}
 	
 }
diff --git a/gaunt01/src/bird.h b/gaunt01/src/bird.h
index f413911..11585c2 100644
--- a/gaunt01/src/bird.h
+++ b/gaunt01/src/bird.h
@@ -48,6 +48,8 @@
 
 #include "ofxRay.h"
 
+#define DEBUG 0
+
 class bird {
 	
 	public:
diff --git a/gaunt01/src/main.cpp b/gaunt01/src/main.cpp
index d8ec5c8..1f3ac12 100644
--- a/gaunt01/src/main.cpp
+++ b/gaunt01/src/main.cpp
@@ -6,7 +6,7 @@
 int main( ){
 
     ofAppGlutWindow window;
-	ofSetupOpenGL(&window, 1024,768, OF_WINDOW );			// <-------- setup the GL context
+	ofSetupOpenGL(&window, 1024,768, OF_FULLSCREEN );			// <-------- setup the GL context
 	printf("%ix%i on screen %ix%i\n",ofGetWidth(),ofGetHeight(),ofGetScreenWidth(),ofGetScreenHeight());
 	// this kicks off the running of my app
 	// can be OF_WINDOW or OF_FULLSCREEN
diff --git a/gaunt01/src/testApp.cpp b/gaunt01/src/testApp.cpp
index 749f6d3..bb9bb10 100644
--- a/gaunt01/src/testApp.cpp
+++ b/gaunt01/src/testApp.cpp
@@ -30,8 +30,6 @@ void testApp::setup(){
 
 	loadSettings("settings.xml");
 	
-	vidPlayer.loadMovie("camoutput.mov"); //footage/ camera needs to be the same res as opencv planes and output
-	vidPlayer.setLoopState(OF_LOOP_NORMAL);
 	vidGrabber.setVerbose(true);
 	if (vidGrabber.initGrabber(640,480)) {
 		hasCamera=true;
@@ -41,19 +39,45 @@ void testApp::setup(){
 	{
 		hasCamera=false;
 		useCamera=false;
+		vidPlayer.loadMovie("camoutput.mov"); //footage/ camera needs to be the same res as opencv planes and output
+		vidPlayer.setLoopState(OF_LOOP_NORMAL);
 		vidPlayer.play();
 	}
 	
+	/*
 	accumImg.allocate(640,480);
 	bgImg.allocate(640,480);
 	bgImg.setUseTexture(true);
 	
-	colorImg.allocate(640,480);
-	colorImg.setUseTexture(true);
+	
 	grayImage.allocate(640,480);
 	grayBg.allocate(640,480);
+	*/
+	
+	colorImg.allocate(640,480);
+	colorImg.setUseTexture(true);
+	
+	currentFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	background.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	background.setUseTexture(true);
+	
+	grayFrame.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	grayBg.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	grayDiff.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	
+	mogoutput.allocate(CAM_WIDTH_FG, CAM_HEIGHT_FG);
+	
+	learningRate = 0.01f;
+	bFirstFrame=true;
+	
+	diffchannel=chan_V;
+	hsvback = cvCreateImage(cvGetSize(currentFrame.getCvImage()), currentFrame.getCvImage()->depth, currentFrame.getCvImage()->nChannels);
+	//backchan = cvCreateImage(cvGetSize(currentFrame.getCvImage()), 8, 1);
+	
+	removeShadows=false;
+	shadowThreshold=10;
+	////////////////////////////
 	
 	blobsManager.normalizePercentage = 0.7;
 	blobsManager.giveLowestPossibleIDs = false;
@@ -437,6 +461,8 @@ void testApp::update(){
 	}
 	
 	colorImg.updateTexture();
+	
+	/*
 	
 	grayImage = colorImg;
@@ -453,15 +479,6 @@ void testApp::update(){
 	bgImg=accumImg;
 	bgImg.updateTexture();
 	
-	//test the scaling
-	
-	/*
-	grayImage.resize(windowWidth,windowHeight);
-	if (bLearnBakground == true){
-		grayBg = grayImage;		// the = sign copys the pixels from grayImage into grayBg (operator overloading)
-		bLearnBakground = false;
-	}
-	*/
 	
 	grayDiff.clear();
 	grayDiff.allocate(640,480);
@@ -477,6 +494,178 @@ void testApp::update(){
 	//hard coded size threshold of 100 pix
 	contourFinder.findContours(grayDiff, 200, (640*480)/3, 20, false);	// don't find holes
+	
+	*/
+	
+	cv::Mat img = colorImg.getCvImage();
+	
+	//if (frameno%1==0) { //I THINK THIS APPROACH IS OK to attempt to lower cpu hit from accumulating?
+	
+	//cv::Rect roi(0, 0, 32, 32); //doesn't seem that easy to apply the ROI weighted and you still have to convert a whole image each frame?
+	
+	//cv::Mat imgroi = img(roi);
+	
+	if (bFirstFrame) {
+		img.convertTo(accumulator, CV_32FC3);
+		bFirstFrame=false;
+	}
+	
+	cv::Mat im3;
+	img.convertTo(im3, CV_32FC3);
+	
+	
+	
+	//accumulator;
+	accumulateWeighted(im3, accumulator, max(1.0f/(ofGetElapsedTimef()*30.0f),learningRate));
+	accumulator.convertTo(outmat,CV_8UC3);
+	
+	IplImage* tmp = new IplImage(outmat);
+	background=tmp;
+	background.updateTexture();
+	
+	//printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
+	
+	
+	//get correct channel into backchan
+	
+	vector<cv::Mat> chans;
+	
+	//to remove shadows, need hsv of foreground and background
+	if (diffchannel>chan_B||removeShadows) cvtColor(outmat, hsvback, CV_BGR2HSV);
+	switch (diffchannel) {
+		case chan_R:
+			split(outmat,chans);
+			chans[0].copyTo(backchan);
+			break;
+		case chan_G:
+			split(outmat,chans);
+			chans[1].copyTo(backchan);
+			break;
+		case chan_B:
+			split(outmat,chans);
+			chans[2].copyTo(backchan);
+			break;
+		case chan_H:
+			split(hsvback,chans);
+			chans[0].copyTo(backchan);
+			break;
+		case chan_S:
+			split(hsvback,chans);
+			chans[1].copyTo(backchan);
+			break;
+		case chan_V:
+			split(hsvback,chans);
+			chans[2].copyTo(backchan);
+			break;
+	}
+	
+	
+	tmp = new IplImage(backchan);
+	grayBg = tmp;
+	
+	//}
+	//first, optionally remove shadows from FG
+	//possibly use 1/4 screen res?
+	
+	//to remove shadows, need hsv of foreground and background
+	if (diffchannel>chan_B||removeShadows) cvtColor(img, hsvfront, CV_BGR2HSV);
+	
+	cv::Mat outimg;
+	
+	if (removeShadows) {
+		vector<cv::Mat> slicesFront, slicesBack;
+		cv::Mat valFront, valBack, satFront, satBack;
+		
+		// split image to H,S and V images
+		split(hsvfront, slicesFront);
+		split(hsvback, slicesBack);
+		
+		slicesFront[2].copyTo(valFront); // get the value channel
+		slicesFront[1].copyTo(satFront); // get the sat channel
+		
+		slicesBack[2].copyTo(valBack); // get the value channel
+		slicesBack[1].copyTo(satBack); // get the sat channel
+		
+		int x,y;
+		for(x=0; x<640; x++) {
+			for(y=0; y<480; y++) {
+				bool sat = ((satFront.at<cv::Vec3b>(y,x)[0] > satBack.at<cv::Vec3b>(y,x)[0]-shadowThreshold) &&
+					(satFront.at<cv::Vec3b>(y,x)[0] < satBack.at<cv::Vec3b>(y,x)[0]+shadowThreshold));
+				
+				if(sat && (valFront.at<cv::Vec3b>(y,x)[0] < valBack.at<cv::Vec3b>(y,x)[0])) {
+					hsvfront.at<cv::Vec3b>(y,x)[0]= hsvback.at<cv::Vec3b>(y,x)[0];
+					hsvfront.at<cv::Vec3b>(y,x)[1]= hsvback.at<cv::Vec3b>(y,x)[1];
+					hsvfront.at<cv::Vec3b>(y,x)[2]= hsvback.at<cv::Vec3b>(y,x)[2];
+				}
+				
+			}
+		}
+		
+		//convert back into RGB if necessary
+		
+		if (diffchannel<chan_H) cvtColor(hsvfront, outimg, CV_HSV2BGR);
+		else outimg=hsvfront;
+	}
+	else {
+		if (diffchannel<chan_H) outimg=img;
+		else outimg=hsvfront;
+	}
+	
+	vector<cv::Mat> chans;
+	split(outimg,chans);
+	
+	switch (diffchannel) {
+		case chan_R:
+			chans[0].copyTo(frontchan);
+			break;
+		case chan_G:
+			chans[1].copyTo(frontchan);
+			break;
+		case chan_B:
+			chans[2].copyTo(frontchan);
+			break;
+		case chan_H:
+			chans[0].copyTo(frontchan);
+			break;
+		case chan_S:
+			chans[1].copyTo(frontchan);
+			break;
+		case chan_V:
+			chans[2].copyTo(frontchan);
+			break;
+	}
+	
+	//IplImage* tmp = new IplImage(outmat);
+	tmp = new IplImage(frontchan);
+	grayFrame = tmp;
+	
+	grayDiff.clear();
+	grayDiff.allocate(640,480);
+	
+	// take the abs value of the difference between background and incoming and then threshold:
+	grayDiff.absDiff(grayBg, grayFrame);
+	grayDiff.threshold(threshold);
+	//grayFrame.adaptiveThreshold( threshold,10,false,true); //int blockSize, int offset=0,bool invert=false, bool gauss=false);
+	//grayDiff.erode_3x3();
+	//grayDiff.resize(windowWidth,windowHeight);
+	
+/*
+	//MOG
+	mog(img, outmat, mogf);
+	
+	// Complement the image
+	//cv::threshold(outmat, output, threshold, 255, cv::THRESH_BINARY_INV);
+	IplImage* tmp1 = new IplImage(outmat);
+	//printf("tmp: %ix%i channels: %i depth:%i\n",tmp->width,tmp->height,tmp->nChannels,tmp->depth);
+	//printf("grayDiff: %ix%i channels: %i depth:%i\n",grayDiff.getCvImage()->width,grayDiff.getCvImage()->height,grayDiff.getCvImage()->nChannels,grayDiff.getCvImage()->depth);
+	grayDiff=tmp1; //copy to ofx
+*/
+	
+	grayDiff.resize(windowWidth,windowHeight); //wasteful??
+	
+	contourFinder.findContours(grayDiff, 500, (640*480)/3, 20, false);	// find holes
 	
 	blobsManager.update(contourFinder.blobs);
 	
 	//check players against blob ids - bland do ray casting, update players
@@ -557,9 +746,11 @@ void testApp::draw(){
 	//should be in front with holes being recreated for activated trapdoors
 	
 	ofSetHexColor(0xffffff);
-	bindTexture(bgImg);
+	if (mode==CALIBRATE) bindTexture(colorImg);
+	else bindTexture(background);
 	ground.draw();
-	unbindTexture(bgImg);
+	if (mode==CALIBRATE) unbindTexture(colorImg);
+	else unbindTexture(background);
 	
 	ofPushMatrix();
 	ofRotate(cam_angle,1,0,0);
@@ -579,7 +770,7 @@ void testApp::draw(){
 	glDisable(GL_BLEND);
 	ofPopMatrix();
 	
-	
+	glDisable(GL_DEPTH_TEST);
 	ofSetHexColor(0xffffff);
 	bindTexture(colorImg); //colorImg.getTextureReference().bind();
@@ -746,12 +937,15 @@ void testApp::draw(){
 			char numStr[16];
 			sprintf(numStr, "%i", blobsManager.blobs[i].id);
 			ofDrawBitmapString(numStr, blob.boundingRect.x, blob.boundingRect.y);
+			
+			/*
 			ofPushMatrix();
 			ofRotate(cam_angle,1,0,0);
 			ofTranslate(players[blobsManager.blobs.at(i).id].getWorldPosition());
 			ofBox(0,-10,0,20); //TODO get this into plane axis
 			ofPopMatrix();
+			*/
 		// }
 	
@@ -773,13 +967,15 @@ void testApp::draw(){
 	if (drawStats||mode==CALIBRATE) {
 		ofSetHexColor(0xffffff);
 		char reportStr[1024];
-		sprintf(reportStr, "threshold %i\nnum blobs found %i, fps: %f", threshold, contourFinder.nBlobs, ofGetFrameRate());
-		ofDrawBitmapString(reportStr, 10, windowHeight-40);
+		sprintf(reportStr, "threshold %i",threshold); //\nnum blobs found %i, fps: %f", threshold, contourFinder.nBlobs, ofGetFrameRate());
+		ofDrawBitmapString(reportStr, 10, windowHeight-30);
+		/*
 		char numStr[16];
 		for(int i=0;i 255) threshold = 255;
@@ -819,7 +1012,34 @@ void testApp::keyPressed(int key){
 		case '0':
 			mode=CALIBRATE;
 			break;
-		case '1':
+
+		case '1':
+			diffchannel = chan_R;
+			break;
+		case '2':
+			diffchannel = chan_G;
+			break;
+		case '3':
+			diffchannel = chan_B;
+			break;
+		case '4':
+			diffchannel = chan_H;
+			break;
+		case '5':
+			diffchannel = chan_S;
+			break;
+		case '6':
+			diffchannel = chan_V;
+			break;
+
+		/*
+		case 's':
+			removeShadows=!removeShadows;
+			printf(removeShadows?"removing shadows\n":"not removing shadows\n");
+			break;
+
+
+		case '1':
 			if (Bird.currentseq!="hover") {
 				//mesh.sequences["trans_flaphover"].stopAt(0.3);
 				//mesh.sequences["trans_flaphover"].start();
@@ -859,6 +1079,7 @@ void testApp::keyPressed(int key){
 				Bird.currentseq="attack";
 			}
 			break;
+			*/
 			/*
 			case 'y':
 				light.setPosition(light.getX(),light.getY()-100,light.getZ());
@@ -891,11 +1112,13 @@ void testApp::keyPressed(int key){
 				drawingborder=true;
 			}
 			else drawingborder=false;
-
 			break;
+			/*
 		case '>':
 			gameState=(gameState+1)%4;
 			gameStart=ofGetElapsedTimef();
 			break;
+
+			*/
 	}
 }
@@ -950,6 +1173,7 @@ void testApp::loadSettings(string filename){
 	}else{
 		cam_angle=ofToInt(XML.getAttribute("gauntlet","cam_angle","none",0));
 		threshold=ofToInt(XML.getAttribute("gauntlet","threshold","none",0));
+		diffchannel=ofToInt(XML.getAttribute("gauntlet","keyChannel","none",0));
 		if(XML.pushTag("bounds")) {
 			for (int i=0;i
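
Not part of the commit above -- an illustrative sketch. The new code in testApp::update() keeps a floating-point running average of the camera image with cv::accumulateWeighted(), converts the accumulator back to 8 bits to act as the background, picks one channel (R/G/B or H/S/V, selected by diffchannel), and thresholds the absolute difference between that channel in the background and in the current frame. The standalone sketch below shows the same idea with plain OpenCV (2.4-style API assumed), simplified so the average is kept directly in HSV; the names (cap, bgAccum, CHANNEL) and the constants are illustrative, not taken from this repository.

    // Running-average background subtraction on a single channel (sketch, not the project's code).
    #include <opencv2/opencv.hpp>
    #include <vector>

    int main() {
        cv::VideoCapture cap(0);                    // assumption: webcam at index 0
        if (!cap.isOpened()) return 1;

        const int CHANNEL = 2;                      // 2 = V when working in HSV (like chan_V)
        const double learningRate = 0.01;           // weight of each new frame in the average
        const int threshold = 80;                   // illustrative value, not from settings.xml

        cv::Mat frame, hsv, bgAccum, bg8u, diff, mask;
        std::vector<cv::Mat> chans;

        while (cap.read(frame)) {
            cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);

            if (bgAccum.empty())
                hsv.convertTo(bgAccum, CV_32FC3);   // first frame seeds the model
            cv::accumulateWeighted(hsv, bgAccum, learningRate); // running average
            bgAccum.convertTo(bg8u, CV_8UC3);

            cv::split(hsv, chans);
            cv::Mat front = chans[CHANNEL].clone(); // chosen channel of the current frame
            cv::split(bg8u, chans);
            cv::Mat back = chans[CHANNEL];          // same channel of the background model

            cv::absdiff(back, front, diff);         // |background - frame|
            cv::threshold(diff, mask, threshold, 255, cv::THRESH_BINARY);

            cv::imshow("foreground mask", mask);
            if (cv::waitKey(1) == 27) break;        // Esc quits
        }
        return 0;
    }

The patch accumulates the BGR frame and only converts the averaged background to HSV when an H/S/V key channel is selected; the sketch folds that into one conversion per frame to stay short.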
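Also not part of the commit -- a sketch of the shadow test in the removeShadows branch, parts of which were mangled in this extract. What the surviving lines show: a foreground pixel whose saturation stays within shadowThreshold of the background's saturation but whose value is darker is treated as a cast shadow and replaced with the background pixel, so it drops out of the difference image. A self-contained version of that heuristic follows; the function name suppressShadows and the CV_Assert checks are mine, not the author's.

    // Replace likely shadow pixels in an HSV foreground with the HSV background (sketch).
    #include <opencv2/opencv.hpp>

    void suppressShadows(cv::Mat& hsvFront, const cv::Mat& hsvBack, int shadowThreshold) {
        // assumption: both images are 8-bit, 3-channel HSV and the same size
        CV_Assert(hsvFront.type() == CV_8UC3 && hsvBack.type() == CV_8UC3);
        CV_Assert(hsvFront.size() == hsvBack.size());

        for (int y = 0; y < hsvFront.rows; y++) {
            for (int x = 0; x < hsvFront.cols; x++) {
                const cv::Vec3b& f = hsvFront.at<cv::Vec3b>(y, x);
                const cv::Vec3b& b = hsvBack.at<cv::Vec3b>(y, x);

                // saturation within +/- shadowThreshold of the background
                bool similarSat = f[1] > b[1] - shadowThreshold &&
                                  f[1] < b[1] + shadowThreshold;
                // but darker in value
                bool darkerVal  = f[2] < b[2];

                if (similarSat && darkerVal)
                    hsvFront.at<cv::Vec3b>(y, x) = b;   // looks like a shadow: use background
            }
        }
    }

The explicit per-pixel loop mirrors the patch; the same test could also be expressed without a loop by combining masks from cv::absdiff/cv::inRange on the saturation channels and cv::compare on the value channels, which would sidestep the CPU cost the commit comments worry about.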