Fast Compressive Tracking(快速压缩跟踪)算法的C++代码实现
本文系原創,轉載請注明。
有問題請留言或發郵箱:johnnycons@163.com
? ? ? ? ? ?因為實驗室項目工程的需要,最近在研究目標跟蹤算法。這里提的Fast Compressive Tracking (快速壓縮跟蹤)算法是張開華教授在其之前的Compressive Tracking 算法(網站看這里)上做了一些簡單的優化,本人測試的結果是FCT算法的處理速度在59幀/s左右(windows下),而之前的CT算法大概是29幀/s,修改后的速度還是不錯的(這里的工程都是讀圖片幀序列,后面給出的工程資源有自帶資源)。因為FCT的網站上沒有給出代碼的C++實現(只有MATLAB的代碼,網站看這里),這里就貼出我按照其論文的意思給出C++的代碼實現,這里的C++代碼也是從其CT算法的c++代碼上修改而來的,修改的地方我會在代碼中說明。另外為了將代碼能夠移植到linux上,我對代碼的初始運行的地方也做了修改,所以這里貼出的代碼是可以運行在linux上的。代碼中的注釋參考了zouxy09大神的博客,最后為了方便大家運行,我會將FCT C++工程以及linux下運行的代碼分別打包供大家下載。因為我也是初學者,能力有限,所以若文中有紕漏請讀者指正,有問題請留言或者發我郵箱,thanks。
---------------------------------------------------------------------------------------------------
2015/1/24 補充:該版本C++代碼沒有實現多尺度的情況。我在看論文的時候沒有仔細閱讀論文(細看了CT,而沒細看FCT),以為FCT論文中沒有給出尺度變換的設計。后來有網友指出論文中有尺度變換,罪過罪過,很久之前的東西了,所以暫時沒辦法更新代碼。
/************************************************************************ * File: CompressiveTracker.h * Brief: C++ demo for paper: Kaihua Zhang, Lei Zhang, Ming-Hsuan Yang,"Real-Time Compressive Tracking," ECCV 2012. * Version: 1.0 * Author: Yang Xian * Email: yang_xian521@163.com * Date: 2012/08/03 * History: * Revised by Kaihua Zhang on 14/8/2012, 23/8/2012 * Email: zhkhua@gmail.com * Homepage: http://www4.comp.polyu.edu.hk/~cskhzhang/ * Project Website: http://www4.comp.polyu.edu.hk/~cslzhang/CT/CT.htm ************************************************************************/ #pragma once #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <vector>using std::vector; using namespace cv; //--------------------------------------------------- class CompressiveTracker { public:CompressiveTracker(void);~CompressiveTracker(void);private:int featureMinNumRect;int featureMaxNumRect;int featureNum;//每個box的harr特征個數(也就是弱分類器個數) vector<vector<Rect>> features;vector<vector<float>> featuresWeight;int rOuterPositive;//在離上一幀跟蹤到的目標位置的距離小于rOuterPositive的范圍內采集 正樣本 vector<Rect> samplePositiveBox;//采集的正樣本box集 vector<Rect> sampleNegativeBox;//采集的負樣本box集 int rSearchWindow;//掃描窗口的大小,或者說檢測box的大小 Mat imageIntegral; //圖像的積分圖 Mat samplePositiveFeatureValue;//采集的正樣本的harr特征值 ???特征值是矩陣??Mat sampleNegativeFeatureValue;//采集的負樣本的harr特征值 //對每個樣本z(m維向量),它的低維表示是v(n維向量,n遠小于m)。假定v中的各元素是獨立分布的。 //假定在分類器H(v)中的條件概率p(vi|y=1)和p(vi|y=0)屬于高斯分布,并且可以用以下四個參數來描述: //分別是描述正負樣本的高斯分布的均值u和方差sigma vector<float> muPositive;vector<float> sigmaPositive;vector<float> muNegative;vector<float> sigmaNegative;float learnRate;//學習速率,控制分類器參數更新的步長 vector<Rect> detectBox; //需要檢測的box Mat detectFeatureValue;RNG rng; //隨機數 private:void HaarFeature(Rect& _objectBox, int _numFeature);void sampleRect(Mat& _image, Rect& _objectBox, float _rInner, float _rOuter, int _maxSampleNum, vector<Rect>& _sampleBox); <span style="font-family: Arial, Helvetica, 
sans-serif;">//這里sampleRect函數和CT算法稍微不同,增加了step參數用來表示不同的步長。</span> void sampleRect(Mat& _image, Rect& _objectBox, float _srw, vector<Rect>& _sampleBox,int step);void getFeatureValue(Mat& _imageIntegral, vector<Rect>& _sampleBox, Mat& _sampleFeatureValue);void classifierUpdate(Mat& _sampleFeatureValue, vector<float>& _mu, vector<float>& _sigma, float _learnRate);void radioClassifier(vector<float>& _muPos, vector<float>& _sigmaPos, vector<float>& _muNeg, vector<float>& _sigmaNeg,Mat& _sampleFeatureValue, float& _radioMax, int& _radioMaxIndex); public:void processFrame(Mat& _frame, Rect& _objectBox);void init(Mat& _frame, Rect& _objectBox); }; /************************************************************************ * File: RunTracker.cpp * Brief: C++ demo for paper: Kaihua Zhang, Lei Zhang, Ming-Hsuan Yang,"Real-Time Compressive Tracking," ECCV 2012. * Version: 1.0 * Author: Yang Xian * Email: yang_xian521@163.com * Date: 2012/08/03 * History: * Revised by Kaihua Zhang on 14/8/2012, 23/8/2012 * Email: zhkhua@gmail.com * Homepage: http://www4.comp.polyu.edu.hk/~cskhzhang/ * Project Website: http://www4.comp.polyu.edu.hk/~cslzhang/CT/CT.htm ************************************************************************/ #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <iostream> #include <fstream> #include <sstream> #include <stdio.h> #include <string.h> //#include <Windows.h> #include <time.h> #include "FastCompressiveTracker.h"using namespace cv; using namespace std;void readConfig(char* configFileName, char* imgFilePath, Rect &box,int &num); /* Description: read the tracking information from file "config.txt"Arguments: -configFileName: config file name-ImgFilePath: Path of the storing image sequences-box: [x y width height] intial tracking positionHistory: Created by Kaihua Zhang on 15/8/2012 */ void readImageSequenceFiles(char* ImgFilePath,vector <string> &imgNames,int &num); /* Description: search the image names in the image sequences 
Arguments:-ImgFilePath: path of the image sequence-imgNames: vector that stores image nameHistory: Created by Kaihua Zhang on 15/8/2012 */int main(int argc, char * argv[]) {time_t start,stop;start = time(NULL);//獲取程序開始運行的時間char imgFilePath[100];char conf[100];strcpy(conf,"./config.txt");//char tmpDirPath[MAX_PATH+1];//MAX_PATH在windows下是260char tmpDirPath[261];//this is a test for chengxin Rect box; // [x y width height] tracking positionint num;vector <string> imgNames;readConfig(conf,imgFilePath,box,num);//讀取視頻幀的配置信息readImageSequenceFiles(imgFilePath,imgNames,num);//將每一幀的名稱放入數組imgNames// CT frameworkCompressiveTracker ct;Mat frame;Mat grayImg;sprintf(tmpDirPath, "%s/", imgFilePath);imgNames[0].insert(0,tmpDirPath);cout<<imgNames[0]<<endl;frame = imread(imgNames[0]);//讀取第一幀圖片cvtColor(frame, grayImg, CV_RGB2GRAY); //轉換成灰度圖ct.init(grayImg, box); //通過第一幀初始化分類器等參數//imshow("CT", frame);//just for test 2014/12/14//waitKey(330); char strFrame[20];FILE* resultStream;resultStream = fopen("TrackingResults.txt", "w");fprintf (resultStream,"%i %i %i %i\n",(int)box.x,(int)box.y,(int)box.width,(int)box.height);for(int i = 1; i < imgNames.size()-1; i ++)//處理之后的每一幀圖片{ sprintf(tmpDirPath, "%s/", imgFilePath);imgNames[i].insert(0,tmpDirPath);frame = imread(imgNames[i]);// get framecvtColor(frame, grayImg, CV_RGB2GRAY);ct.processFrame(grayImg, box);// Process frame//處理圖片幀rectangle(frame, box, Scalar(200,0,0),2);// Draw rectangle//矩形繪制fprintf (resultStream,"%i %i %i %i\n",(int)box.x,(int)box.y,(int)box.width,(int)box.height);sprintf(strFrame, "#%d ",i) ;putText(frame,strFrame,cvPoint(0,20),2,1,CV_RGB(25,200,25));imshow("CT", frame);// DisplaywaitKey(1); }stop =time(NULL);//獲取程序結束運行的時間int FPS = imgNames.size()/(stop-start);cout<< "The FPS of CT is : "<<FPS<<endl;cout<<endl;fclose(resultStream);return 0; }void readConfig(char* configFileName, char* imgFilePath, Rect &box,int &num) {int x;int y;int w;int h;int nums;fstream f;char cstring[1000];int readS=0;f.open(configFileName, 
fstream::in);char param1[200]; strcpy(param1,"");char param2[200]; strcpy(param2,"");char param3[200]; strcpy(param3,"");//初始化為空串f.getline(cstring, sizeof(cstring));readS=sscanf (cstring, "%s %s %s", param1,param2, param3);strcpy(imgFilePath,param3);f.getline(cstring, sizeof(cstring)); f.getline(cstring, sizeof(cstring)); f.getline(cstring, sizeof(cstring));readS=sscanf (cstring, "%s %s %i %i %i %i %i", param1,param2, &x, &y, &w, &h,&nums);box = Rect(x, y, w, h);num = nums; }void readImageSequenceFiles(char* imgFilePath,vector <string> &imgNames,int &num) { imgNames.clear();/*char tmpDirSpec[MAX_PATH+1];sprintf (tmpDirSpec, "%s/*", imgFilePath);WIN32_FIND_DATA f;HANDLE h = FindFirstFile(tmpDirSpec , &f);if(h != INVALID_HANDLE_VALUE){FindNextFile(h, &f); //read ..FindNextFile(h, &f); //read .do{imgNames.push_back(f.cFileName);} while(FindNextFile(h, &f));}FindClose(h); */String sequencesName = "%05d.jpg";for(int i=1;i<=num;i++){ char imgName[256];sprintf(imgName,sequencesName.c_str(),i);String name = imgName;imgNames.push_back(name);//cout<<"the name of this frame is "<<name<<endl;}} #include "FastCompressiveTracker.h" #include <math.h> #include <iostream> using namespace cv; using namespace std;//------------------------------------------------ CompressiveTracker::CompressiveTracker(void) {featureMinNumRect = 2;featureMaxNumRect = 4; // number of rectangle from 2 to 4featureNum = 50; // number of all weaker classifiers, i.e,feature poolrOuterPositive = 4; // radical scope of positive samples//scope是范圍的意思rSearchWindow = 25; // size of search windowmuPositive = vector<float>(featureNum, 0.0f);//50個muNegative = vector<float>(featureNum, 0.0f);//50個sigmaPositive = vector<float>(featureNum, 1.0f);//50個sigmaNegative = vector<float>(featureNum, 1.0f);//50個learnRate = 0.85f; // Learning rate parameter }CompressiveTracker::~CompressiveTracker(void) { }/*通過積分圖來計算采集到的每一個樣本的harr特征,這個特征通過與featuresWeight來相乘 就相當于投影到隨機測量矩陣中了,也就是進行稀疏表達了。這里不明白的話,可以看下 論文中的圖二,就比較直觀了。 
還有一點:實際上這里采用的不屬于真正的harr特征,我博客中翻譯有誤。這里計算的是 在box中采樣得到的不同矩形框的灰度加權求和(當權重是負數的時候就是灰度差) 當為了表述方便,我下面都用harr特征來描述。 每一個樣本有50個harr特征,每一個harr特征是由2到3個隨機選擇的矩形框來構成的, 對這些矩形框的灰度加權求和作為這一個harr特征的特征值。 */ void CompressiveTracker::HaarFeature(Rect& _objectBox, int _numFeature) /*Description: compute Haar featuresArguments:-_objectBox: [x y width height] object rectangle-_numFeature: total number of features.The default is 50.每一個樣本有50個harr特征,每一個harr特征是由2到3個隨機選擇的矩形框來構成的, */ { //_numFeature是一個樣本box的harr特征個數,共50個。而上面說到, //每一個harr特征是由2到3個隨機選擇的矩形框(vector<Rect>()類型)來構成的。 features = vector<vector<Rect>>(_numFeature, vector<Rect>());//每一個反應特征的矩形框對應于一個權重,實際上就是隨機測量矩陣中相應的元素,用它來與對應的特征 //相乘,表示以權重的程度來感知這個特征。換句話說,featuresWeight就是隨機測量矩陣。featuresWeight = vector<vector<float>>(_numFeature, vector<float>());int numRect;Rect rectTemp;float weightTemp;for (int i=0; i<_numFeature; i++)//_numFeature是50{ //numRect是 2或者 3//那么下面的功能就是得到[2,4)范圍的隨機數,然后用cvFloor返回不大于參數的最大整數值,那要么是2,要么是3。numRect = cvFloor(rng.uniform((double)featureMinNumRect, (double)featureMaxNumRect));//這兩個值是2和4for (int j=0; j<numRect; j++){//我在一個box中隨機生成一個矩形框,那和你這個box的x和y坐標就無關了,但我必須保證我選擇 //的這個矩形框不會超出你這個box的范圍啊,是吧 //但這里的3和下面的2是啥意思呢?我就不懂了,個人理解是為了避免這個矩形框太靠近box的邊緣了 //要離邊緣最小2個像素,不知道這樣理解對不對,懇請指導 rectTemp.x = cvFloor(rng.uniform(0.0, (double)(_objectBox.width - 3)));rectTemp.y = cvFloor(rng.uniform(0.0, (double)(_objectBox.height - 3)));//cvCeil 返回不小于參數的最小整數值 rectTemp.width = cvCeil(rng.uniform(0.0, (double)(_objectBox.width - rectTemp.x - 2)));rectTemp.height = cvCeil(rng.uniform(0.0, (double)(_objectBox.height - rectTemp.y - 2)));features[i].push_back(rectTemp);//保存得到的特征模板。注意哦,這里的矩形框是相對于box的相對位置哦,不是針對整幅圖像的哦 weightTemp = (float)pow(-1.0, cvFloor(rng.uniform(0.0, 2.0))) / sqrt(float(numRect));//weightTemp = (float)pow(-1.0, c); //pow(-1.0, c)也就是-1的c次方,而c隨機地取0或者1,也就是說weightTemp是隨機的正或者負。 //隨機測量矩陣中,矩陣元素有三種,sqrt(s)、-sqrt(s)和零。為正和為負的概率是相等的, //這就是為什么是[2,4)均勻采樣的原因,就是取0或者1概率一樣。 //但是這里為什么是sqrt(s)分之一呢?還有什么時候是0呢?論文中是0的概率不是挺大的嗎? 
//沒有0元素,哪來的稀疏表達和壓縮呢?不懂,懇請指導!(當然稀疏表達的另一個好處 //就是只需保存非零元素。但這里和這個有關系嗎?) featuresWeight[i].push_back(weightTemp);}} }在上一幀跟蹤的目標box的周圍采集若干正樣本和負樣本,來初始化或者更新分類器的 void CompressiveTracker::sampleRect(Mat& _image, Rect& _objectBox, float _rInner, float _rOuter, int _maxSampleNum, vector<Rect>& _sampleBox) /* Description: compute the coordinate of positive and negative sample image templatesArguments:-_image: processing frame-_objectBox: recent object position -_rInner: inner sampling radius-_rOuter: Outer sampling radius-_maxSampleNum: maximal number of sampled images-_sampleBox: Storing the rectangle coordinates of the sampled images. */ {int rowsz = _image.rows - _objectBox.height - 1;int colsz = _image.cols - _objectBox.width - 1;float inradsq = _rInner*_rInner;float outradsq = _rOuter*_rOuter;//我們是在上一幀跟蹤的目標box的周圍采集正樣本和負樣本的,而這個周圍是通過以 //這個目標為中心的兩個圓來表示,這兩個圓的半徑是_rInner和_rOuter。 //我們在離上一幀跟蹤的目標box的小于_rInner距離的范圍內采集正樣本, //在大于_rOuter距離的范圍內采集負樣本(論文中還有一個上界,但好像 //這里沒有,其實好像也沒什么必要噢)int dist;//這四個是為了防止采集的框超出圖像范圍的,對采集的box的x和y坐標做限制 int minrow = max(0,(int)_objectBox.y-(int)_rInner);int maxrow = min((int)rowsz-1,(int)_objectBox.y+(int)_rInner);int mincol = max(0,(int)_objectBox.x-(int)_rInner);int maxcol = min((int)colsz-1,(int)_objectBox.x+(int)_rInner);int i = 0;float prob = ((float)(_maxSampleNum))/(maxrow-minrow+1)/(maxcol-mincol+1);int r;int c;_sampleBox.clear();//importantRect rec(0,0,0,0);for( r=minrow; r<=(int)maxrow; r++ )for( c=mincol; c<=(int)maxcol; c++ ){dist = (_objectBox.y-r)*(_objectBox.y-r) + (_objectBox.x-c)*(_objectBox.x-c);//后兩個條件是保證距離需要在_rInner和_rOuter的范圍內 //那么rng.uniform(0.,1.) < prob 這個是干嘛的呢? //連著上面看,如果_maxSampleNum大于那個最大個數,prob就大于1,這樣, //rng.uniform(0.,1.) 
< prob這個條件就總能滿足,表示在這個范圍產生的 //所以box我都要了(因為我本身想要更多的,但是你給不了我那么多,那么你能給的,我肯定全要了)。 //那如果你給的太多了,我不要那么多,也就是prob<1,那我就隨機地跳幾個走好了 if( rng.uniform(0.,1.)<prob && dist < inradsq && dist >= outradsq ){rec.x = c;rec.y = r;rec.width = _objectBox.width;rec.height= _objectBox.height;_sampleBox.push_back(rec); i++;}}_sampleBox.resize(i);} //這個sampleRect的重載函數是用來在上一幀跟蹤的目標box的周圍(距離小于_srw)采集若干box來待檢測。 //與上面的那個不一樣,上面那個是在這一幀已經檢測出目標的基礎上,采集正負樣本來更新分類器的。 //上面那個屬于論文中提到的算法的第四個步驟,這個是第一個步驟。然后過程差不多,沒什么好說的了 // void CompressiveTracker::sampleRect(Mat& _image, Rect& _objectBox, float _srw, vector<Rect>& _sampleBox,int step) /* Description: Compute the coordinate of samples when detecting the object.*/ {int rowsz = _image.rows - _objectBox.height - 1;int colsz = _image.cols - _objectBox.width - 1;float inradsq = _srw*_srw; int dist;int minrow = max(0,(int)_objectBox.y-(int)_srw);int maxrow = min((int)rowsz-1,(int)_objectBox.y+(int)_srw);int mincol = max(0,(int)_objectBox.x-(int)_srw);int maxcol = min((int)colsz-1,(int)_objectBox.x+(int)_srw);int i = 0;int r;int c;Rect rec(0,0,0,0);_sampleBox.clear();//important//step表示步長for( r=minrow; r<=(int)maxrow; r=r+step )for( c=mincol; c<=(int)maxcol; c=c+step ){dist = (_objectBox.y-r)*(_objectBox.y-r) + (_objectBox.x-c)*(_objectBox.x-c);if( dist < inradsq ){rec.x = c;rec.y = r;rec.width = _objectBox.width;rec.height= _objectBox.height;_sampleBox.push_back(rec); i++;}}_sampleBox.resize(i);} // Compute the features of samples //通過積分圖來計算采集到的每一個樣本的harr特征,這個特征通過與featuresWeight來相乘 //就相當于投影到隨機測量矩陣中了,也就是進行稀疏表達了。這里不明白的話,可以看下 //論文中的圖二,就比較直觀了。所以這里得到的是:每個樣本的稀疏表達后的harr特征。 //還有一點:實際上這里采用的不屬于真正的harr特征,我博客中翻譯有誤。這里計算的是 //在box中采樣得到的不同矩形框的灰度加權求和 void CompressiveTracker::getFeatureValue(Mat& _imageIntegral, vector<Rect>& _sampleBox, Mat& _sampleFeatureValue) {int sampleBoxSize = _sampleBox.size();_sampleFeatureValue.create(featureNum, sampleBoxSize, CV_32F);//featureNum是50,參數分別是行、列、類型float tempValue;int xMin;int xMax;int yMin;int yMax;for (int i=0; i<featureNum; i++){for (int j=0; 
j<sampleBoxSize; j++){tempValue = 0.0f;for (size_t k=0; k<features[i].size(); k++){//features中保存的特征模板(矩形框)是相對于box的相對位置的, //所以需要加上box的坐標才是其在整幅圖像中的坐標 xMin = _sampleBox[j].x + features[i][k].x;xMax = _sampleBox[j].x + features[i][k].x + features[i][k].width;yMin = _sampleBox[j].y + features[i][k].y;yMax = _sampleBox[j].y + features[i][k].y + features[i][k].height;tempValue += featuresWeight[i][k] * (_imageIntegral.at<float>(yMin, xMin) +_imageIntegral.at<float>(yMax, xMax) -_imageIntegral.at<float>(yMin, xMax) -_imageIntegral.at<float>(yMax, xMin));}_sampleFeatureValue.at<float>(i,j) = tempValue;}} } // Update the mean and variance of the gaussian classifier //論文中是通過用高斯分布去描述樣本的每一個harr特征的概率分布的。高斯分布就可以通過期望和方差 //兩個參數來表征。然后通過正負樣本的每一個harr特征高斯概率分布的對數比值,來構建分類器決策 //該box屬于目標還是背景。這里計算新采集到的正負樣本的特征的期望和標準差,并用其來更新分類器 void CompressiveTracker::classifierUpdate(Mat& _sampleFeatureValue, vector<float>& _mu, vector<float>& _sigma, float _learnRate) {//后面默認的參數個數是50個50個和0.85Scalar muTemp;Scalar sigmaTemp;for (int i=0; i<featureNum; i++){meanStdDev(_sampleFeatureValue.row(i), muTemp, sigmaTemp);_sigma[i] = (float)sqrt( _learnRate*_sigma[i]*_sigma[i] + (1.0f-_learnRate)*sigmaTemp.val[0]*sigmaTemp.val[0] + _learnRate*(1.0f-_learnRate)*(_mu[i]-muTemp.val[0])*(_mu[i]-muTemp.val[0])); // equation 6 in paper_mu[i] = _mu[i]*_learnRate + (1.0f-_learnRate)*muTemp.val[0]; // equation 6 in paper} }// Compute the ratio classifier void CompressiveTracker::radioClassifier(vector<float>& _muPos, vector<float>& _sigmaPos, vector<float>& _muNeg, vector<float>& _sigmaNeg,Mat& _sampleFeatureValue, float& _radioMax, int& _radioMaxIndex) {float sumRadio;_radioMax = -FLT_MAX;_radioMaxIndex = 0;float pPos;float pNeg;int sampleBoxNum = _sampleFeatureValue.cols;for (int j=0; j<sampleBoxNum; j++){sumRadio = 0.0f;for (int i=0; i<featureNum; i++){pPos = exp( (_sampleFeatureValue.at<float>(i,j)-_muPos[i])*(_sampleFeatureValue.at<float>(i,j)-_muPos[i]) / -(2.0f*_sigmaPos[i]*_sigmaPos[i]+1e-30) ) / 
(_sigmaPos[i]+1e-30);pNeg = exp( (_sampleFeatureValue.at<float>(i,j)-_muNeg[i])*(_sampleFeatureValue.at<float>(i,j)-_muNeg[i]) / -(2.0f*_sigmaNeg[i]*_sigmaNeg[i]+1e-30) ) / (_sigmaNeg[i]+1e-30);sumRadio += log(pPos+1e-30) - log(pNeg+1e-30); // equation 4}if (_radioMax < sumRadio){_radioMax = sumRadio;_radioMaxIndex = j;}} } //傳入第一幀和要跟蹤的目標box(由文件讀入或用戶鼠標框選),初始化處理 void CompressiveTracker::init(Mat& _frame, Rect& _objectBox) {// compute feature template//計算box的harr特征模板,先存著HaarFeature(_objectBox, featureNum);//因為這是第一幀,目標box是由由文件讀入或者用戶鼠標框選的,是已知的, //所以我們通過在這個目標box周圍,采集正樣本和負樣本來初始化我們的分類器 // compute sample templatessampleRect(_frame, _objectBox, rOuterPositive, 0, 1000000, samplePositiveBox);//rOuterPositive 默認是4sampleRect(_frame, _objectBox, rSearchWindow*1.5, rOuterPositive+4.0, 100, sampleNegativeBox);//rSearchWindow是25//計算積分圖,用以快速的計算harr特征integral(_frame, imageIntegral, CV_32F);//通過上面的積分圖,計算我們采樣到的正負樣本的box的harr特征 getFeatureValue(imageIntegral, samplePositiveBox, samplePositiveFeatureValue);getFeatureValue(imageIntegral, sampleNegativeBox, sampleNegativeFeatureValue);//通過上面的正負樣本的特征來初始化分類器 classifierUpdate(samplePositiveFeatureValue, muPositive, sigmaPositive, learnRate);classifierUpdate(sampleNegativeFeatureValue, muNegative, sigmaNegative, learnRate); } void CompressiveTracker::processFrame(Mat& _frame, Rect& _objectBox) {// predict//第一次采樣,半徑為25,步長為4(跟CT算法不同地方,這里分了兩次采樣,將采樣的數量減少)sampleRect(_frame, _objectBox, rSearchWindow,detectBox,4);integral(_frame, imageIntegral, CV_32F);getFeatureValue(imageIntegral, detectBox, detectFeatureValue);int radioMaxIndex;float radioMax;radioClassifier(muPositive, sigmaPositive, muNegative, sigmaNegative, detectFeatureValue, radioMax, radioMaxIndex);_objectBox = detectBox[radioMaxIndex];//第二次采樣,半徑為10,步長為1sampleRect(_frame, _objectBox, 10,detectBox,1);integral(_frame, imageIntegral, CV_32F);getFeatureValue(imageIntegral, detectBox, detectFeatureValue);radioClassifier(muPositive, sigmaPositive, muNegative, sigmaNegative, detectFeatureValue, 
radioMax, radioMaxIndex);_objectBox = detectBox[radioMaxIndex];// updatesampleRect(_frame, _objectBox, rOuterPositive, 0.0, 1000000, samplePositiveBox);sampleRect(_frame, _objectBox, rSearchWindow*1.5, rOuterPositive+4.0, 100, sampleNegativeBox);getFeatureValue(imageIntegral, samplePositiveBox, samplePositiveFeatureValue);getFeatureValue(imageIntegral, sampleNegativeBox, sampleNegativeFeatureValue);classifierUpdate(samplePositiveFeatureValue, muPositive, sigmaPositive, learnRate);classifierUpdate(sampleNegativeFeatureValue, muNegative, sigmaNegative, learnRate); }
項目下載鏈接:
linux下:點擊打開鏈接(linux下通過make編譯)
windows下:點擊打開鏈接(openCV2.4.9 vs2012的工程)
總結
以上是生活随笔為你收集整理的Fast Compressive Tracking(快速压缩跟踪)算法的C++代码实现的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: React-Navigation Sta
- 下一篇: 【玩转yolov5】使用TensorRT