日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 人工智能 > 目标检测 > 内容正文

目标检测

【opencv】动态背景下运动目标检测 SURF配准差分

發布時間:2024/3/13 目标检测 90 豆豆
生活随笔 收集整理的這篇文章主要介紹了 【opencv】动态背景下运动目标检测 SURF配准差分 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

主要思路是,讀入視頻,隔幀采用SURF計算匹配的特征點,進而計算兩圖的投影映射矩陣,做差分二值化,連通域檢測,繪制目標。

如果背景是靜態(tài)的采用camshift即可。

本文方法速度debug下大概2-3幀,release下8-9幀(SURF部分,不包含連通域以及繪制),后續可增加選定目標,動態模版小鄰域中跟蹤目標。實現對動態背景下的運動目標檢測,模版跟蹤速度可達150幀。

?

環(huán)境:opencv2.4.9 + vs2012

#include <iostream> #include <opencv2/opencv.hpp> #include <opencv2/nonfree/nonfree.hpp> using namespace cv; using namespace std;void main() {//VideoCapture capture(0);VideoCapture capture("3.mov");Mat image01,image02,imgdiff;while (true){//隔兩幀配準(zhǔn)capture >> image01;if (image01.empty()){break;}capture >> image02;capture >> image02;if (image02.empty()){break;}//GaussianBlur(image02, image02, Size(3,3), 0);double time0 = static_cast<double>(getTickCount());//開始計時//灰度圖轉(zhuǎn)換 Mat image1,image2; cvtColor(image01,image1,CV_RGB2GRAY); cvtColor(image02,image2,CV_RGB2GRAY); //提取特征點 SurfFeatureDetector surfDetector(2500); // 海塞矩陣閾值,高一點速度會快些vector<KeyPoint> keyPoint1,keyPoint2; surfDetector.detect(image1,keyPoint1); surfDetector.detect(image2,keyPoint2); //特征點描述,為下邊的特征點匹配做準(zhǔn)備 SurfDescriptorExtractor SurfDescriptor; Mat imageDesc1,imageDesc2; SurfDescriptor.compute(image1,keyPoint1,imageDesc1); SurfDescriptor.compute(image2,keyPoint2,imageDesc2); //獲得匹配特征點,并提取最優(yōu)配對 FlannBasedMatcher matcher; vector<DMatch> matchePoints; matcher.match(imageDesc1,imageDesc2,matchePoints,Mat()); sort(matchePoints.begin(),matchePoints.end()); //特征點排序 //獲取排在前N個的最優(yōu)匹配特征點 vector<Point2f> imagePoints1,imagePoints2; for(int i=0; i<(int)(matchePoints.size()*0.25); i++) { imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt); imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt); } //獲取圖像1到圖像2的投影映射矩陣 尺寸為3*3 Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC); //cout<<"變換矩陣為:\n"<<homo<<endl<<endl; //輸出映射矩陣 //圖像配準(zhǔn) Mat imageTransform1,imgpeizhun,imgerzhi; warpPerspective(image01,imageTransform1,homo,Size(image02.cols,image02.rows)); //imshow("經(jīng)過透視矩陣變換后",imageTransform1); absdiff(image02, imageTransform1, imgpeizhun);//imshow("配準(zhǔn)diff", imgpeizhun); threshold(imgpeizhun, imgerzhi, 50, 255.0 , CV_THRESH_BINARY);//imshow("配準(zhǔn)二值化", imgerzhi);//輸出所需時間time0 = ((double)getTickCount()-time0)/getTickFrequency();cout<<1/time0<<endl;Mat 
temp,image02temp;float m_BiLi = 0.9;image02temp = image02.clone();cvtColor(imgerzhi,temp,CV_RGB2GRAY); //檢索連通域Mat se=getStructuringElement(MORPH_RECT, Size(5,5));morphologyEx(temp, temp, MORPH_DILATE, se);vector<vector<Point>> contours;findContours(temp, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);if (contours.size()<1){continue;}for (int k = 0; k < contours.size(); k++){Rect bomen = boundingRect(contours[k]);//省略由于配準(zhǔn)帶來的邊緣無效信息if (bomen.x > image02temp.cols * (1 - m_BiLi) && bomen.y > image02temp.rows * (1 - m_BiLi) && bomen.x + bomen.width < image02temp.cols * m_BiLi && bomen.y + bomen.height < image02temp.rows * m_BiLi){rectangle(image02temp, bomen, Scalar(255,0,255), 2, 8, 0);}}/*for (int i = 50; i < image02.rows - 100; i++){for (int j = 50; j < image02.cols - 100; j++){uchar pixel = temp.at<uchar>(i,j);if (pixel == 255){Rect bomen(j-7, i-7, 14, 14);rectangle(image02, bomen, Scalar(255,255,255),1,8,0);}}}*/imshow("檢測與跟蹤",image02temp);waitKey(20); } }

檢測遠處運動的車輛

?

surf消除誤匹配點

?

// Matches SURF features between two images, discards weak matches whose
// distance lies in the upper half of the [min, max] distance range, and
// writes a visualization of the surviving matches to "匹配圖.jpg".
// Inputs are taken by value and cloned, so the callers' images are untouched.
// Returns 0 on success, -1 when no matches were found.
int surf2(Mat image01, Mat image02)
{
    Mat image1 = image01.clone();
    Mat image2 = image02.clone();

    // Detect keypoints. The Hessian threshold bounds feature strength,
    // not the number of keypoints returned.
    SurfFeatureDetector surfDetector(2000);
    vector<KeyPoint> keyPoint1, keyPoint2;
    surfDetector.detect(image1, keyPoint1);
    surfDetector.detect(image2, keyPoint2);

    // Draw keypoints onto the working copies (debugging aid; the imshow
    // calls are intentionally left disabled).
    drawKeypoints(image1, keyPoint1, image1, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
    drawKeypoints(image2, keyPoint2, image2, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    /*
    imshow("KeyPoints of image1", image1);
    imshow("KeyPoints of image2", image2);
    */

    // Compute descriptors for matching.
    SurfDescriptorExtractor surfDescriptor;
    Mat imageDesc1, imageDesc2;
    surfDescriptor.compute(image1, keyPoint1, imageDesc1);
    surfDescriptor.compute(image2, keyPoint2, imageDesc2);

    //BruteForceMatcher<L2<float>> matcher;
    FlannBasedMatcher matcher;
    vector<DMatch> matchePoints;
    matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
    if (matchePoints.empty())
    {
        return -1;  // nothing to filter or draw
    }

    // Find the true min/max match distances. The original seeded the
    // minimum with 1.0, which silently clipped it whenever every match
    // distance exceeded 1 and skewed the good-match threshold below.
    double minMatch = matchePoints[0].distance;
    double maxMatch = matchePoints[0].distance;
    for (size_t i = 1; i < matchePoints.size(); i++)
    {
        minMatch = minMatch > matchePoints[i].distance ? matchePoints[i].distance : minMatch;
        maxMatch = maxMatch < matchePoints[i].distance ? matchePoints[i].distance : maxMatch;
    }
    cout << "最佳匹配值是: " << minMatch << endl;
    cout << "最差匹配值是: " << maxMatch << endl;

    // Keep only matches in the lower half of the distance range.
    vector<DMatch> goodMatchePoints;
    for (size_t i = 0; i < matchePoints.size(); i++)
    {
        if (matchePoints[i].distance < minMatch + (maxMatch - minMatch) / 2)
        {
            goodMatchePoints.push_back(matchePoints[i]);
        }
    }

    // Render the filtered matches on the original images and save to disk.
    Mat imageOutput;
    drawMatches(image01, keyPoint1, image02, keyPoint2, goodMatchePoints, imageOutput,
                Scalar::all(-1), Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    imwrite("匹配圖.jpg", imageOutput);
    return 0;
}

?

?

?

?

?

總結(jié)

以上是生活随笔為你收集整理的【opencv】动态背景下运动目标检测 SURF配准差分的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。