OpenCV之feature2d 模块. 2D特征框架(2)特征描述 使用FLANN进行特征点匹配 使用二维特征点(Features2D)和单映射(Homography)寻找已知物体 平面物体检测
特征描述
目標
在本教程中,我們將涉及:
- 使用 DescriptorExtractor 接口來尋找關鍵點對應的特征向量. 特別地:
- 使用 SurfDescriptorExtractor 以及它的函數 compute 來完成特定的計算.
- 使用 BruteForceMatcher 來匹配特征向量。
- 使用函數 drawMatches 來繪制檢測到的匹配點.
理論
代碼
這個教程代碼如下所示. 你還可以從這裡下載到源代碼
#include <stdio.h> #include <iostream> #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp"using namespace cv;void readme();/** @function main */ int main( int argc, char** argv ) {if( argc != 3 ){ return -1; }Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );if( !img_1.data || !img_2.data ){ return -1; }//-- Step 1: Detect the keypoints using SURF Detectorint minHessian = 400;SurfFeatureDetector detector( minHessian );std::vector<KeyPoint> keypoints_1, keypoints_2;detector.detect( img_1, keypoints_1 );detector.detect( img_2, keypoints_2 );//-- Step 2: Calculate descriptors (feature vectors)SurfDescriptorExtractor extractor;Mat descriptors_1, descriptors_2;extractor.compute( img_1, keypoints_1, descriptors_1 );extractor.compute( img_2, keypoints_2, descriptors_2 );//-- Step 3: Matching descriptor vectors with a brute force matcherBruteForceMatcher< L2<float> > matcher;std::vector< DMatch > matches;matcher.match( descriptors_1, descriptors_2, matches );//-- Draw matchesMat img_matches;drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );//-- Show detected matchesimshow("Matches", img_matches );waitKey(0);return 0;}/** @function readme */void readme(){ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }解釋
結果
這是使用BruteForce 匹配兩張圖的結果:
使用FLANN進行特征點匹配
目標
在本教程中我們將涉及以下內容:
- 使用 FlannBasedMatcher 接口,基於 FLANN(快速最近鄰逼近搜索函數庫,Fast Approximate Nearest Neighbor Search Library)實現快速高效的特征匹配
理論
代碼
這個教程的源代碼如下所示。你還可以從以下鏈接下載得到源代碼
#include <stdio.h> #include <iostream> #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp"using namespace cv;void readme();/** @function main */ int main( int argc, char** argv ) {if( argc != 3 ){ readme(); return -1; }Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );if( !img_1.data || !img_2.data ){ std::cout<< " --(!) Error reading images " << std::endl; return -1; }//-- Step 1: Detect the keypoints using SURF Detectorint minHessian = 400;SurfFeatureDetector detector( minHessian );std::vector<KeyPoint> keypoints_1, keypoints_2;detector.detect( img_1, keypoints_1 );detector.detect( img_2, keypoints_2 );//-- Step 2: Calculate descriptors (feature vectors)SurfDescriptorExtractor extractor;Mat descriptors_1, descriptors_2;extractor.compute( img_1, keypoints_1, descriptors_1 );extractor.compute( img_2, keypoints_2, descriptors_2 );//-- Step 3: Matching descriptor vectors using FLANN matcherFlannBasedMatcher matcher;std::vector< DMatch > matches;matcher.match( descriptors_1, descriptors_2, matches );double max_dist = 0; double min_dist = 100;//-- Quick calculation of max and min distances between keypointsfor( int i = 0; i < descriptors_1.rows; i++ ){ double dist = matches[i].distance;if( dist < min_dist ) min_dist = dist;if( dist > max_dist ) max_dist = dist;}printf("-- Max dist : %f \n", max_dist );printf("-- Min dist : %f \n", min_dist );//-- Draw only "good" matches (i.e. 
whose distance is less than 2*min_dist )//-- PS.- radiusMatch can also be used here.std::vector< DMatch > good_matches;for( int i = 0; i < descriptors_1.rows; i++ ){ if( matches[i].distance < 2*min_dist ){ good_matches.push_back( matches[i]); }}//-- Draw only "good" matchesMat img_matches;drawMatches( img_1, keypoints_1, img_2, keypoints_2,good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );//-- Show detected matchesimshow( "Good Matches", img_matches );for( int i = 0; i < good_matches.size(); i++ ){ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }waitKey(0);return 0;}/** @function readme */void readme(){ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }解釋
結果
這里是第一張圖特征點檢測結果:
此外我們通過控制臺輸出FLANN匹配關鍵點結果:
使用二維特征點(Features2D)和單映射(Homography)尋找已知物體
目標
在本教程中我們將涉及以下內容:
- 使用函數 findHomography 尋找匹配上的關鍵點的變換。
- 使用函數 perspectiveTransform 來映射點.
理論
代碼
這個教程的源代碼如下所示。你還可以從以下鏈接下載到源代碼
#include <stdio.h> #include <iostream> #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp"using namespace cv;void readme();/** @function main */ int main( int argc, char** argv ) {if( argc != 3 ){ readme(); return -1; }Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );if( !img_object.data || !img_scene.data ){ std::cout<< " --(!) Error reading images " << std::endl; return -1; }//-- Step 1: Detect the keypoints using SURF Detectorint minHessian = 400;SurfFeatureDetector detector( minHessian );std::vector<KeyPoint> keypoints_object, keypoints_scene;detector.detect( img_object, keypoints_object );detector.detect( img_scene, keypoints_scene );//-- Step 2: Calculate descriptors (feature vectors)SurfDescriptorExtractor extractor;Mat descriptors_object, descriptors_scene;extractor.compute( img_object, keypoints_object, descriptors_object );extractor.compute( img_scene, keypoints_scene, descriptors_scene );//-- Step 3: Matching descriptor vectors using FLANN matcherFlannBasedMatcher matcher;std::vector< DMatch > matches;matcher.match( descriptors_object, descriptors_scene, matches );double max_dist = 0; double min_dist = 100;//-- Quick calculation of max and min distances between keypointsfor( int i = 0; i < descriptors_object.rows; i++ ){ double dist = matches[i].distance;if( dist < min_dist ) min_dist = dist;if( dist > max_dist ) max_dist = dist;}printf("-- Max dist : %f \n", max_dist );printf("-- Min dist : %f \n", min_dist );//-- Draw only "good" matches (i.e. 
whose distance is less than 3*min_dist )std::vector< DMatch > good_matches;for( int i = 0; i < descriptors_object.rows; i++ ){ if( matches[i].distance < 3*min_dist ){ good_matches.push_back( matches[i]); }}Mat img_matches;drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );//-- Localize the objectstd::vector<Point2f> obj;std::vector<Point2f> scene;for( int i = 0; i < good_matches.size(); i++ ){//-- Get the keypoints from the good matchesobj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );}Mat H = findHomography( obj, scene, CV_RANSAC );//-- Get the corners from the image_1 ( the object to be "detected" )std::vector<Point2f> obj_corners(4);obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );std::vector<Point2f> scene_corners(4);perspectiveTransform( obj_corners, scene_corners, H);//-- Draw lines between the corners (the mapped object in the scene - image_2 )line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );//-- Show detected matchesimshow( "Good Matches & Object detection", img_matches );waitKey(0);return 0;}/** @function readme */void readme(){ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }解釋
結果
檢測到的目標結果 (用綠色標記出來的部分)
平面物體檢測
這個教程的目標是學習如何使用 features2d 和 calib3d 模塊來檢測場景中的已知平面物體。
測試數據: 數據圖像文件,比如 “box.png”或者“box_in_scene.png”等。
創建新的控制臺(console)項目。讀入兩個輸入圖像。
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);檢測兩個圖像的關鍵點(尺度旋轉都不發生變化的關鍵點)。
// 對第一幅圖像進行關鍵點檢測 FastFeatureDetector detector(15); vector<KeyPoint> keypoints1; detector.detect(img1, keypoints1);... // 對第二幅圖像進行關鍵點檢測計算每個關鍵點的描述向量(Descriptor)。
// 計算描述向量 SurfDescriptorExtractor extractor; Mat descriptors1; extractor.compute(img1, keypoints1, descriptors1);... // 計算第二幅圖像中的關鍵點對應的描述向量計算兩幅圖像中的關鍵點對應的描述向量距離,尋找兩圖像中距離最近的描述向量對應的關鍵點,即為兩圖像中匹配上的關鍵點:
// 關鍵點描述向量匹配 BruteForceMatcher<L2<float> > matcher; vector<DMatch> matches; matcher.match(descriptors1, descriptors2, matches);可視化結果:
// 繪制出結果 namedWindow("matches", 1); Mat img_matches; drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches); imshow("matches", img_matches); waitKey(0);尋找兩個點集合中的單映射變換(homography transformation):
vector<Point2f> points1, points2; // 用點填充形成矩陣(array) .... Mat H = findHomography(Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold);創建內匹配點集合同時繪制出匹配上的點。用perspectiveTransform函數來通過單映射來映射點:
Mat points1Projected; perspectiveTransform(Mat(points1), points1Projected, H);
用 drawMatches 來繪制內匹配點.
from: http://www.opencv.org.cn/opencvdoc/2.3.2/html/doc/tutorials/features2d/table_of_content_features2d/table_of_content_features2d.html#table-of-content-feature2d
總結
以上是生活随笔為你收集整理的OpenCV之feature2d 模块. 2D特征框架(2)特征描述 使用FLANN进行特征点匹配 使用二维特征点(Features2D)和单映射(Homography)寻找已知物体 平面物体检测的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: OpenCV之feature2d 模块.
- 下一篇: OpenCV之objdetect 模块.