日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 人文社科 > 生活经验 >内容正文

生活经验

基于SfM计算相机姿态

發布時間:2023/11/27 生活经验 32 豆豆
生活随笔 收集整理的這篇文章主要介紹了 基于SfM计算相机姿态 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

具體過程為:
① 通過相機標定方法,預先計算相機的內參矩陣;
② 相鄰幀特征點匹配,并結合內參矩陣計算本征矩陣;
③ 本征矩陣分解獲得相機的外參矩陣[R | T],最終的相機移動距離等于矩陣T的Frobenius范數。

#include <opencv2\xfeatures2d\nonfree.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <iostream>using namespace cv;
using namespace std;void extract_features(vector<string>& image_names,vector<vector<KeyPoint>>& key_points_for_all,vector<Mat>& descriptor_for_all,vector<vector<Vec3b>>& colors_for_all)
{key_points_for_all.clear();descriptor_for_all.clear();Mat image;//讀取圖像,獲取圖像特征點,并保存Ptr<Feature2D> sift = xfeatures2d::SIFT::create(0, 3, 0.04, 10);for (auto it = image_names.begin(); it != image_names.end(); ++it){image = imread(*it);if (image.empty()) continue;vector<KeyPoint> key_points;Mat descriptor;//偶爾出現內存分配失敗的錯誤sift->detectAndCompute(image, noArray(), key_points, descriptor);//特征點過少,則排除該圖像if (key_points.size() <= 10) continue;key_points_for_all.push_back(key_points);descriptor_for_all.push_back(descriptor);vector<Vec3b> colors(key_points.size());for (int i = 0; i < key_points.size(); ++i){Point2f& p = key_points[i].pt;colors[i] = image.at<Vec3b>(p.y, p.x);}colors_for_all.push_back(colors);}
}

// Brute-force (L2) descriptor matching from query to train. A match is kept
// only if it passes Lowe's ratio test and its distance is within an absolute
// cap derived from the best surviving match.
void match_features(Mat& query, Mat& train, vector<DMatch>& matches)
{
	// Two nearest neighbours per query descriptor, needed for the ratio test.
	vector<vector<DMatch>> knn_matches;
	BFMatcher matcher(NORM_L2);
	matcher.knnMatch(query, train, knn_matches, 2);

	// Ratio test: the best match must be clearly better than the runner-up.
	auto passes_ratio = [](const vector<DMatch>& knn)
	{
		return !(knn[0].distance > 0.6 * knn[1].distance);
	};

	// Smallest distance among the ratio-test survivors.
	float min_dist = FLT_MAX;
	for (const auto& knn : knn_matches)
	{
		if (!passes_ratio(knn)) continue;
		if (knn[0].distance < min_dist)
			min_dist = knn[0].distance;
	}

	// Keep matches that pass the ratio test and are not far from the best
	// match (the floor of 10 stops a tiny min_dist from over-pruning).
	matches.clear();
	for (const auto& knn : knn_matches)
	{
		if (!passes_ratio(knn)) continue;
		if (knn[0].distance > 5 * max(min_dist, 10.0f)) continue;
		matches.push_back(knn[0]);
	}
}

// Estimate the relative pose between two views from matched image points.
// K supplies the intrinsics; on success R and T receive the relative
// rotation and (unit-scale) translation, and mask flags the inlier matches.
// Returns false when the essential matrix cannot be estimated reliably.
bool find_transform(Mat& K, vector<Point2f>& p1, vector<Point2f>& p2, Mat& R, Mat& T, Mat& mask)
{
	// Focal length and principal point read straight out of the 3x3 CV_64F
	// intrinsic matrix via row-major linear indexing.
	double focal_length = 0.5 * (K.at<double>(0) + K.at<double>(4));
	Point2d principle_point(K.at<double>(2), K.at<double>(5));

	// Essential matrix with RANSAC; mask marks the surviving matches.
	Mat E = findEssentialMat(p1, p2, focal_length, principle_point, RANSAC, 0.999, 1.0, mask);
	if (E.empty()) return false;

	double inlier_count = countNonZero(mask);
	cout << (int)inlier_count << " -in- " << p1.size() << endl;

	// RANSAC output is unreliable when too few matches survive.
	if (inlier_count <= 15 || (inlier_count / p1.size()) < 0.6)
		return false;

	// Decompose E; recoverPose also cheirality-tests the points and refines
	// mask. in_front counts points lying in front of both cameras.
	int in_front = recoverPose(E, p1, p2, R, T, focal_length, principle_point, mask);

	// Demand that enough inliers are in front of both cameras.
	return ((double)in_front) / inlier_count >= 0.7;
}void get_matched_points(vector<KeyPoint>& p1, vector<KeyPoint>& p2, vector<DMatch> matches, vector<Point2f>& out_p1, vector<Point2f>& out_p2)
{out_p1.clear();out_p2.clear();for (int i = 0; i < matches.size(); ++i){out_p1.push_back(p1[matches[i].queryIdx].pt);out_p2.push_back(p2[matches[i].trainIdx].pt);}
}void get_matched_colors(vector<Vec3b>& c1,vector<Vec3b>& c2,vector<DMatch> matches,vector<Vec3b>& out_c1,vector<Vec3b>& out_c2)
{out_c1.clear();out_c2.clear();for (int i = 0; i < matches.size(); ++i){out_c1.push_back(c1[matches[i].queryIdx]);out_c2.push_back(c2[matches[i].trainIdx]);}
}

// Triangulate matched (inlier) image points into homogeneous 3D points.
// Camera 1 is fixed at the origin ([I|0]); camera 2 is at [R|T]. structure
// receives a 4xN CV_32F matrix, one homogeneous point per column.
void reconstruct(Mat& K, Mat& R, Mat& T, vector<Point2f>& p1, vector<Point2f>& p2, Mat& structure)
{
	// triangulatePoints only accepts float (CV_32F) projection matrices.
	Mat proj1(3, 4, CV_32FC1);
	Mat proj2(3, 4, CV_32FC1);

	// FIX: the original assigned `Mat::eye(...)` to the temporary ROI header
	// (`proj1(...) = Mat::eye(...)`), a fragile OpenCV idiom that is easy to
	// break into a header reassignment. Copy explicitly into the ROI instead.
	Mat eye33 = Mat::eye(3, 3, CV_32FC1);
	eye33.copyTo(proj1(Range(0, 3), Range(0, 3)));
	proj1.col(3) = Scalar(0);

	R.convertTo(proj2(Range(0, 3), Range(0, 3)), CV_32FC1);
	T.convertTo(proj2.col(3), CV_32FC1);

	// Full projection matrices P = K * [R|T].
	Mat fK;
	K.convertTo(fK, CV_32FC1);
	proj1 = fK * proj1;
	proj2 = fK * proj2;

	// Linear triangulation of the corresponding point pairs.
	triangulatePoints(proj1, proj2, p1, p2, structure);
}

// In-place filter: keep only the entries of p1 whose mask byte is non-zero.
// mask.rows is assumed to equal p1.size() (as produced by find_transform).
void maskout_points(vector<Point2f>& p1, Mat& mask)
{
	vector<Point2f> kept;
	kept.reserve(p1.size());
	for (int i = 0; i < mask.rows; ++i)
	{
		if (mask.at<uchar>(i))
			kept.push_back(p1[i]);
	}
	p1.swap(kept);
}

// In-place filter: keep only the colours whose mask byte is non-zero.
// Mirrors maskout_points so colours stay aligned with the kept points.
void maskout_colors(vector<Vec3b>& p1, Mat& mask)
{
	vector<Vec3b> kept;
	kept.reserve(p1.size());
	for (int i = 0; i < mask.rows; ++i)
	{
		if (mask.at<uchar>(i))
			kept.push_back(p1[i]);
	}
	p1.swap(kept);
}

// Persist the reconstruction (camera poses + coloured point cloud) through
// cv::FileStorage (YAML/XML format chosen by the file extension).
//
// FIX: the original wrote keys containing spaces ("Camera Count",
// "Point Count"); OpenCV FileStorage rejects element names that are not
// valid identifiers, so the write aborted with an error. The keys are now
// "CameraCount" / "PointCount". Loop counters also no longer mix a signed
// bound with size_t indices.
void save_structure(string file_name, vector<Mat>& rotations, vector<Mat>& motions, Mat& structure, vector<Vec3b>& colors)
{
	int n = (int)rotations.size();

	FileStorage fs(file_name, FileStorage::WRITE);
	fs << "CameraCount" << n;
	fs << "PointCount" << structure.cols;

	fs << "Rotations" << "[";
	for (int i = 0; i < n; ++i)
		fs << rotations[i];
	fs << "]";

	fs << "Motions" << "[";
	for (int i = 0; i < n; ++i)
		fs << motions[i];
	fs << "]";

	fs << "Points" << "[";
	for (int i = 0; i < structure.cols; ++i)
	{
		// c is a view into structure, so the normalisation below also
		// rewrites structure's column in place (as the original did).
		Mat_<float> c = structure.col(i);
		// Homogeneous coordinates: divide by the last component to obtain
		// the Euclidean point. (c(3) == 0 -- a point at infinity -- would
		// yield inf/nan; upstream cheirality filtering makes that rare.)
		c /= c(3);
		fs << Point3f(c(0), c(1), c(2));
	}
	fs << "]";

	fs << "Colors" << "[";
	for (size_t i = 0; i < colors.size(); ++i)
		fs << colors[i];
	fs << "]";

	fs.release();
}void main()
{string img1 = "0004.png";string img2 = "0006.png";vector<string> img_names = { img1, img2 };vector<vector<KeyPoint>> key_points_for_all;vector<Mat> descriptor_for_all;vector<vector<Vec3b>> colors_for_all;vector<DMatch> matches;//本征矩陣Mat K(Matx33d(2759.48, 0, 1520.69,0, 2764.16, 1006.81,0, 0, 1));//提取特征extract_features(img_names, key_points_for_all, descriptor_for_all, colors_for_all);//特征匹配match_features(descriptor_for_all[0], descriptor_for_all[1], matches);//計算變換矩陣vector<Point2f> p1, p2;vector<Vec3b> c1, c2;Mat R, T;	//旋轉矩陣和平移向量Mat mask;	//mask中大于零的點代表匹配點,等于零代表失配點get_matched_points(key_points_for_all[0], key_points_for_all[1], matches, p1, p2);get_matched_colors(colors_for_all[0], colors_for_all[1], matches, c1, c2);find_transform(K, p1, p2, R, T, mask);//三維重建Mat structure;	//4行N列的矩陣,每一列代表空間中的一個點(齊次坐標)maskout_points(p1, mask);maskout_points(p2, mask);reconstruct(K, R, T, p1, p2, structure);//保存并顯示vector<Mat> rotations = { Mat::eye(3, 3, CV_64FC1), R };vector<Mat> motions = { Mat::zeros(3, 1, CV_64FC1), T };maskout_colors(c1, mask);save_structure(".\\Viewer\\structure.yml", rotations, motions, structure, c1);//system(".\\Viewer\\SfMViewer.exe");
}

總結

以上是生活随笔為你收集整理的基于SfM计算相机姿态的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。