
Fisheye Camera: Monocular and Stereo Calibration


To be precise, this is monocular and stereo calibration of a camera using the fisheye camera model.
The full code is pasted below. For a non-fisheye (ordinary pinhole) camera you only need to swap in the corresponding function names and arguments; those pinhole calls are already present in the listing as commented-out lines, and a short sketch of them follows.
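For orientation, here is a minimal sketch of what those pinhole substitutions look like, gathered into one hypothetical helper. The container names objectv / imagevL / imagevR mirror the full program below; the function name pinholeStereoCalibrate is only illustrative and not part of the original post.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

// Sketch: the pinhole equivalents of the fisheye calls used in the full program.
void pinholeStereoCalibrate(const std::vector<std::vector<cv::Point3f>>& objectv,
                            const std::vector<std::vector<cv::Point2f>>& imagevL,
                            const std::vector<std::vector<cv::Point2f>>& imagevR,
                            cv::Size imgSize)
{
    cv::Mat KL, DL, KR, DR;                 // intrinsics and (k1, k2, p1, p2, k3) distortion
    std::vector<cv::Mat> rvecs, tvecs;
    double errL = cv::calibrateCamera(objectv, imagevL, imgSize, KL, DL, rvecs, tvecs);
    double errR = cv::calibrateCamera(objectv, imagevR, imgSize, KR, DR, rvecs, tvecs);
    std::cout << "reprojection errors: " << errL << " " << errR << std::endl;

    // Left-to-right extrinsics (intrinsics fixed by default).
    cv::Mat R, t, E, F;
    cv::stereoCalibrate(objectv, imagevL, imagevR, KL, DL, KR, DR, imgSize, R, t, E, F);

    // Rectification transforms and remap tables.
    cv::Mat Rl, Rr, Pl, Pr, Q;
    cv::stereoRectify(KL, DL, KR, DR, imgSize, R, t, Rl, Rr, Pl, Pr, Q);
    cv::Mat lmapx, lmapy, rmapx, rmapy;
    cv::initUndistortRectifyMap(KL, DL, Rl, Pl, imgSize, CV_32F, lmapx, lmapy);
    cv::initUndistortRectifyMap(KR, DR, Rr, Pr, imgSize, CV_32F, rmapx, rmapy);
}

Note that the pinhole path returns the usual five-element (k1, k2, p1, p2, k3) distortion vector, whereas the fisheye path used below returns four θ-polynomial coefficients, which is the main reason the two sets of functions cannot be mixed.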

2019-12-7 21:47:07
Update: after thinking about this some more, the OpenCV fisheye model appears to follow "A Generic Camera Model and Calibration Method for Conventional, Wide-Angle, and Fish-Eye Lenses". Since it is a generic model it should also apply to ordinary cameras, which means you could take any camera and calibrate it directly with the fisheye model. I still need to verify this.
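For reference, the projection that cv::fisheye fits is the θ-polynomial of that paper: θ_d = θ·(1 + k1·θ² + k2·θ⁴ + k3·θ⁶ + k4·θ⁸), applied to the angle between the ray and the optical axis. The small sketch below is only a hand-written check of that model (projectFisheye is not an OpenCV function; zero skew is assumed):

#include <opencv2/core.hpp>
#include <cmath>

// Project a 3D point (camera coordinates, Z > 0) with the OpenCV fisheye model.
cv::Point2d projectFisheye(const cv::Point3d& P, const cv::Matx33d& K, const cv::Vec4d& D)
{
    double a = P.x / P.z, b = P.y / P.z;            // normalized pinhole coordinates
    double r = std::sqrt(a * a + b * b);
    double theta = std::atan(r);                     // angle from the optical axis
    double t2 = theta * theta;
    double theta_d = theta * (1 + D[0] * t2 + D[1] * t2 * t2
                                + D[2] * t2 * t2 * t2 + D[3] * t2 * t2 * t2 * t2);
    double scale = (r > 1e-8) ? theta_d / r : 1.0;   // distorted radius replaces r
    double xd = a * scale, yd = b * scale;
    return cv::Point2d(K(0, 0) * xd + K(0, 2),       // u = fx * x' + cx (skew assumed 0)
                       K(1, 1) * yd + K(1, 2));      // v = fy * y' + cy
}

Because the distortion is a polynomial in the incidence angle θ rather than in the image radius, small k1..k4 values reproduce a near-pinhole lens; that is what makes the model "generic" and why calibrating an ordinary camera with it should, in principle, also work.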

#include <opencv2/opencv.hpp>
#include <iostream>
#include <dirent.h>
#include <fstream>

using namespace std;

bool hasEnding(std::string const &fullString, std::string const &ending)
{
    if (fullString.length() >= ending.length()) {
        return (0 == fullString.compare(fullString.length() - ending.length(), ending.length(), ending));
    } else {
        return false;
    }
}

// The file extension must be .jpg or .png
void getImgNames(const char* imgsFolderL, const char* imgsFolderR,
                 std::vector<std::string> &imgNamesL, std::vector<std::string> &imgNamesR)
{
    DIR* dp;
    struct dirent* dirp;
    dp = opendir(imgsFolderL);
    if(dp == NULL){
        std::cout << "open directory error: " << imgsFolderL << std::endl;
        return;
    }
    while((dirp = readdir(dp)) != NULL){
        std::string filename = dirp->d_name;
        if(filename.at(0) == '.'){
            continue;
        } else if(!hasEnding(filename, ".jpg") && !hasEnding(filename, ".png")) {
            // skip files that are neither .jpg nor .png
            continue;
        }
        filename = "/" + filename;
        std::cout << (imgsFolderL + filename) << std::endl;
        std::cout << (imgsFolderR + filename) << std::endl;
        imgNamesL.push_back(imgsFolderL + filename);
        imgNamesR.push_back(imgsFolderR + filename);
    }
    closedir(dp);
}

static bool saveCameraParams(const std::string &filename,
                             int &leftHeight, int &leftWidth, cv::Mat &leftD, cv::Mat &leftK, cv::Mat &leftR, cv::Mat leftP,
                             int &rightHeight, int &rightWidth, cv::Mat &rightD, cv::Mat &rightK, cv::Mat &rightR, cv::Mat &rightP,
                             cv::Mat &trans, cv::Mat &Trl)
{
    std::ofstream fs(filename.c_str());
    if(!fs.is_open()){
        return false;
    }
    /* std::time_t tt; */
    /* std::time(&tt); */
    /* struct tm *t2 = localtime(&tt); */
    /* char buf[1024]; */
    /* std::strftime(buf, sizeof(buf), "%c", t2); */
    /* std::string str = "1.0"; */
    fs << "%YAML:1.0" << std::endl;
    fs << std::endl;
    fs << "#-----------------------------------------" << std::endl;
    fs << "# Camera Parameters. Adjust them!" << std::endl;
    fs << "#-----------------------------------------" << std::endl;
    fs << std::endl;
    /* fs << "#calibration_time " << buf << std::endl; */
    fs << std::endl;
    fs << "Camera.fx: " << leftP.ptr<double>(0)[0] << std::endl;
    fs << "Camera.fy: " << leftP.ptr<double>(1)[1] << std::endl;
    fs << "Camera.cx: " << leftP.ptr<double>(0)[2] << std::endl;
    fs << "Camera.cy: " << leftP.ptr<double>(1)[2] << std::endl;
    fs << std::endl;
    fs << "Camera.k1: " << 0.0 << std::endl;
    fs << "Camera.k2: " << 0.0 << std::endl;
    fs << "Camera.p1: " << 0.0 << std::endl;
    fs << "Camera.p2: " << 0.0 << std::endl;
    fs << std::endl;
    fs << "Camera.width: " << leftWidth << std::endl;
    fs << "Camera.height: " << leftHeight << std::endl;
    fs << std::endl;
    fs << "#Camera frames per second" << std::endl;
    fs << "Camera.fps: " << 20 << std::endl;
    fs << std::endl;
    fs << "#stereo baseline × Camera.fx" << std::endl;
    float bf = std::fabs(trans.ptr<double>(0)[0] * leftP.ptr<double>(0)[0]);
    fs << "Camera.bf: " << bf << std::endl;
    fs << std::endl;
    fs << "# Color order of image (0 BGR, 1 RGB, It is ignored if images are grayscale)" << std::endl;
    fs << "Camera.RGB: " << 1 << std::endl;
    fs << std::endl;
    fs << "# Close/Far threshold. Baseline times." << std::endl;
    fs << "ThDepth: " << 35 << std::endl;
    fs << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << "#Stereo Rectification. Camera.fx, Camera.fy must be the same as in LEFT.P" << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << std::endl;
    fs << "LEFT.height: " << leftHeight << std::endl;
    fs << "LEFT.width: " << leftWidth << std::endl;
    // the fisheye model produces 4 distortion coefficients; write them as one row
    cv::Mat rowLeftD = leftD.reshape(0, 1);
    fs << "LEFT.D: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 1 << std::endl;
    fs << "   cols: " << rowLeftD.cols << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowLeftD << std::endl;
    cv::Mat rowLeftK = leftK.reshape(0, 1);
    fs << "LEFT.K: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 3 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowLeftK << std::endl;
    cv::Mat rowLeftR = leftR.reshape(0, 1);
    fs << "LEFT.R: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 3 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowLeftR << std::endl;
    cv::Mat rowLeftP = leftP.reshape(0, 1);
    fs << "LEFT.P: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 4 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowLeftP << std::endl;
    fs << std::endl;
    fs << "RIGHT.height: " << rightHeight << std::endl;
    fs << "RIGHT.width: " << rightWidth << std::endl;
    cv::Mat rowRightD = rightD.reshape(0, 1);
    fs << "RIGHT.D: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 1 << std::endl;
    fs << "   cols: " << rowRightD.cols << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowRightD << std::endl;
    cv::Mat rowRightK = rightK.reshape(0, 1);
    fs << "RIGHT.K: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 3 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowRightK << std::endl;
    cv::Mat rowRightR = rightR.reshape(0, 1);
    fs << "RIGHT.R: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 3 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowRightR << std::endl;
    cv::Mat rowRightP = rightP.reshape(0, 1);
    fs << "RIGHT.P: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 3 << std::endl;
    fs << "   cols: " << 4 << std::endl;
    fs << "   dt: " << 'd' << std::endl;
    fs << "   data: " << rowRightP << std::endl;
    fs << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << "#ORB Parameters" << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << std::endl;
    fs << "ORBextractor.nFeatures: " << 1200 << std::endl;
    fs << "ORBextractor.scaleFactor: " << 1.2 << std::endl;
    fs << "ORBextractor.nLevels: " << 8 << std::endl;
    fs << "ORBextractor.iniThFAST: " << 20 << std::endl;
    fs << "ORBextractor.minThFAST: " << 7 << std::endl;
    fs << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << "#Viewer Parameters" << std::endl;
    fs << "#-------------------------------------------" << std::endl;
    fs << std::endl;
    fs << "bUseViewer: " << 0 << std::endl;
    fs << "Viewer.KeyFrameSize: " << 0.05 << std::endl;
    fs << "Viewer.KeyFrameLineWidth: " << 1 << std::endl;
    fs << "Viewer.GraphLineWidth: " << 0.9 << std::endl;
    fs << "Viewer.PointSize: " << 2 << std::endl;
    fs << "Viewer.CameraSize: " << 0.08 << std::endl;
    fs << "Viewer.CameraLineWidth: " << 3 << std::endl;
    fs << "Viewer.ViewpointX: " << 0 << std::endl;
    fs << "Viewer.ViewpointY: " << -0.7 << std::endl;
    fs << "Viewer.ViewpointZ: " << -1.8 << std::endl;
    fs << "Viewer.ViewpointF: " << 500 << std::endl;
    cv::Mat rowTrl = Trl.reshape(0, 1);
    fs << "############" << std::endl;
    fs << "#the extrinsic params from left camera to right camera !" << std::endl;
    fs << "#Or the extrinsic params from left to the main camera in VIO project!" << std::endl;
    fs << "LEFT.RIGHT: !!opencv-matrix" << std::endl;
    fs << "   rows: " << 4 << std::endl;
    fs << "   cols: " << 4 << std::endl;
    fs << "   dt: " << 'f' << std::endl;
    fs << "   data: " << rowTrl << std::endl;
    fs << std::endl;
    std::cout << "saved the parameters in " << filename << std::endl;
    return true;
}

// helper: rotation matrix from roll/pitch/yaw (ZYX convention); unused in main
cv::Mat RPY2mat(double roll, double pitch, double yaw)
{
    cv::Mat m(3, 3, CV_64FC1);
    double cr = std::cos(roll);
    double sr = std::sin(roll);
    double cp = std::cos(pitch);
    double sp = std::sin(pitch);
    double cy = std::cos(yaw);
    double sy = std::sin(yaw);
    m.ptr<double>(0)[0] = cy * cp;
    m.ptr<double>(0)[1] = cy * sp * sr - sy * cr;
    m.ptr<double>(0)[2] = cy * sp * cr + sy * sr;
    m.ptr<double>(1)[0] = sy * cp;
    m.ptr<double>(1)[1] = sy * sp * sr + cy * cr;
    m.ptr<double>(1)[2] = sy * sp * cr - cy * sr;
    m.ptr<double>(2)[0] = -sp;
    m.ptr<double>(2)[1] = cp * sr;
    m.ptr<double>(2)[2] = cp * cr;
    return m.clone();
}

// helper: recover roll/pitch/yaw from a rotation matrix; unused in main
void mat2RPY(const cv::Mat &m, double& roll, double& pitch, double& yaw)
{
    roll = std::atan2(m.ptr<double>(2)[1], m.ptr<double>(2)[2]);
    pitch = std::atan2(-m.ptr<double>(2)[0],
                       std::sqrt(m.ptr<double>(2)[1] * m.ptr<double>(2)[1] + m.ptr<double>(2)[2] * m.ptr<double>(2)[2]));
    yaw = std::atan2(m.ptr<double>(1)[0], m.ptr<double>(0)[0]);
}

int main(int argc, char *argv[])
{
    // argv[1]: path of the folder containing the left-camera images
    // argv[2]: path of the folder containing the right-camera images
    // The left and right images of a pair must have the same file name.
    std::vector<std::string> imgNamesL, imgNamesR;
    getImgNames(argv[1], argv[2], imgNamesL, imgNamesR);

    const int board_w = 7;
    const int board_h = 5;
    const int NPoints = board_w * board_h;
    const float boardSize = 0.034;
    cv::Size chessBoardSize = cv::Size(board_w, board_h);
    std::vector<cv::Point3f> object;
    for( int j = 0; j < NPoints; j++ ){
        object.push_back(cv::Point3f((j % board_w) * boardSize, (j / board_w) * boardSize, 0));
    }

    std::vector<std::vector<cv::Point3f> > objectv;
    std::vector<std::vector<cv::Point2f> > imagevL, imagevR;
    int height, width;
    cv::Mat imageL, grayImageL, imageR, grayImageR;
    std::vector<cv::Mat> imgsL, imgsR;
    for( int i = 0; i < (int)imgNamesL.size(); i++ ){
        imageL = cv::imread(imgNamesL[i]);
        imageR = cv::imread(imgNamesR[i]);
        imgsL.push_back(imageL);
        imgsR.push_back(imageR);
        if(imageL.empty()){
            std::cout << imgNamesL[i] << " is empty!" << std::endl;
            break;
        }
        if(imageR.empty()){
            std::cout << imgNamesR[i] << " is empty!" << std::endl;
            break;
        }
        height = imageL.rows;
        width = imageL.cols;
        if(imageL.channels() == 3){
            cv::cvtColor(imageL, grayImageL, CV_BGR2GRAY);
        }
        if(imageR.channels() == 3){
            cv::cvtColor(imageR, grayImageR, CV_BGR2GRAY);
        }
        // quick chessboard presence check via the legacy C API (OpenCV 3.x headers)
        IplImage tempgrayL = grayImageL;
        IplImage tempgrayR = grayImageR;
        bool findchessboardL = cvCheckChessboard(&tempgrayL, chessBoardSize);
        bool findchessboardR = cvCheckChessboard(&tempgrayR, chessBoardSize);
        std::vector<cv::Point2f> tempCornersL, tempCornersR;
        if(findchessboardL && findchessboardR){
            // flag 3 = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE
            bool find_corners_resultL = cv::findChessboardCorners(grayImageL, chessBoardSize, tempCornersL, 3);
            bool find_corners_resultR = cv::findChessboardCorners(grayImageR, chessBoardSize, tempCornersR, 3);
            if(find_corners_resultL && find_corners_resultR){
                std::cout << "Capture " << imgNamesL[i] << std::endl;
                cv::cornerSubPix(grayImageL, tempCornersL, cv::Size(5, 5), cv::Size(-1, -1),
                                 cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
                cv::cornerSubPix(grayImageR, tempCornersR, cv::Size(5, 5), cv::Size(-1, -1),
                                 cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
                cv::drawChessboardCorners(imageL, chessBoardSize, tempCornersL, find_corners_resultL);
                cv::drawChessboardCorners(imageR, chessBoardSize, tempCornersR, find_corners_resultR);
                cv::imshow("L", imageL);
                cv::imshow("R", imageR);
                cv::waitKey();
                objectv.push_back(object);
                imagevL.push_back(tempCornersL);
                imagevR.push_back(tempCornersR);
            }else{
                std::cout << "find_corners_resultL: " << find_corners_resultL
                          << " find_corners_resultR: " << find_corners_resultR << std::endl;
                cv::imshow("L", imageL);
                cv::imshow("R", imageR);
                cv::waitKey();
            }
        }else{
            std::cout << "findchessboardL: " << findchessboardL
                      << " findchessboardR: " << findchessboardR << std::endl;
            cv::imshow("L", imageL);
            cv::imshow("R", imageR);
            cv::waitKey();
        }
        tempCornersL.clear();
        tempCornersR.clear();
    }

    // Calibrate the intrinsics of the left and right cameras
    std::vector<cv::Mat> rvecsL, tvecsL, rvecsR, tvecsR;
    cv::Mat intrinsicL, distortionL, intrinsicR, distortionR;
    //double errL = cv::calibrateCamera(objectv, imagevL, cv::Size(width, height), intrinsicL, distortionL, rvecsL, tvecsL);
    int flag = 0;
    flag |= cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC;
    //flag |= cv::fisheye::CALIB_CHECK_COND;
    flag |= cv::fisheye::CALIB_FIX_SKEW;
    //flag |= cv::fisheye::CALIB_USE_INTRINSIC_GUESS;
    // TermCriteria type 3 = COUNT + EPS
    double errL = cv::fisheye::calibrate(objectv, imagevL, cv::Size(width, height), intrinsicL, distortionL,
                                         cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
    // cv::Mat mapx, mapy;
    // cv::Mat corrected;
    // cv::Size corrected_size(width, height);
    // cv::fisheye::initUndistortRectifyMap(intrinsicL, distortionL, cv::Matx33d::eye(), intrinsicL,
    //                                      corrected_size, CV_16SC2, mapx, mapy);
    std::cout << "Left camera intrinsics -------------------------" << std::endl;
    std::cout << "errL: " << errL << std::endl;
    std::cout << "intrinsicL: " << std::endl;
    std::cout << intrinsicL << std::endl;
    std::cout << "distortionL: " << std::endl;
    std::cout << distortionL << std::endl;
    std::cout << "Right camera intrinsics -------------------------" << std::endl;
    //double errR = cv::calibrateCamera(objectv, imagevR, cv::Size(width, height), intrinsicR, distortionR, rvecsR, tvecsR);
    double errR = cv::fisheye::calibrate(objectv, imagevR, cv::Size(width, height), intrinsicR, distortionR,
                                         cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
    std::cout << "errR: " << errR << std::endl;
    std::cout << "intrinsicR: " << std::endl;
    std::cout << intrinsicR << std::endl;
    std::cout << "distortionR: " << std::endl;
    std::cout << distortionR << std::endl;

    // Calibrate the extrinsics between the two cameras
    cv::Mat E, F, R, t;
    cv::Size imgSize(width, height);
    //double errLR = cv::stereoCalibrate(objectv, imagevL, imagevR, intrinsicL, distortionL, intrinsicR, distortionR,
    //                                   imgSize, R, t, E, F);
    double errLR = cv::fisheye::stereoCalibrate(objectv, imagevL, imagevR, intrinsicL, distortionL,
                                                intrinsicR, distortionR, imgSize, R, t);
    std::cout << "Extrinsics --------------------------------" << std::endl;
    std::cout << "errLR: " << errLR << std::endl;
    std::cout << "R: " << std::endl;
    std::cout << R << std::endl;
    std::cout << "t: " << std::endl;
    std::cout << t << std::endl;

    // Stereo rectification
    cv::Mat Rl, Rr, Pl, Pr, Q;
    //cv::stereoRectify(intrinsicL, distortionL, intrinsicR, distortionR, imgSize, R, t, Rl, Rr, Pl, Pr, Q);
    cv::fisheye::stereoRectify(intrinsicL, distortionL, intrinsicR, distortionR, imgSize,
                               R, t, Rl, Rr, Pl, Pr, Q, cv::CALIB_ZERO_DISPARITY, imgSize);
    cv::Mat Trl = cv::Mat::eye(4, 4, CV_64FC1);
    R.copyTo(Trl.rowRange(0, 3).colRange(0, 3));
    t.copyTo(Trl.rowRange(0, 3).col(3));
    std::string fileName = "stereoRectify.yaml";
    saveCameraParams(fileName, height, width, distortionL, intrinsicL, Rl, Pl,
                     height, width, distortionR, intrinsicR, Rr, Pr, t, Trl);

    cv::Mat lmapx, lmapy, rmapx, rmapy;
    //cv::initUndistortRectifyMap(intrinsicL, distortionL, Rl, Pl, imgSize, CV_32F, lmapx, lmapy);
    //cv::initUndistortRectifyMap(intrinsicR, distortionR, Rr, Pr, imgSize, CV_32F, rmapx, rmapy);
    cv::fisheye::initUndistortRectifyMap(intrinsicL, distortionL, Rl, Pl, imgSize, CV_32F, lmapx, lmapy);
    cv::fisheye::initUndistortRectifyMap(intrinsicR, distortionR, Rr, Pr, imgSize, CV_32F, rmapx, rmapy);

    // Visual check: show rectified pairs side by side with horizontal epipolar lines
    int num = 0;
    cv::Mat imLeftRect, imRightRect;
    while(num < (int)imgsL.size()){
        cv::Mat imgL = imgsL[num];
        cv::Mat imgR = imgsR[num];
        if(imgL.empty() || imgR.empty()){
            break;
        }
        cv::remap(imgL, imLeftRect, lmapx, lmapy, cv::INTER_LINEAR);
        cv::remap(imgR, imRightRect, rmapx, rmapy, cv::INTER_LINEAR);
        cv::Mat imgRectShow;
        imgRectShow.create(height, width * 2, CV_8UC3);
        imLeftRect.copyTo(imgRectShow.rowRange(0, height).colRange(0, width));
        imRightRect.copyTo(imgRectShow.rowRange(0, height).colRange(width, 2 * width));
        int allLineNum = 20;
        for( int lineNum = 0; lineNum < allLineNum; lineNum++ ){
            cv::Point2f pt1(0, lineNum * 20);
            cv::Point2f pt2(imgRectShow.cols - 1, lineNum * 20);
            cv::line(imgRectShow, pt1, pt2, cv::Scalar(0, 0, 255));
        }
        cv::imshow("imgRectShow", imgRectShow);
        cv::waitKey(0);
        num++;
    }
    return 0;
}
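The loop above only checks the stereo rectification visually. If you also want a quick look at the monocular result, a minimal sketch that could be appended at the end of main() is shown below. It reuses intrinsicL, distortionL, imgSize and imgsL from the program; the balance value 0.0 (crop to valid pixels) is an assumption.

    // Sketch: undistort one left image using only the monocular fisheye calibration.
    cv::Mat Knew;
    cv::fisheye::estimateNewCameraMatrixForUndistortRectify(
            intrinsicL, distortionL, imgSize, cv::Matx33d::eye(), Knew, 0.0);
    cv::Mat undistorted;
    cv::fisheye::undistortImage(imgsL[0], undistorted, intrinsicL, distortionL, Knew);  // assumes imgsL is non-empty
    cv::imshow("undistorted", undistorted);
    cv::waitKey(0);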
