网站如何做后台,四秒网站建设,网站策划书的撰写流程?,网站开发职业前景v0.1版本的oSLAM实现了基于orb特征点的简单视觉里程计#xff0c;通过连续两帧的rgbd数据实现相机相对位姿的估计。 #mermaid-svg-ibQfHFVHezQD5RWW {font-family:trebuchet ms,verdana,arial,sans-serif;font-size:16px;fill:#333;}#mermaid-svg-ibQfHFVHezQD5RW…v0.1版本的oSLAM实现了基于orb特征点的简单视觉里程计通过连续两帧的rgbd数据实现相机相对位姿的估计。 #mermaid-svg-ibQfHFVHezQD5RWW {font-family:"trebuchet ms",verdana,arial,sans-serif;font-size:16px;fill:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .error-icon{fill:#552222;}#mermaid-svg-ibQfHFVHezQD5RWW .error-text{fill:#552222;stroke:#552222;}#mermaid-svg-ibQfHFVHezQD5RWW .edge-thickness-normal{stroke-width:2px;}#mermaid-svg-ibQfHFVHezQD5RWW .edge-thickness-thick{stroke-width:3.5px;}#mermaid-svg-ibQfHFVHezQD5RWW .edge-pattern-solid{stroke-dasharray:0;}#mermaid-svg-ibQfHFVHezQD5RWW .edge-pattern-dashed{stroke-dasharray:3;}#mermaid-svg-ibQfHFVHezQD5RWW .edge-pattern-dotted{stroke-dasharray:2;}#mermaid-svg-ibQfHFVHezQD5RWW .marker{fill:#333333;stroke:#333333;}#mermaid-svg-ibQfHFVHezQD5RWW .marker.cross{stroke:#333333;}#mermaid-svg-ibQfHFVHezQD5RWW svg{font-family:"trebuchet ms",verdana,arial,sans-serif;font-size:16px;}#mermaid-svg-ibQfHFVHezQD5RWW .label{font-family:"trebuchet ms",verdana,arial,sans-serif;color:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .cluster-label text{fill:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .cluster-label span{color:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .label text,#mermaid-svg-ibQfHFVHezQD5RWW span{fill:#333;color:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .node rect,#mermaid-svg-ibQfHFVHezQD5RWW .node circle,#mermaid-svg-ibQfHFVHezQD5RWW .node ellipse,#mermaid-svg-ibQfHFVHezQD5RWW .node polygon,#mermaid-svg-ibQfHFVHezQD5RWW .node path{fill:#ECECFF;stroke:#9370DB;stroke-width:1px;}#mermaid-svg-ibQfHFVHezQD5RWW .node .label{text-align:center;}#mermaid-svg-ibQfHFVHezQD5RWW .node.clickable{cursor:pointer;}#mermaid-svg-ibQfHFVHezQD5RWW .arrowheadPath{fill:#333333;}#mermaid-svg-ibQfHFVHezQD5RWW .edgePath .path{stroke:#333333;stroke-width:2.0px;}#mermaid-svg-ibQfHFVHezQD5RWW 
.flowchart-link{stroke:#333333;fill:none;}#mermaid-svg-ibQfHFVHezQD5RWW .edgeLabel{background-color:#e8e8e8;text-align:center;}#mermaid-svg-ibQfHFVHezQD5RWW .edgeLabel rect{opacity:0.5;background-color:#e8e8e8;fill:#e8e8e8;}#mermaid-svg-ibQfHFVHezQD5RWW .cluster rect{fill:#ffffde;stroke:#aaaa33;stroke-width:1px;}#mermaid-svg-ibQfHFVHezQD5RWW .cluster text{fill:#333;}#mermaid-svg-ibQfHFVHezQD5RWW .cluster span{color:#333;}#mermaid-svg-ibQfHFVHezQD5RWW div.mermaidTooltip{position:absolute;text-align:center;max-width:200px;padding:2px;font-family:"trebuchet ms",verdana,arial,sans-serif;font-size:12px;background:hsl(80, 100%, 96.2745098039%);border:1px solid #aaaa33;border-radius:2px;pointer-events:none;z-index:100;}#mermaid-svg-ibQfHFVHezQD5RWW :root{--mermaid-font-family:"trebuchet ms",verdana,arial,sans-serif;} 读取RGBD数据 orb特征点提取 PnP/ICP位姿估计 这部分理论上相对简单一点咱们就直接上实现。
VisualOdometer类
VisualOdometer.hpp
#pragma once
#include vector
#include opencv2/opencv.hpp
#include Eigen/Core
#include Eigen/Geometry
#include Frame.hppnamespace oSLAM
{class VisualOdometer{private:std::vectorFrame frames;int max_key_points_num;double cx, cy, fx, fy;double depth_scale;std::vectorcv::DMatch matches;void feature_extract(const cv::Mat rgb, Frame frame);void calc_depth(const cv::Mat depth, Frame frame);void feature_match(const Frame ref, const Frame cur, std::vectorcv::DMatch matches);void calc_pose_relative(const Frame ref, Frame cur, const std::vectorcv::DMatch matches);void pose_estimation_3d2d(const std::vectorcv::Point3d pts1, const std::vectorcv::Point2d pts2, cv::Mat R, cv::Mat t);void pose_estimation_3d3d(const std::vectorcv::Point3d pts1, const std::vectorcv::Point3d pts2, cv::Mat R, cv::Mat t);public:void add(double timestamp, const cv::Mat rgb, const cv::Mat depth);void set_pose(int frame_idx, const cv::Mat R, const cv::Mat T);void get_pose(int frame_idx, cv::Mat R, cv::Mat T);void get_3d_points(int frame_idx, std::vectorcv::Point3d key_points_3d);VisualOdometer(int max_key_points_num, double cx, double cy, double fx, double fy, double depth_scale);~VisualOdometer();};
}VisualOdometer.cpp
#include VisualOdometer.hpp
#include Eigen/Core
#include Eigen/Dense
#include Eigen/SVDusing namespace oSLAM;
using namespace std;
using namespace cv;VisualOdometer::VisualOdometer(int max_key_points_num, double cx, double cy, double fx, double fy, double depth_scale)
{VisualOdometer::max_key_points_num max_key_points_num;VisualOdometer::cx cx;VisualOdometer::cy cy;VisualOdometer::fx fx;VisualOdometer::fy fy;VisualOdometer::depth_scale depth_scale;
}VisualOdometer::~VisualOdometer()
{
}void VisualOdometer::feature_extract(const cv::Mat rgb, Frame frame)
{PtrORB orb_detector ORB::create(max_key_points_num);orb_detector-detect(rgb, frame.key_points);orb_detector-compute(rgb, frame.key_points, frame.descriptors);
}void VisualOdometer::calc_depth(const cv::Mat depth, Frame frame)
{for (int i0;iframe.key_points.size();i){double x frame.key_points[i].pt.x;double y frame.key_points[i].pt.y;double dis depth.atuint16_t(int(y),int(x)) / depth_scale;frame.key_points_3d.push_back(Point3d((x-cx)/fx*dis, (y-cy)/fy*dis, dis));}
}void VisualOdometer::pose_estimation_3d2d(const std::vectorcv::Point3d pts1, const std::vectorcv::Point2d pts2, cv::Mat R, cv::Mat t)
{// 利用PnP求解位姿初值Mat K (Mat_double(3,3) fx, 0, cx, 0, fy, cy,0, 0, 1);Mat rvec, tvec;solvePnPRansac(pts1, pts2, K, Mat::zeros(1, 5, CV_64FC1), rvec, tvec);Rodrigues(rvec, R);t (Mat_double(3,1) tvec.atdouble(0), tvec.atdouble(1), tvec.atdouble(2));// 优化位姿和3D点坐标// ToDo
}void VisualOdometer::pose_estimation_3d3d(const std::vectorcv::Point3d pts1, const std::vectorcv::Point3d pts2, cv::Mat R, cv::Mat t)
{Point3d p1(0, 0, 0), p2(0, 0, 0); // center of massint N pts1.size();for (int i 0; i N; i){p1 pts1[i];p2 pts2[i];}p1 Point3d(Vec3d(p1) / N);p2 Point3d(Vec3d(p2) / N);vectorPoint3d q1(N), q2(N); // remove the centerfor (int i 0; i N; i){q1[i] pts1[i] - p1;q2[i] pts2[i] - p2;}// compute q1*q2^TEigen::Matrix3d W Eigen::Matrix3d::Zero();for (int i 0; i N; i){W Eigen::Vector3d(q1[i].x, q1[i].y, q1[i].z) * Eigen::Vector3d(q2[i].x, q2[i].y, q2[i].z).transpose();}// SVD on WEigen::JacobiSVDEigen::Matrix3d svd(W, Eigen::ComputeFullU | Eigen::ComputeFullV);Eigen::Matrix3d U svd.matrixU();Eigen::Matrix3d V svd.matrixV();Eigen::Matrix3d R_ U * (V.transpose());if (R_.determinant() 0){R_ -R_;}Eigen::Vector3d t_ Eigen::Vector3d(p1.x, p1.y, p1.z) - R_ * Eigen::Vector3d(p2.x, p2.y, p2.z);// convert to cv::MatR (Mat_double(3, 3) R_(0, 0), R_(0, 1), R_(0, 2),R_(1, 0), R_(1, 1), R_(1, 2),R_(2, 0), R_(2, 1), R_(2, 2));t (Mat_double(3, 1) t_(0, 0), t_(1, 0), t_(2, 0));
}void VisualOdometer::calc_pose_relative(const Frame ref, Frame cur, const std::vectorcv::DMatch matches)
{vectorPoint3d ref_key_points_3d, cur_key_points_3d;vectorPoint2d ref_key_points_2d, cur_key_points_2d;// 筛选3D点for(auto match : matches){Point3d ref_key_point_3d ref.key_points_3d[match.queryIdx];Point3d cur_key_point_3d cur.key_points_3d[match.trainIdx];if (ref_key_point_3d.z 0 || cur_key_point_3d.z 0){continue;}ref_key_points_3d.push_back(ref_key_point_3d);cur_key_points_3d.push_back(cur_key_point_3d);ref_key_points_2d.push_back(ref.key_points[match.queryIdx].pt);cur_key_points_2d.push_back(cur.key_points[match.trainIdx].pt);}// 3D点计算位姿Mat R, T;//pose_estimation_3d3d(cur_key_points_3d, ref_key_points_3d, R, T);pose_estimation_3d2d(ref_key_points_3d, cur_key_points_2d, R, T);cur.R R * ref.R;cur.T R * ref.T T;
}void VisualOdometer::feature_match(const Frame ref, const Frame cur, std::vectorcv::DMatch matches)
{vectorDMatch initial_matches;BFMatcher matcher(NORM_HAMMING);matcher.match(ref.descriptors, cur.descriptors, initial_matches);double min_dis initial_matches[0].distance;for(auto match : initial_matches){if (match.distance min_dis)min_dis match.distance;}matches.clear();for(auto match : initial_matches){if (match.distance MAX(min_dis * 2, 30))matches.push_back(match);}
}void VisualOdometer::add(double timestamp, const Mat rgb, const Mat depth)
{Frame frame;frame.time_stamp timestamp;frame.rgb rgb.clone();frame.depth depth.clone();// 提取rgb图像的orb特征点VisualOdometer::feature_extract(rgb, frame);// 提取关键点的深度信息VisualOdometer::calc_depth(depth, frame);// 如果不是第一帧if (VisualOdometer::frames.size() 0){frame.R Mat::eye(3,3,CV_64FC1);frame.T Mat::zeros(3,1,CV_64FC1);}else{// 当前帧与上一帧特征点匹配VisualOdometer::feature_match(VisualOdometer::frames[VisualOdometer::frames.size()-1], frame,VisualOdometer::matches);// 计算相对位姿关系VisualOdometer::calc_pose_relative(VisualOdometer::frames[VisualOdometer::frames.size()-1], frame,VisualOdometer::matches);}// 将当前帧加入队列VisualOdometer::frames.push_back(frame);
}void VisualOdometer::get_pose(int frame_idx, Mat R, Mat T)
{if (VisualOdometer::frames.size() abs(frame_idx)){R Mat();T Mat();return;}else{if (frame_idx 0){R VisualOdometer::frames[frame_idx].R.clone();T VisualOdometer::frames[frame_idx].T.clone();}else{R VisualOdometer::frames[VisualOdometer::frames.size() frame_idx].R.clone();T VisualOdometer::frames[VisualOdometer::frames.size() frame_idx].T.clone();}}
}void VisualOdometer::set_pose(int frame_idx, const cv::Mat R, const cv::Mat T)
{if (VisualOdometer::frames.size() abs(frame_idx)){return;}else{if (frame_idx 0){VisualOdometer::frames[frame_idx].R R.clone();VisualOdometer::frames[frame_idx].T T.clone();}else{VisualOdometer::frames[VisualOdometer::frames.size() frame_idx].R R.clone();VisualOdometer::frames[VisualOdometer::frames.size() frame_idx].T T.clone();}}
}void VisualOdometer::get_3d_points(int frame_idx, std::vectorcv::Point3d key_points_3d)
{if (VisualOdometer::frames.size() abs(frame_idx)){key_points_3d.clear();return;}else{if (frame_idx 0){key_points_3d VisualOdometer::frames[frame_idx].key_points_3d;}else{key_points_3d VisualOdometer::frames[VisualOdometer::frames.size() frame_idx].key_points_3d;}}
}Frame类
#pragma once
#include vector
#include opencv2/opencv.hppnamespace oSLAM
{class Frame{public:std::vectorcv::KeyPoint key_points;cv::Mat descriptors;std::vectorcv::Point3d key_points_3d;cv::Mat R;cv::Mat T;cv::Mat rgb;cv::Mat depth;double time_stamp;};
}最终在rgbd_dataset_freiburg2_desk数据集上测试了一下效果，感觉有点拉，跑着跑着就飘了（红色的是真值，绿色的是计算结果），等实现了完整的VO再回来分析一下。 结果展示使用了Pangolin和yuntianli91的pangolin_tutorial