#include <iostream>
#include <chrono>

#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

#include <Eigen/Core>

#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>

#include <sophus/se3.hpp>
// NOTE(review): file-scope using-directives are kept to match the original
// slambook2 example style, but they are discouraged in headers/large codebases.
using namespace std;
using namespace cv;
// Detect keypoints in both images and compute filtered descriptor matches.
// Outputs: keypoints_1 / keypoints_2 (features of each image) and matches
// (correspondences where queryIdx indexes keypoints_1, trainIdx keypoints_2).
void find_feature_matches(
const Mat &img_1, const Mat &img_2,
std::vector< KeyPoint> &keypoints_1,
std::vector< KeyPoint> &keypoints_2,
std::vector< DMatch> &matches);
// Convert a pixel coordinate to a normalized camera-plane coordinate
// using the intrinsic matrix K.
Point2d pixel2cam(const Point2d &p, const Mat &K);
// BA by g2o
// std::vector of fixed-size Eigen types must use Eigen::aligned_allocator so
// the SSE-aligned storage of each element stays correctly aligned.
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator< Eigen::Vector2d >> VecVector2d;// vector storing 2D coordinates
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator< Eigen::Vector3d >> VecVector3d;// vector storing 3D coordinates
// Refine the camera pose from 3D-2D correspondences with a g2o graph.
// points_3d: landmark positions; points_2d: their pixel observations;
// K: camera intrinsics; pose: estimated SE(3) camera pose
// (presumably also used as the initial guess — confirm in the definition).
void bundleAdjustmentG2O(
const VecVector3d &points_3d,
const VecVector2d &points_2d,
const Mat &K,
Sophus::SE3d &pose
);
// BA by gauss-newton
// Same PnP refinement solved with a hand-written Gauss-Newton iteration.
void bundleAdjustmentGaussNewton(
const VecVector3d &points_3d,
const VecVector2d &points_2d,
const Mat &K,
Sophus::SE3d &pose
);
int main() {
//-- 读取图像
Mat img_1 = imread("/home/hotfinda/example_slambook/slambook2-master/ch7/1.png", IMREAD_COLOR);
Mat img_2 = imread("/home/hotfinda/example_slambook/slambook2-master/ch7/2.png", IMREAD_COLOR);
assert(img_1.data && img_2.data && "Can not load images!");
vector< KeyPoint> keypoints_1, keypoints_2;
vector< DMatch> matches;
find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches); // First match features across the two 2D images; then lift one image's keypoints to 3D to set up the problem
cout << "一共找到了" << matches.size() << "组匹配点" << endl;
// 建立3D点
Mat d1 = imread("/home/hotfinda/example_slambook/slambook2-master/ch7/1_depth.png", -1); // 深度图为16位无符号数,单通道图像
Mat K = (Mat_< double>(3, 3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);
vector< Point3f> pts_3d;
vector< Point2f> pts_2d;
for (DMatch m:matches) {
ushort d = d1.ptr< unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)]; // read the pixel depth at the keypoint location from the depth image
if (d == 0) // bad depth
continue;
float dd = d / 5000.0;
Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt, K);
pts_3d.