direct_sparse.cpp
#include <iostream>
#include <fstream>
#include <list>
#include <vector>
#include <chrono>
#include <ctime>
#include <climits>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/optimization_algorithm_levenberg.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
#include <g2o/core/robust_kernel.h>
#include <g2o/types/sba/types_six_dof_expmap.h>
using namespace std;
using namespace g2o;
/********************************************
* This program demonstrates the sparse direct method on RGB-D data
********************************************/
// One measurement: a 3D point in the world frame together with a grayscale value
struct Measurement // the struct stores a 3D point and the grayscale value observed at its pixel in the reference frame
{
Measurement ( Eigen::Vector3d p, float g ) : pos_world ( p ), grayscale ( g ) {}
Eigen::Vector3d pos_world;
float grayscale;
};
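// For illustration (hypothetical values, not from the dataset): a point one
// metre in front of the reference camera seen with intensity 128 would be
// stored as  measurements.push_back ( Measurement ( Eigen::Vector3d ( 0,0,1 ), 128.0f ) );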
inline Eigen::Vector3d project2Dto3D ( int x, int y, int d, float fx, float fy, float cx, float cy, float scale ) // back-project a pixel with depth to a 3D point in camera coordinates (not normalized coordinates)
// the depth scale is set to 1000
{
float zz = float ( d ) /scale;
float xx = zz* ( x-cx ) /fx;
float yy = zz* ( y-cy ) /fy;
return Eigen::Vector3d ( xx, yy, zz );
}
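// The back-projection above is the inverse pinhole model: with depth Z = d/scale,
//   X = ( x - cx ) * Z / fx,   Y = ( y - cy ) * Z / fy.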
inline Eigen::Vector2d project3Dto2D ( float x, float y, float z, float fx, float fy, float cx, float cy )
// project a 3D point in camera coordinates (not normalized) to 2D pixel coordinates
{
float u = fx*x/z+cx;
float v = fy*y/z+cy;
return Eigen::Vector2d ( u,v );
}
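// This is the forward pinhole model, u = fx*X/Z + cx, v = fy*Y/Z + cy, so the
// two helpers invert each other. A sanity check with illustrative numbers:
//   Eigen::Vector3d p = project2Dto3D ( 320, 240, 1000, fx, fy, cx, cy, 1000 ); // Z = 1m
//   Eigen::Vector2d uv = project3Dto2D ( p[0], p[1], p[2], fx, fy, cx, cy );   // back to (320,240)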
// Pose estimation with the direct method
// Input: measurements (grayscale values of space points), the new gray image, camera intrinsics; Output: camera pose
// Returns: true on success, false on failure
bool poseEstimationDirect ( const vector<Measurement>& measurements, cv::Mat* gray, Eigen::Matrix3f& intrinsics, Eigen::Isometry3d& Tcw );
// project a 3D point onto the image plane; the error is the photometric error
// a unary edge with one vertex, VertexSE3Expmap (the camera pose)
class EdgeSE3ProjectDirect: public BaseUnaryEdge< 1, double, VertexSE3Expmap>
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
EdgeSE3ProjectDirect() {}
EdgeSE3ProjectDirect ( Eigen::Vector3d point, float fx, float fy, float cx, float cy, cv::Mat* image )
: x_world_ ( point ), fx_ ( fx ), fy_ ( fy ), cx_ ( cx ), cy_ ( cy ), image_ ( image )
{}
virtual void computeError()
{
const VertexSE3Expmap* v =static_cast<const VertexSE3Expmap*> ( _vertices[0] );
Eigen::Vector3d x_local = v->estimate().map ( x_world_ );
float x = x_local[0]*fx_/x_local[2] + cx_;
float y = x_local[1]*fy_/x_local[2] + cy_;
// check that (x,y) lies inside the image, leaving a 4-pixel margin
if ( ( x-4 ) <0 || ( x+4 ) >image_->cols || ( y-4 ) <0 || ( y+4 ) >image_->rows )
{
_error ( 0,0 ) = 0.0;
this->setLevel ( 1 ); // move this edge to level 1 so the optimizer (which runs on level 0) skips it
}
else
{
_error ( 0,0 ) = getPixelValue ( x,y ) - _measurement; // grayscale sampled at the reprojected pixel minus the reference grayscale
}
}
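// In symbols, the error computed above is
//   e = I_cur ( pi ( T * p_w ) ) - I_ref,
// where _measurement holds the reference intensity I_ref and getPixelValue
// samples the current image at the reprojected pixel pi ( T * p_w ).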
// plus in manifold
virtual void linearizeOplus( )
{
if ( level() == 1 )
{
_jacobianOplusXi = Eigen::Matrix<double, 1, 6>::Zero();
return;
}
VertexSE3Expmap* vtx = static_cast<VertexSE3Expmap*> ( _vertices[0] );
Eigen::Vector3d xyz_trans = vtx->estimate().map ( x_world_ ); // q in book
double x = xyz_trans[0];
double y = xyz_trans[1];
double invz = 1.0/xyz_trans[2];
double invz_2 = invz*invz;
float u = x*fx_*invz + cx_;
float v = y*fy_*invz + cy_;
// jacobian from se3 to u,v
// NOTE that in g2o the Lie algebra is (\omega, \epsilon), where \omega is so(3) and \epsilon the translation
Eigen::Matrix<double, 2, 6> jacobian_uv_ksai;
// derivative of the pixel coordinates w.r.t. the se(3) perturbation
jacobian_uv_ksai ( 0,0 ) = - x*y*invz_2 *fx_;
jacobian_uv_ksai ( 0,1 ) = ( 1+ ( x*x*invz_2 ) ) *fx_;
jacobian_uv_ksai ( 0,2 ) = - y*invz *fx_;
jacobian_uv_ksai ( 0,3 ) = invz *fx_;
jacobian_uv_ksai ( 0,4 ) = 0;
jacobian_uv_ksai ( 0,5 ) = -x*invz_2 *fx_;
jacobian_uv_ksai ( 1,0 ) = - ( 1+y*y*invz_2 ) *fy_;
jacobian_uv_ksai ( 1,1 ) = x*y*invz_2 *fy_;
jacobian_uv_ksai ( 1,2 ) = x*invz *fy_;
jacobian_uv_ksai ( 1,3 ) = 0;
jacobian_uv_ksai ( 1,4 ) = invz *fy_;
jacobian_uv_ksai ( 1,5 ) = -y*invz_2 *fy_;
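// Written out, with (x, y, z) the point in the current camera frame:
//   d(u,v)/d(xi) = [ -fx*x*y/z^2      fx*(1+x^2/z^2)  -fx*y/z   fx/z   0      -fx*x/z^2 ]
//                  [ -fy*(1+y^2/z^2)  fy*x*y/z^2       fy*x/z   0      fy/z   -fy*y/z^2 ]
// (columns 1-3: rotation part omega, columns 4-6: translation part epsilon)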
Eigen::Matrix<double, 1, 2> jacobian_pixel_uv;
// derivative of the photometric error w.r.t. the pixel coordinates (image gradient, central differences)
jacobian_pixel_uv ( 0,0 ) = ( getPixelValue ( u+1,v )-getPixelValue ( u-1,v ) ) /2;
jacobian_pixel_uv ( 0,1 ) = ( getPixelValue ( u,v+1 )-getPixelValue ( u,v-1 ) ) /2;
// chain rule: the derivative of the photometric error w.r.t. the perturbation is the image gradient times the derivative of the pixel coordinates w.r.t. the perturbation
_jacobianOplusXi = jacobian_pixel_uv*jacobian_uv_ksai;
}
// dummy read and write functions because we don't care...
virtual bool read ( std::istream& in ) { return false; }
virtual bool write ( std::ostream& out ) const { return false; }
protected:
// get a gray scale value from reference image (bilinear interpolated)
inline float getPixelValue ( float x, float y )
{
uchar* data = & image_->data[ int ( y ) * image_->step + int ( x ) ];
float xx = x - floor ( x );
float yy = y - floor ( y );
return float (
( 1-xx ) * ( 1-yy ) * data[0] +
xx* ( 1-yy ) * data[1] +
( 1-xx ) *yy*data[ image_->step ] +
xx*yy*data[image_->step+1]
); // bilinear interpolation: the reprojected pixel coordinates are generally not integers, so the intensity at a floating-point location is interpolated from its four integer-pixel neighbors
}
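// With dx = x - floor(x), dy = y - floor(y) and I00, I01, I10, I11 the four
// neighboring pixels, the value above is
//   I(x,y) = (1-dx)(1-dy)*I00 + dx*(1-dy)*I01 + (1-dx)*dy*I10 + dx*dy*I11 .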
public:
Eigen::Vector3d x_world_; // 3D point in world frame
float cx_=0, cy_=0, fx_=0, fy_=0; // Camera intrinsics
cv::Mat* image_=nullptr; // reference image
};
int main ( int argc, char** argv )
{
if ( argc != 2 )
{
cout<<"usage: useLK path_to_dataset"<<endl;
return 1;
}
srand ( ( unsigned int ) time ( 0 ) ); // seed the RNG; it is used later to pick random colors for the plotted circles and lines
string path_to_dataset = argv[1];
string associate_file = path_to_dataset + "/associate.txt";
ifstream fin ( associate_file );
string rgb_file, depth_file, time_rgb, time_depth;
cv::Mat color, depth, gray; // the color image will be converted to grayscale and stored in gray
vector<Measurement> measurements; // vector of the struct defined above (world-frame coordinates plus the corresponding grayscale value)
// camera intrinsics
float cx = 325.5;
float cy = 253.5;
float fx = 518.0;
float fy = 519.0;
float depth_scale = 1000.0;
Eigen::Matrix3f K;
K<<fx,0.f,cx,0.f,fy,cy,0.f,0.f,1.0f;
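// i.e. K = [ fx  0  cx ]
//          [  0 fy  cy ]
//          [  0  0   1 ]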
Eigen::Isometry3d Tcw = Eigen::Isometry3d::Identity();
cv::Mat prev_color;
// use the first image as the reference and run the direct method between it and each subsequent image
for ( int index=0; index<10; index++ )
{
cout<<"*********** loop "<<index<<" ************"<<endl;
fin>>time_rgb>>rgb_file>>time_depth>>depth_file;
color = cv::imread ( path_to_dataset+"/"+rgb_file );
depth = cv::imread ( path_to_dataset+"/"+depth_file, -1 );
if ( color.data==nullptr || depth.data==nullptr )
continue;
cv::cvtColor ( color, gray, cv::COLOR_BGR2GRAY ); // convert to grayscale
if ( index ==0 )
{
// extract FAST keypoints from the first frame
vector<cv::KeyPoint> keypoints;
cv::Ptr<cv::FastFeatureDetector> detector = cv::FastFeatureDetector::create();
detector->detect ( color, keypoints ); // detect corners on the color image
for ( auto kp:keypoints )
{
// discard keypoints close to the image border
if ( kp.pt.x < 20 || kp.pt.y < 20 || ( kp.pt.x+20 ) >color.cols || ( kp.pt.y+20 ) >color.rows )
continue;
ushort d = depth.ptr<ushort> ( cvRound ( kp.pt.y ) ) [ cvRound ( kp.pt.x ) ];
// cvRound rounds to the nearest integer; the depth is read directly, without bilinear interpolation, because the keypoint sits at an exact pixel rather than a computed subpixel position
if ( d==0 )
continue;
// back-project to world-frame coordinates (the first camera frame is taken as the world frame):
Eigen::Vector3d p3d = project2Dto3D ( kp.pt.x, kp.pt.y, d, fx, fy, cx, cy, depth_scale );
// read the grayscale value at the corresponding position of the gray image
float grayscale = float ( gray.ptr<uchar> ( cvRound ( kp.pt.y ) ) [ cvRound ( kp.pt.x ) ] );
// pack the first frame's 3D point and its grayscale value into a Measurement and append it to the vector
measurements.push_back ( Measurement ( p3d, grayscale ) );
}
prev_color = color.clone(); // clone the current color image into prev_color
continue;
}
// estimate the camera motion with the direct method
chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
poseEstimationDirect ( measurements, &gray, K, Tcw ); // compute Tcw
chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>> ( t2-t1 );
cout<<"direct method costs time: "<<time_used.count() <<" seconds."<<endl;
cout<<"Tcw="<<Tcw.matrix() <<endl;
// plot the feature points
cv::Mat img_show ( color.rows*2, color.cols, CV_8UC3 ); // note the doubled row count: the reference frame and the current frame will be stacked vertically
// copy the reference image into the top half of the canvas: the Rect starts at (0,0) with width cols and height rows (a copy into img_show, not an imshow)
prev_color.copyTo ( img_show ( cv::Rect ( 0,0,color.cols, color.rows ) ) );
// copy the new image into the bottom half, likewise
color.copyTo ( img_show ( cv::Rect ( 0,color.rows,color.cols, color.rows ) ) );
for ( Measurement m:measurements )
{
if ( rand() > RAND_MAX/5 ) // plot only about one fifth of the points
continue;
// project the point as seen in the first frame
Eigen::Vector3d p = m.pos_world;
Eigen::Vector2d pixel_prev = project3Dto2D ( p ( 0,0 ), p ( 1,0 ), p ( 2,0 ), fx, fy, cx, cy );
// project the same point into the current (2nd, 3rd, ...) frame with the estimated Tcw; discard it if it falls outside the image
Eigen::Vector3d p2 = Tcw*m.pos_world;
Eigen::Vector2d pixel_now = project3Dto2D ( p2 ( 0,0 ), p2 ( 1,0 ), p2 ( 2,0 ), fx, fy, cx, cy );
if ( pixel_now(0,0)<0 || pixel_now(0,0)>=color.cols || pixel_now(1,0)<0 || pixel_now(1,0)>=color.rows )
continue;
float b = 255*float ( rand() ) /RAND_MAX;
float g = 255*float ( rand() ) /RAND_MAX;
float r = 255*float ( rand() ) /RAND_MAX;
// cv::circle(image, center, radius, color, thickness)
cv::circle ( img_show, cv::Point2d ( pixel_prev ( 0,0 ), pixel_prev ( 1,0 ) ), 8, cv::Scalar ( b,g,r ), 2 );
cv::circle ( img_show, cv::Point2d ( pixel_now ( 0,0 ), pixel_now ( 1,0 ) +color.rows ), 8, cv::Scalar ( b,g,r ), 2 );
// connect the two projections with a line
cv::line ( img_show, cv::Point2d ( pixel_prev ( 0,0 ), pixel_prev ( 1,0 ) ), cv::Point2d ( pixel_now ( 0,0 ), pixel_now ( 1,0 ) +color.rows ), cv::Scalar ( b,g,r ), 1 );
}
cv::imshow ( "result", img_show );
cv::waitKey ( 0 );
}
return 0;
}
bool poseEstimationDirect ( const vector< Measurement >& measurements, cv::Mat* gray, Eigen::Matrix3f& K, Eigen::Isometry3d& Tcw )
{
// set up g2o
typedef g2o::BlockSolver<g2o::BlockSolverTraits<6,1>> DirectBlock; // the optimized variable is 6x1: the pose has 6 DoF and the error is 1-D; landmark positions are not optimized here (otherwise the block would be 6x3)
DirectBlock::LinearSolverType* linearSolver = new g2o::LinearSolverDense< DirectBlock::PoseMatrixType > ();
DirectBlock* solver_ptr = new DirectBlock ( linearSolver );
// g2o::OptimizationAlgorithmGaussNewton* solver = new g2o::OptimizationAlgorithmGaussNewton( solver_ptr ); // G-N
g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg ( solver_ptr ); // L-M
g2o::SparseOptimizer optimizer;
optimizer.setAlgorithm ( solver );
optimizer.setVerbose( true );
g2o::VertexSE3Expmap* pose = new g2o::VertexSE3Expmap();
pose->setEstimate ( g2o::SE3Quat ( Tcw.rotation(), Tcw.translation() ) );
pose->setId ( 0 );
optimizer.addVertex ( pose );
// add the edges
int id=1;
for ( Measurement m: measurements )
{
EdgeSE3ProjectDirect* edge = new EdgeSE3ProjectDirect (
m.pos_world,
K ( 0,0 ), K ( 1,1 ), K ( 0,2 ), K ( 1,2 ), gray
); // construct the edge from the 3D point, the intrinsics, and the gray image
edge->setVertex ( 0, pose );
edge->setMeasurement ( m.grayscale );
edge->setInformation ( Eigen::Matrix<double,1,1>::Identity() );
edge->setId ( id++ );
optimizer.addEdge ( edge );
}
cout<<"edges in graph: "<<optimizer.edges().size() <<endl;
optimizer.initializeOptimization();
optimizer.optimize ( 30 );
Tcw = pose->estimate();
return true;
}