-
Notifications
You must be signed in to change notification settings - Fork 3
/
features2d.cc
170 lines (128 loc) · 4.94 KB
/
features2d.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
/* Copyright 2014 Matthieu Tourne */
#include <sstream>
#include <string>
#include <opencv2/features2d/features2d.hpp>
// non-free sift detector
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include "photogram.h"
#include "features2d.h"
#include "util.h"
#ifdef USE_SIFT_GPU
#include "sift_gpu_wrapper.h"
#endif
// Serialize this feature set into `fs` as a mapping containing the
// keypoint vector and the descriptor matrix.
void ImageFeatures::write(FileStorage &fs) const {
    // FileStorage streaming is stateful, so sequential statements are
    // equivalent to one chained expression.
    fs << "{";
    fs << "keypoints" << keypoints;
    fs << "descriptors" << descriptors;
    fs << "}";
}
// De-serialize a feature set previously written by ImageFeatures::write().
// Reads "keypoints" and "descriptors" out of `node` into the members.
void ImageFeatures::read(const FileNode &node) {
    LOG(DEBUG) << "De-serializing ImageFeatures";
    cv::read(node["keypoints"], keypoints);
    // Both the USE_SIFT_GPU and plain OpenCV builds deserialized the
    // descriptors with the exact same statement, so the former
    // #ifdef/#else duplication here has been collapsed into one line.
    node["descriptors"] >> descriptors;
}
// File-scope SIFT detector/extractor and FLANN matcher shared by the
// helpers below (used in the non-USE_SIFT_GPU build of get_features /
// match_features).
// NOTE(review): these are mutable globals — concurrent calls into
// get_features/match_features would share them; confirm callers are
// single-threaded.
SiftFeatureDetector opencv_sift_detector;
SiftDescriptorExtractor opencv_sift_extractor;
FlannBasedMatcher flann_matcher;
// for surf, use this
// SurfFeatureDetector opencv_surf_detector;
// SurfDescriptorExtractor opencv_surf_extractor;
// Detect keypoints and compute descriptors for `img_gray`, storing both
// into `features`. Always returns 0 (no failure path is reported).
//
// Backend selected at compile time:
//   - USE_SIFT_GPU: the SiftGPUWrapper singleton performs detection and
//     extraction in a single call.
//   - otherwise: the file-scope OpenCV SIFT detector + extractor pair.
// NOTE(review): the name suggests a single-channel grayscale input —
// confirm callers convert before calling.
int get_features(Mat img_gray, ImageFeatures &features) {
#ifdef USE_SIFT_GPU
    LOG(DEBUG) << "using SIFT gpu";
    SiftGPUWrapper* siftgpu = SiftGPUWrapper::getInstance();
    // One call fills both keypoints and descriptors.
    siftgpu->detect(img_gray, features.keypoints, features.descriptors);
    LOG(DEBUG) << "feature descriptors, count: " << features.descriptors.size();
#else
    LOG(DEBUG) << "using opencv SIFT";
    opencv_sift_detector.detect(img_gray, features.keypoints);
    opencv_sift_extractor.compute(img_gray, features.keypoints, features.descriptors);
    LOG(DEBUG) << "Found " << features.keypoints.size() << " features";
#endif
    return 0;
}
// Match the descriptors of two images, filling `matches`.
// Always returns 0 (no failure path is reported).
//
// Backend selected at compile time: GPU brute-force matcher under
// USE_SIFT_GPU, FLANN-based matcher otherwise.
int match_features(ImageFeatures &features1,
                   ImageFeatures &features2, Matches &matches) {
#ifdef USE_SIFT_GPU
    LOG(DEBUG) << "Using GPU bruteforce matcher";
    SiftGPUWrapper* siftgpu = SiftGPUWrapper::getInstance();
    // each descriptor is 128 element with SIFT
    // num is the number of descriptors ..
    // NOTE(review): this assumes descriptors is a flat buffer of 128-float
    // SIFT vectors, so size()/128 is the descriptor count — verify against
    // SiftGPUWrapper's storage layout.
    siftgpu->match(features1.descriptors, features1.descriptors.size() / 128,
                   features2.descriptors, features2.descriptors.size() / 128,
                   &matches);
#else
    LOG(DEBUG) << "Using a FLANN based matcher";
    flann_matcher.match(features1.descriptors, features2.descriptors, matches);
#endif
    LOG(DEBUG) << "Matches: " << matches.size();
    return 0;
}
// Very simple match filter: keep only matches whose distance is within
// max(4 * min_dist, 0.02), where min_dist is the smallest distance seen.
void get_good_matches(Matches &matches, Matches &good_matches) {
    double smallest = 1000.0;  // running minimum distance
    double largest = 0;        // running maximum distance
    //-- Quick calculation of max and min distances between keypoints
    for (size_t k = 0; k < matches.size(); k++) {
        const double d = matches[k].distance;
        if (d < smallest) smallest = d;
        if (d > largest)  largest = d;
    }
    LOG(DEBUG) << "Matches, max dist: " << largest;
    LOG(DEBUG) << "Matches, min dist: " << smallest;
    // Threshold is loop-invariant, so compute it once up front.
    const double cutoff = max(4 * smallest, 0.02);
    for (size_t k = 0; k < matches.size(); k++) {
        if (matches[k].distance <= cutoff) {
            good_matches.push_back(matches[k]);
        }
    }
    LOG(DEBUG) << "Good Matches : " << good_matches.size();
}
void matches2points(const Matches& matches,
ImageFeatures& features1, ImageFeatures& features2,
vector<Point2f>& pts1, vector<Point2f>& pts2) {
pts1.clear();
pts2.clear();
pts1.reserve(matches.size());
pts2.reserve(matches.size());
for (size_t i = 0; i < matches.size(); i++) {
const DMatch& match = matches[i];
pts1.push_back(features1.keypoints[match.queryIdx].pt);
pts2.push_back(features2.keypoints[match.trainIdx].pt);
}
LOG(DEBUG) << "points1: " << pts1.size() << ", points2: " << pts2.size();
}
// clean up the original matches, only keep the ones that satisfy the model
bool get_putative_matches(const Matches &matches, const vector<char> &keypointsInliers,
Matches &output) {
if (matches.size() != keypointsInliers.size()) {
LOG(ERROR) << "matches and inliers size differ";
return false;
}
for (size_t i = 0; i < matches.size(); i++) {
const DMatch& match = matches[i];
if (keypointsInliers[i]) {
output.push_back(match);
}
}
LOG(DEBUG) << "Input size: " << matches.size()
<< ", Output size: " << output.size();
return true;
}
void write_matches_image(const Mat img1, const ImageFeatures &features1,
const Mat img2, const ImageFeatures &features2,
const Matches &matches,
const vector<char> &keypointMask,
const string output) {
Mat img_matches;
drawMatches(img1, features1.keypoints,
img2, features2.keypoints,
matches, img_matches, Scalar::all(-1), Scalar::all(-1),
keypointMask, DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
LOG(DEBUG) << "Writing image: " << output;
imwrite(output.c_str(), img_matches);
}