forked from amanrana20/Computer-Vision-Project
-
Notifications
You must be signed in to change notification settings - Fork 0
/
CVCode_Revision01.cpp
241 lines (156 loc) · 7.56 KB
/
CVCode_Revision01.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/*
This program is intended to implement the SURF feature detector.
We start by capturing a reference image containing the desired object to be detected.
This is followed by starting the video feed again to capture frames from the webcam and using the SURF algorithm to detect the desired object in the video feed.
Created by: Team Jaguar
Team members:
1. Aman Rana
2. Syamprasad K. Rajagopalan
3. Rohan Kothari
4. Ulkesh Solanki
*/
// Including all the libraries into the code.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
// Using namespace cv and std so we don't have to qualify their names throughout the program.
using namespace cv;
using namespace std;
// global variables
Mat refImage, refDescriptors;                    // grayscale reference image and its SURF descriptors
vector<KeyPoint> refKeyPoints, targetKeyPoints;  // keypoints of the reference image / current frame
std::vector<Point2f> ref_obj_corners(4);         // reference object corners (unused in the visible code)
Mat outputImage, targetDescriptors;              // keypoint visualisation; descriptors of the current frame
Mat H, frame;                                    // homography placeholder (unused); current webcam frame
Mat img_matches;                                 // side-by-side match visualisation produced by drawMatches
int i;                                           // NOTE(review): global loop counter shared across functions — fragile
// VARIABLES TO FINETUNE THE OBJECT DETECTION
int matchThreshold = 0;                          // maximum allowed |queryIdx - trainIdx| for a "good" match
int numberOfMatchingPoints = 5; // This is the size of 'good_matches' vector array, obtained by taking the best matches from 'matches' vector array.
void getReferenceImage() {
// Setting the videoCapture object 'cap' to receive feed from the webcam i.e. (0).
VideoCapture cap(0);
// namedWindow("Display", CV_WND_PROP_OPENGL); // Creates a named Window named 'Dispaly'
// resizeWindow("Display", 700, 437); // Resizes the namedWindow
bool condition = true; // This condition causes the loop to run continuously till it is false
// Capturing the Reference image from the webcam
while (condition == true) {
Mat boxImage1;
cap >> boxImage1;
imshow("Select an object to detect", boxImage1);
cvtColor(boxImage1, boxImage1, CV_BGR2GRAY);
if (waitKey(30) >= 0) {
refImage = boxImage1;
resize(refImage, refImage, Size(700, 437));
condition = false;
} // end if (waitKey(30) >= 0)
} // end while(condition == true)
// // Displaying the captured image for 3000 milli seconds or 3 seconds
// namedWindow("Captured Image",1);
// imshow("BoxImage", refImage);
cout << "Reference image has been captured successfully !" << endl << endl;
// waitKey(3000);
}
// Forward declarations for the functions defined below main's helpers.
// BUG FIX: removed a duplicate declaration of getDefaultReferenceCoordinates()
// and a zero-argument drawLinesAroundDetectedObject() overload — neither was
// ever defined or called, so they were dead declarations.
void matchFeatures(Mat frame, vector<KeyPoint> targetPoints, Mat targetDesc);
void drawLinesAroundDetectedObject(vector<DMatch> good_matches);
// Runs SURF keypoint detection + descriptor extraction on 'image'.
// If referenceOrNot is true the results are stored in the reference globals
// (refKeyPoints/refDescriptors); otherwise they are stored in the target
// globals and matched against the reference via matchFeatures().
void performSurfDetection(Mat image,bool referenceOrNot) {
vector<KeyPoint> keyPoints; // Vector definition to be used in the SURF algorithm.
// Defining SurfFeatureDetector
int hessian = 2500; // Hessian threshold: higher values keep only stronger keypoints.
SurfFeatureDetector surf(hessian);
// Detecting the keypoints and storing them in keyPoints array
surf.detect(image, keyPoints);
// Drawing the keypoints to the global outputImage (visualisation only; not displayed here).
drawKeypoints(image, keyPoints, outputImage, Scalar(255,255,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
// Extracting Descriptors from images
SurfDescriptorExtractor surfDesc;
Mat descriptors;
surfDesc.compute(image, keyPoints, descriptors);
if (referenceOrNot == true) {
refKeyPoints = keyPoints;
refDescriptors = descriptors;
} else {
targetKeyPoints = keyPoints;
targetDescriptors = descriptors;
matchFeatures(image, targetKeyPoints, targetDescriptors);
}
}
// Brute-force matches the reference descriptors against the target frame's
// descriptors, keeps at most 'numberOfMatchingPoints' good matches, shows the
// match visualisation and draws a box around the detected object.
void matchFeatures(Mat frame, vector<KeyPoint> targetPoints, Mat targetDesc) {
    BFMatcher matcher(NORM_L2);
    vector<DMatch> matches, good_matches;
    matcher.match(refDescriptors, targetDesc, matches);
    // Only filter when there are enough raw matches to be meaningful.
    if (matches.size() > 20) {
        for (size_t k = 0; k < matches.size(); ++k) {
            // BUG FIX: the original OR-ed the two signed index differences,
            // which is always true (one of them is always <= matchThreshold
            // when matchThreshold >= 0), so the filter did nothing. The
            // intended test is |queryIdx - trainIdx| <= matchThreshold.
            const int diff = matches[k].queryIdx - matches[k].trainIdx;
            if (diff <= matchThreshold && -diff <= matchThreshold) {
                good_matches.push_back(matches[k]);
                if ((int) good_matches.size() == numberOfMatchingPoints)
                    break;  // enough good matches collected
            }
        }
    }
    if (!matches.empty()) {
        drawMatches(refImage, refKeyPoints, frame, targetPoints, good_matches,
                    img_matches, Scalar(255,255,255));
        namedWindow("Detected Object", WINDOW_NORMAL);
        //imshow("Detected Object", img_matches);
    } else {
        cout << "Object not detected. Exiting..." << endl;
        waitKey(1000);
    }
    // BUG FIX: the original called this unconditionally; with an empty
    // good_matches vector the callee divided by zero.
    if (!good_matches.empty())
        drawLinesAroundDetectedObject(good_matches);
}
// Draws a rectangle on the global 'frame', centred at the mean position of
// the matched target keypoints, then displays the frame.
void drawLinesAroundDetectedObject(vector<DMatch> good_matches) {
    if (good_matches.empty()) {
        // BUG FIX: the original divided by the match count without a zero
        // guard, crashing whenever no good matches were found.
        imshow("Good Matches & Object detection", frame);
        return;
    }
    // Average the matched keypoint coordinates in the target frame.
    // (The original also built unused 'obj'/'scene' point vectors and used the
    // global 'i' as the loop counter; both removed.)
    int centerX = 0, centerY = 0;
    for (size_t k = 0; k < good_matches.size(); ++k) {
        centerX += (int) targetKeyPoints[good_matches[k].trainIdx].pt.x;
        centerY += (int) targetKeyPoints[good_matches[k].trainIdx].pt.y;
    }
    const int count = (int) good_matches.size();
    centerX /= count;  // averaging the X values after summation
    centerY /= count;  // averaging the Y values after summation
    // Box extents are derived from the reference image dimensions; the
    // asymmetric margins (/8 above-left, /6 below-right) are preserved from
    // the original implementation.
    rectangle(frame,
              Point2i(centerX - refImage.size().height / 8,
                      centerY - refImage.size().width / 8),
              Point2i(centerX + refImage.size().height / 6,
                      centerY + refImage.size().width / 6),
              Scalar(0, 255, 0), 2);
    imshow("Good Matches & Object detection", frame);
}
int main() {
// Get the reference image and store it in Mat refImage.
getReferenceImage();
performSurfDetection(refImage,true);
// Start the video capture again using the webcam.
VideoCapture cap(0);
namedWindow("Display1", CV_WINDOW_NORMAL ); // Creates a named Window named 'Dispaly'
bool condition = true; // This condition causes the loop to run continuously till it is false
Mat frame1;
while(condition == true) {
// Capturing each frame from the webcam and storing it in frame matrix element.
cap >> frame;
cvtColor(frame, frame1, CV_BGR2GRAY);
// Resize the frame to (700,437)
resize(frame, frame, Size(700, 437));
resize(frame1, frame1, Size(700, 437));
// Perform SURF Detection on every frame
performSurfDetection(frame, false); // false - because the frame is not reference Image
// Wait for the user to press any key and then exit.
if (waitKey(30) >= 0)
condition = false;
} // end of while(condition == true)
return 0;
} // end of int main()