template_matching_1.py
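"""Detect multiple instances of a planar template in an RGB-D scene.

SIFT keypoints are matched between a template image and a scene image with a
FLANN matcher; a sequential RANSAC loop then repeatedly fits a homography
with a 3D-aware estimator from customRansac, draws a bounding box for each
detected instance and removes its inliers before searching for the next one.
"""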
import cv2 as cv
import numpy as np
import numpy.ma as ma
from customRansac import customFindHomography, buildKDTree
from customRansac import customFindHomographyPlane3D
from customRansac import customFindHomographyNormalSampling3D
from customRansac import customFindHomography3DTree
import time
img_scene = cv.imread("3D/2/rgb_image.jpg")
img_object = cv.imread("Templates3/barchette_intera.jpg")
point_cloud = np.load("3D/2/pointCloud.npy") #[height][width][xyz]
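# Added sanity check: cv.imread returns None for a missing or unreadable
# file, so fail fast here rather than crashing later inside detectAndCompute
if img_scene is None or img_object is None:
    raise FileNotFoundError("Could not load the scene or the template image")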
#Detect the keypoints using the SIFT detector, compute the descriptors
#(on OpenCV >= 4.4 the equivalent factory is cv.SIFT_create())
detector = cv.xfeatures2d_SIFT.create()
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
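# Added guard: detectAndCompute returns None descriptors when no keypoints
# are found, and the FLANN matcher below would raise on None input
if descriptors_obj is None or descriptors_scene is None:
    raise RuntimeError("No SIFT keypoints found in the template or the scene")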
#Match descriptor vectors with a FLANN-based matcher (k=2 for the ratio test)
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
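# Lowe's ratio test keeps a match only when the nearest scene descriptor is
# clearly closer than the second-nearest one. The 0.87 threshold below is
# looser than the 0.7-0.8 Lowe suggested, keeping more candidate matches and
# leaving outlier rejection to the RANSAC stage.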
#Filter matches using Lowe's ratio test
ratio_thresh = 0.87
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)
#Draw all good matches side by side (template on the left, scene on the right)
img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
oldSize = len(good_matches)
newSize = -1
j = 0  #instance counter
start = time.time()
###Sequential RANSAC
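# Each pass fits one homography to the remaining matches, treats its inliers
# as a detected instance and removes them from the pool, so repeated passes
# can find multiple instances of the template. The loop stops when a pass
# removes nothing (oldSize == newSize) or fewer than 20 matches remain.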
while (oldSize != newSize) and len(good_matches) >= 20:
    oldSize = len(good_matches)
    print("Iteration " + str(j))
    #Localize the object
    obj = np.empty((len(good_matches), 2), dtype=np.float32)
    scene = np.empty((len(good_matches), 2), dtype=np.float32)
    for i in range(len(good_matches)):
        #Get the keypoints from the good matches
        obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
        obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
        scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
        scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
#print("Find homography")
#H, mask = cv.findHomography(obj, scene, cv.RANSAC, confidence = 0.995, ransacReprojThreshold=5)
#H, mask = customFindHomography(obj, scene, 0.4)
#H, mask = customFindHomographyPlane3D(obj, scene, point_cloud, 0.55)
#H, mask = customFindHomographyNormalSampling3D(obj, scene, point_cloud, 0.4, 0.1)
H, mask = customFindHomography3DTree(obj, scene, point_cloud, 0.4)
# H homography from template to scene
H = np.asarray(H)
#Take points from the scene that fits with the homography
img_instance_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]), img_object.shape[1]+img_scene.shape[1], 3), dtype=np.uint8)
maskk = (mask[:]==[0])
instance_good_matches = ma.masked_array(good_matches, mask=maskk).compressed()
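    # Equivalent selection without numpy.ma, under the same assumption that
    # mask holds 1 for inliers and 0 for outliers:
    #   inlier_idx = np.flatnonzero(np.asarray(mask).ravel())
    #   instance_good_matches = [good_matches[i] for i in inlier_idx]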
    #Remove this instance's inliers from the pool of good matches
    good_matches = np.asarray(list(set(good_matches) - set(instance_good_matches)))
    newSize = len(good_matches)
    ### Show matches fitting the found homography
    #cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, instance_good_matches, img_instance_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    #cv.imshow('Good Matches - 1 instance', img_instance_matches)
    #cv.imwrite("output_mask" + str(j) + ".jpg", img_instance_matches)
    #cv.waitKey()
    #Get the corners from the template (top-left, top-right, bottom-right, bottom-left)
    obj_corners = np.empty((4, 1, 2), dtype=np.float32)
    obj_corners[0, 0] = (0, 0)
    obj_corners[1, 0] = (img_object.shape[1], 0)
    obj_corners[2, 0] = (img_object.shape[1], img_object.shape[0])
    obj_corners[3, 0] = (0, img_object.shape[0])
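    # cv.perspectiveTransform expects float32 points shaped (N, 1, 2),
    # hence the extra middle dimension in obj_corners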
    try:
        #Check for a degenerate homography: the projective denominator,
        #normalised by det(H) to fix the overall sign of H, must stay positive
        #at every corner, otherwise the projected box wraps through infinity
        valid = True
        for k in range(len(obj_corners)):
            x = obj_corners[k, 0, 0]
            y = obj_corners[k, 0, 1]
            if (H[2][0] * x + H[2][1] * y + H[2][2]) / np.linalg.det(H) <= 0:
                valid = False
        #Project the template corners into the scene
        scene_corners = cv.perspectiveTransform(obj_corners, H)
        #Draw the bounding box; corners are shifted right by the template width
        #because the scene sits next to the template in img_matches
        if valid:
            end = time.time()
            print("-------------------------------DRAWING BOX----------------------------------------")
            print("time elapsed:")
            print(end - start)
            cv.line(img_matches, (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])),
                    (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])), (0, 255, 0), 4)
            cv.line(img_matches, (int(scene_corners[1, 0, 0] + img_object.shape[1]), int(scene_corners[1, 0, 1])),
                    (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])), (0, 255, 0), 4)
            cv.line(img_matches, (int(scene_corners[2, 0, 0] + img_object.shape[1]), int(scene_corners[2, 0, 1])),
                    (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])), (0, 255, 0), 4)
            cv.line(img_matches, (int(scene_corners[3, 0, 0] + img_object.shape[1]), int(scene_corners[3, 0, 1])),
                    (int(scene_corners[0, 0, 0] + img_object.shape[1]), int(scene_corners[0, 0, 1])), (0, 255, 0), 4)
    except Exception:
        #perspectiveTransform raises when H is empty or malformed
        print("Cannot draw bounding box")
        print(H)
    j += 1
### End Sequential RANSAC
end = time.time()
print("time elapsed:")
print(end - start)
#Show detected matches, save the annotated image and wait for a keypress
cv.imshow('Good Matches', img_matches)
cv.imwrite("output.jpg", img_matches)
cv.waitKey()