forked from mint-lab/3dv_tutorial
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: image_stitching.py
49 lines (39 loc) · 1.7 KB
/
image_stitching.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import numpy as np
import cv2
def main():
    """Stitch two overlapping photos into a panorama using BRISK features.

    Loads a hard-coded image pair, matches BRISK descriptors with a
    brute-force Hamming matcher, estimates the planar homography mapping
    image2 onto image1 with RANSAC, warps image2 onto a double-width
    canvas, pastes image1 over the left half, and displays the match
    visualization stacked above the merged panorama.

    Raises:
        FileNotFoundError: if either input image cannot be loaded.
        RuntimeError: if homography estimation fails.
    """
    # Load two images
    input1, input2 = "../bin/data/hill01.jpg", "../bin/data/hill02.jpg"
    image1 = cv2.imread(input1)
    image2 = cv2.imread(input2)
    # cv2.imread returns None silently on a missing/unreadable file; fail fast
    # instead of crashing later inside detectAndCompute with an opaque error.
    if image1 is None or image2 is None:
        raise FileNotFoundError(f"Cannot load input images: {input1}, {input2}")

    # Retrieve matching points
    brisk = cv2.BRISK_create()
    keypoints1, descriptors1 = brisk.detectAndCompute(image1, None)
    keypoints2, descriptors2 = brisk.detectAndCompute(image2, None)
    fmatcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")
    match = fmatcher.match(descriptors1, descriptors2)

    # Collect the matched pixel coordinates from both images
    # (comprehensions instead of the original index-based append loop).
    points1 = np.array([keypoints1[m.queryIdx].pt for m in match], dtype=np.float32)
    points2 = np.array([keypoints2[m.trainIdx].pt for m in match], dtype=np.float32)

    # Calculate the planar homography (image2 -> image1) with RANSAC
    # outlier rejection; H is None when estimation fails.
    H, inlier_mask = cv2.findHomography(points2, points1, cv2.RANSAC)
    if H is None:
        raise RuntimeError("Homography estimation failed (too few good matches)")

    # Warp image2 into image1's frame on a double-width canvas, then paste
    # image1 over the left half. (The original did this paste twice — a
    # duplicated line — and also called imshow early with the same window
    # title that is overwritten below; both redundancies removed.)
    merged = cv2.warpPerspective(image2, H, (image1.shape[1] * 2, image1.shape[0]))
    merged[:, :image1.shape[1]] = image1

    # Show the side-by-side originals and the top-15 matches stacked above
    # the merged panorama.
    matched = cv2.drawMatches(img1=image1,
                              keypoints1=keypoints1,
                              img2=image2,
                              keypoints2=keypoints2,
                              matches1to2=match[:15],
                              outImg=None)
    original = np.hstack((image1, image2))
    matched = np.vstack((original, matched))
    merged = np.vstack((matched, merged))
    cv2.imshow("3DV Tutorial: Image Stitching", merged)
    cv2.waitKey(0)

if __name__ == "__main__":
    main()