model_for_gesture.py
import numpy as np
import cv2
import keras

# Load the trained gesture-classification model (the path points to the
# author's local copy of the .h5 file produced by the training script).
model = keras.models.load_model(r"C:\Users\abhij\best_model_dataflair3.h5")
# Background model for running-average background subtraction
background = None
accumulated_weight = 0.5

# Region of interest (ROI) where the hand is expected, in pixel coordinates
ROI_top = 100
ROI_bottom = 300
ROI_right = 150
ROI_left = 350
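# word_dict maps the model's output class indices to gesture labels. It is
# used in the prediction loop below but was never defined in this file. The
# mapping here is a sketch assuming the ten digit gestures from the DataFlair
# tutorial's training script; adjust it to match the classes your model was
# actually trained on.
word_dict = {0: 'One', 1: 'Two', 2: 'Three', 3: 'Four', 4: 'Five',
             5: 'Six', 6: 'Seven', 7: 'Eight', 8: 'Nine', 9: 'Ten'}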
def cal_accum_avg(frame, accumulated_weight):
    global background
    # The first frame initialises the background model
    if background is None:
        background = frame.copy().astype("float")
        return None
    cv2.accumulateWeighted(frame, background, accumulated_weight)
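# For reference, cv2.accumulateWeighted updates the background as a running
# (exponential) average:
#     background = (1 - accumulated_weight) * background + accumulated_weight * frame
# so with accumulated_weight = 0.5, each calibration frame contributes half
# of the update.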
def segment_hand(frame, threshold=25):
    global background
    diff = cv2.absdiff(background.astype("uint8"), frame)
    _, thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    # Fetching contours in the frame (these contours can be of the hand or of
    # any other object in the foreground).
    # NOTE: cv2.findContours returns (contours, hierarchy) in OpenCV 4.x;
    # OpenCV 3.x returned (image, contours, hierarchy).
    contours, hierarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # If the contours list is empty, we didn't detect anything
    if len(contours) == 0:
        return None
    else:
        # The largest external contour should be the hand
        hand_segment_max_cont = max(contours, key=cv2.contourArea)
        # Returning the thresholded image and the hand segment (max contour)
        return (thresholded, hand_segment_max_cont)
cam = cv2.VideoCapture(0)
num_frames = 0
while True:
    ret, frame = cam.read()
    # Flipping the frame to prevent a mirror image of the captured frame
    frame = cv2.flip(frame, 1)
    frame_copy = frame.copy()

    # ROI from the frame
    roi = frame[ROI_top:ROI_bottom, ROI_right:ROI_left]
    gray_frame = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (9, 9), 0)

    if num_frames < 70:
        # Use the first 70 frames to calibrate the background model
        cal_accum_avg(gray_frame, accumulated_weight)
        cv2.putText(frame_copy, "FETCHING BACKGROUND...PLEASE WAIT", (80, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
    else:
        # Segmenting the hand region
        hand = segment_hand(gray_frame)
        # Checking if we are able to detect the hand
        if hand is not None:
            thresholded, hand_segment = hand
            # Drawing contours around the hand segment, offset back into
            # full-frame coordinates
            cv2.drawContours(frame_copy, [hand_segment + (ROI_right, ROI_top)], -1, (255, 0, 0), 1)
            cv2.imshow("Thresholded Hand Image", thresholded)
            # Match the model's expected input: 64x64 RGB with a batch dimension
            thresholded = cv2.resize(thresholded, (64, 64))
            thresholded = cv2.cvtColor(thresholded, cv2.COLOR_GRAY2RGB)
            thresholded = np.reshape(thresholded, (1, thresholded.shape[0], thresholded.shape[1], 3))
            pred = model.predict(thresholded)
            cv2.putText(frame_copy, word_dict[np.argmax(pred)], (170, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    # Draw the ROI on frame_copy
    cv2.rectangle(frame_copy, (ROI_left, ROI_top), (ROI_right, ROI_bottom), (255, 128, 0), 3)
    # Incrementing the number of frames for tracking
    num_frames += 1
    # Display the frame with the segmented hand
    cv2.putText(frame_copy, "DataFlair hand sign recognition_ _ _", (10, 20), cv2.FONT_ITALIC, 0.5, (51, 255, 51), 1)
    cv2.imshow("Sign Detection", frame_copy)
    # Close windows with Esc
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break

# Release the camera and destroy all the windows
cam.release()
cv2.destroyAllWindows()