SDD_Video.py
import numpy as np
import time
import cv2
import math

# COCO class labels that the YOLOv3 model was trained on
labelsPath = "./coco.names"
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")

# Paths to the YOLOv3 weights and model configuration
weightsPath = "./yolov3.weights"
configPath = "./yolov3.cfg"

cap = cv2.VideoCapture('./videos/test_video.mp4')
hasFrame, frame = cap.read()
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# Frames are resized to 640x360 before being written, so the writer must be
# opened with that size or the output file comes out broken
vid_writer = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                             10, (640, 360))
# Determine the YOLO output layer names once, outside the frame loop;
# flatten() handles both the older (Nx1) and newer (1-D) return shapes
# of getUnconnectedOutLayers()
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

while True:
    ret, image = cap.read()
    if not ret:
        break
    image = cv2.resize(image, (640, 360))
    (H, W) = image.shape[:2]

    # Run a forward pass of YOLO on the current frame
    blob = cv2.dnn.blobFromImage(image, 1 / 300.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    print("Frame Prediction Time : {:.6f} seconds".format(end - start))
    # Parse the YOLO outputs, keeping only "person" detections (COCO class 0)
    boxes = []
    confidences = []
    classIDs = []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            if confidence > 0.1 and classID == 0:
                # Scale the box back to frame coordinates and convert from
                # centre/width/height to a top-left corner representation
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # Non-maximum suppression to drop overlapping boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
    # Top-left corners of the boxes that survived NMS; `keep` records which
    # entry of `boxes` each corner came from so later indexing stays consistent
    keep = []
    a = []
    b = []
    if len(idxs) > 0:
        for i in idxs.flatten():
            keep.append(i)
            a.append(boxes[i][0])
            b.append(boxes[i][1])
    # Pairwise pixel distance between surviving detections; any pair closer
    # than 100 pixels is flagged as not socially distanced (nsd)
    nsd = []
    for i in range(0, len(a) - 1):
        for k in range(i + 1, len(a)):
            x_dist = a[k] - a[i]
            y_dist = b[k] - b[i]
            d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
            if d <= 100:
                nsd.append(keep[i])
                nsd.append(keep[k])
    nsd = list(dict.fromkeys(nsd))
    # Draw detections that violate the distance threshold in red
    color = (0, 0, 255)
    for i in nsd:
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        text = "Alert"
        cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    # Draw the remaining detections in green
    color = (0, 255, 0)
    if len(idxs) > 0:
        for i in idxs.flatten():
            if i in nsd:
                continue
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            text = 'OK'
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    cv2.imshow("Social Distancing Detector", image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    vid_writer.write(image)

vid_writer.release()
cap.release()
cv2.destroyAllWindows()
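
# --- Optional sketch (not part of the pipeline above) -------------------------
# The loop above compares the top-left corners of the detected boxes. A common
# refinement is to compare box centroids instead, since corner-to-corner
# distance shifts with box size. This is a minimal sketch of that idea, assuming
# `boxes` holds [x, y, w, h] entries and `keep` the indices that survived NMS;
# the function below is illustrative only and is not called by the script.
from itertools import combinations


def centroid_violations(boxes, keep, min_dist=100):
    """Return indices of boxes whose centroids are closer than min_dist pixels."""
    centroids = {i: (boxes[i][0] + boxes[i][2] / 2.0, boxes[i][1] + boxes[i][3] / 2.0)
                 for i in keep}
    flagged = set()
    for i, k in combinations(keep, 2):
        (x1, y1), (x2, y2) = centroids[i], centroids[k]
        if math.hypot(x2 - x1, y2 - y1) <= min_dist:
            flagged.update((i, k))
    return flagged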