-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathVideoDrone.py
108 lines (90 loc) · 4.29 KB
/
VideoDrone.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from djitellopy import Tello
import cv2 as cv
import threading
import mediapipe as mp
import numpy as np
class VideoDrone(Tello):
    """Tello drone wrapper adding a live video window, MediaPipe face
    detection, and an iris-based distance estimate.

    Uses the djitellopy video stream; call start_stream() to open the
    display loop on a background thread.
    """

    def __init__(self):
        """Connect to the drone and enable its video stream."""
        super().__init__()
        self.connect()
        self.streamon()
        # Last frame successfully read from the drone (BGR ndarray).
        # !! Don't use this directly, use get_frame() instead.
        self.frame = None
        # Last estimated subject distance in meters (initial guess).
        self.dist = 1.0

    def start_stream(self):
        """Run the video display loop (get_video) on a daemon thread."""
        video_thread = threading.Thread(target=self.get_video, daemon=True)
        video_thread.start()

    def get_frame(self):
        """Return the newest frame from the drone.

        Falls back to the last cached frame (which may be None before the
        first successful read) if the stream read fails, so the caller
        never sees a transient read error.
        """
        try:
            frame = self.get_frame_read().frame
        except Exception:
            # Transient stream hiccup: serve the previous frame instead
            # of propagating the error into the video loop.
            return self.frame
        else:
            self.frame = frame
            return self.frame

    def get_video(self):
        """Continuously show the drone camera feed in an OpenCV window."""
        while True:
            frame = self.get_frame()
            if frame is None:
                # No frame received yet -- cv.resize would raise on None.
                cv.waitKey(1)
                continue
            frame = cv.resize(frame, (1000, 1000))
            #frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            cv.imshow("Drone camera", frame)
            cv.waitKey(1)

    def detect_face(self):
        """Return facial keypoints for the first detected face.

        Returns a dict mapping "nose", "left_ear", "right_ear",
        "left_eye", "right_eye", "mouth" to MediaPipe relative keypoints,
        or None when no face is found or no frame could be read.
        """
        detection = mp.solutions.face_detection
        try:
            frame = cv.cvtColor(self.get_frame(), cv.COLOR_BGR2RGB)
        except Exception:
            # get_frame() returned None (no frame yet) or conversion failed.
            return None
        with detection.FaceDetection(model_selection=0, min_detection_confidence=0.5) as face_detection:
            results = face_detection.process(frame)
        if not results.detections:
            return None
        face = results.detections[0]
        keypoint = detection.FaceKeyPoint
        return {
            "nose": detection.get_key_point(face, keypoint.NOSE_TIP),
            "left_ear": detection.get_key_point(face, keypoint.LEFT_EAR_TRAGION),
            "right_ear": detection.get_key_point(face, keypoint.RIGHT_EAR_TRAGION),
            "left_eye": detection.get_key_point(face, keypoint.LEFT_EYE),
            "right_eye": detection.get_key_point(face, keypoint.RIGHT_EYE),
            "mouth": detection.get_key_point(face, keypoint.MOUTH_CENTER),
        }

    def get_distance(self):
        """Estimate the distance (in meters) to the subject's face.

        Uses the apparent pixel diameter of both irises (known physical
        width ~17.7 mm) and a pinhole-camera focal-length constant.
        Returns None when no frame is available or no face is detected.
        """
        mp_face_mesh = mp.solutions.face_mesh
        LEFT_IRIS = [474, 475, 476, 477]
        RIGHT_IRIS = [469, 470, 471, 472]
        try:
            frame = cv.cvtColor(self.get_frame(), cv.COLOR_BGR2RGB)
        except Exception:
            return None
        with mp_face_mesh.FaceMesh(max_num_faces=1, refine_landmarks=True,
                                   min_detection_confidence=0.5,
                                   min_tracking_confidence=0.5) as face_mesh:
            img_h, img_w = frame.shape[:2]
            results = face_mesh.process(frame)
            if not results.multi_face_landmarks:
                # No face in frame; indexing None would raise TypeError.
                return None
            # Scale normalized landmark coordinates to pixel coordinates.
            mesh_points = np.array(
                [np.multiply([p.x, p.y], [img_w, img_h]).astype(int)
                 for p in results.multi_face_landmarks[0].landmark]
            )
            # Smallest circle enclosing each iris gives its pixel radius.
            (l_cx, l_cy), l_radius = cv.minEnclosingCircle(mesh_points[LEFT_IRIS])
            (r_cx, r_cy), r_radius = cv.minEnclosingCircle(mesh_points[RIGHT_IRIS])
            # Estimated focal length in pixels -- needs recalibration for the
            # drone camera: point the drone at a face at a known distance D,
            # print l_radius, and solve F = (P x D) / W with W = 17.7 mm iris
            # width (see pyimagesearch distance-to-camera article).
            estFocal = 458.814
            # Average the per-eye pinhole estimates: D = (W * F) / P,
            # with P = iris diameter in pixels and W = 0.0177 m.
            estDistance = ((0.0177 * estFocal) / (l_radius * 2)
                           + (0.0177 * estFocal) / (r_radius * 2)) / 2
        return estDistance

    def check_safety(self):
        """Land the drone if any safety limit is exceeded.

        Limits: battery < 10%, temperature > 45 C, height > 300 cm (3 m).
        """
        if self.get_battery() < 10 or self.get_temperature() > 45 or self.get_height() > 300:
            self.land()
        # NOTE(review): placeholder for the standard flight code; anything
        # that can raise during flight should go inside this try so the
        # drone lands on error. print() alone cannot meaningfully fail.
        try:
            print('flying')
        except Exception:
            # If there is an error then land.
            self.land()
        # Wait for a command from the user