faceRecognition.py
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 20 05:29:00 2018
@author: Vedantika Chaudhary
"""
# import the necessary packages
import face_recognition
import pickle
import time
import cv2

# configuration
PATH_TO_ENCODING = "/Users/zak/Desktop/Project_Theia/users/collin.txt"
DETECTION_METHOD = "hog"  # or "cnn" (slower but more accurate)
MAX = 10       # number of comparison rounds to run
ACCURACY = 8   # minimum number of matching rounds required to accept the user


def recognize():
    # load the known faces and embeddings
    data = pickle.loads(open(PATH_TO_ENCODING, "rb").read())
    # initialize the video stream and allow the camera sensor to warm up
    camera = cv2.VideoCapture(0)
    time.sleep(2)

    numCorrect = 0
    checks = 0
    # loop over frames from the video stream
    while checks <= MAX:
        # grab the next frame from the camera
        valid, frame = camera.read()
        if not valid:
            print("Unable to connect to camera \n")
            break

        # convert the input frame from BGR to RGB, then resize it to a
        # width of 750px (to speed up processing)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        height, width, layers = frame.shape
        rgb = cv2.resize(rgb, (750, int((750 * height) / width)))
        # detect the (x, y)-coordinates of the bounding boxes
        # corresponding to each face in the frame, then compute
        # the facial embedding for each detected face
        boxes = face_recognition.face_locations(rgb, model=DETECTION_METHOD)
        encoding = face_recognition.face_encodings(rgb, boxes)
        if len(encoding) == 0:
            return False

        # attempt to match the first detected face against the known
        # encodings
        match = face_recognition.compare_faces(data["encodings"], encoding[0])
        checks = checks + 1
        # count the round as correct if any known encoding matches
        if True in match:
            numCorrect = numCorrect + 1

    camera.release()
    return (numCorrect >= ACCURACY)
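
# ----------------------------------------------------------------------
# Minimal enrollment sketch (not part of the original script): one way the
# pickled file read by recognize() could be produced. It encodes a few
# reference photos of the user and dumps them under an "encodings" key,
# the format recognize() expects. The function name and arguments are
# illustrative assumptions, not an existing interface.
# ----------------------------------------------------------------------
def enroll(image_paths, output_path):
    encodings = []
    for path in image_paths:
        # load a reference photo and compute its 128-d face embedding(s)
        image = face_recognition.load_image_file(path)
        found = face_recognition.face_encodings(image)
        if found:
            encodings.append(found[0])  # keep the first face in each photo
    with open(output_path, "wb") as f:
        f.write(pickle.dumps({"encodings": encodings}))
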
if __name__ == '__main__':
    print(recognize())