Preprocess.py
from mtcnn.mtcnn import MTCNN
from PIL import Image
import numpy as np
import os


def extract_face(filename, required_size=(160, 160)):
    # load image from file
    image = Image.open(filename)
    # convert to RGB, if needed
    image = image.convert('RGB')
    # convert to array
    pixels = np.asarray(image)
    # create the detector, using default weights
    detector = MTCNN()
    # detect faces in the image
    results = detector.detect_faces(pixels)
    # extract the bounding box from the first face
    x1, y1, width, height = results[0]['box']
    # deal with negative pixel index
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    # extract the face
    face = pixels[y1:y2, x1:x2]
    # resize pixels to the model size
    image = Image.fromarray(face)
    image = image.resize(required_size)
    face_array = np.asarray(image)
    return face_array
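
# Example (hypothetical path, shown only as a sketch): detect and crop the
# first face found in one image, returning a 160x160 RGB array suitable as
# FaceNet input:
#   face = extract_face('data/train/alice/img1.jpg')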


def load_face(dir):
    faces = list()
    # enumerate image files in the class directory
    for filename in os.listdir(dir):
        path = dir + filename
        face = extract_face(path)
        faces.append(face)
    return faces


def load_dataset(dir):
    # lists for faces and labels
    X, y = list(), list()
    for subdir in os.listdir(dir):
        path = dir + subdir + '/'
        # skip anything that is not a class sub-directory
        if not os.path.isdir(path):
            continue
        faces = load_face(path)
        labels = [subdir for i in range(len(faces))]
        # print progress
        print("loaded %d samples for class: %s" % (len(faces), subdir))
        X.extend(faces)
        y.extend(labels)
    return np.asarray(X), np.asarray(y)
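
# load_dataset expects one sub-directory per class and uses each sub-directory
# name as the label for the faces found inside it, e.g. (hypothetical layout):
#   data/train/alice/*.jpg
#   data/train/bob/*.jpg
#   X, y = load_dataset('data/train/')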


def get_embedding(model, face):
    # scale pixel values
    face = face.astype('float32')
    # standardize pixel values
    mean, std = face.mean(), face.std()
    face = (face - mean) / std
    # turn the face into a single-sample batch (3 dimensions to 4 dimensions)
    sample = np.expand_dims(face, axis=0)
    # make a prediction to get the embedding
    yhat = model.predict(sample)
    return yhat[0]
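

# --- Usage sketch (not part of the original functions above) ---
# A minimal example of how these helpers could be wired together, assuming a
# dataset laid out as data/train/<person_name>/<image>.jpg and a pre-trained
# Keras FaceNet model saved as 'facenet_keras.h5'; both the directory path and
# the model file name are assumptions, so adjust them to your setup.
if __name__ == '__main__':
    from tensorflow.keras.models import load_model

    # load cropped faces and their labels from the (assumed) training directory
    trainX, trainy = load_dataset('data/train/')

    # load the (assumed) pre-trained Keras FaceNet model
    model = load_model('facenet_keras.h5')

    # convert each face into an embedding vector
    embeddings = np.asarray([get_embedding(model, face) for face in trainX])
    print(embeddings.shape, trainy.shape)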