forked from krystianity/keras-serving
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest.py
83 lines (65 loc) · 2.58 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
# this script should be executed from its parent folder e.g. python ./test.py
"""Detect faces in a still image, predict each face's emotion and gender with
pretrained CNNs, save grayscale face cutouts, and write an annotated copy."""
import cv2
from keras.models import load_model
import numpy as np
from statistics import mode
from utils import preprocess_input
from utils import get_labels
import sys

# parameters
image_path = './images/test_image.jpg'
detection_model_path = './trained_models/haarcascade_frontalface_default.xml'
emotion_model_path = './trained_models/simple_CNN.530-0.65.hdf5'
gender_model_path = './trained_models/simple_CNN.81-0.96.hdf5'
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# pixel margins added around the detected face rectangle before cropping
# (larger margin for the RGB gender crop, smaller for the grayscale emotion crop)
x_offset_emotion = 20
y_offset_emotion = 40
x_offset = 30
y_offset = 60

# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path)
gender_classifier = load_model(gender_model_path)

frame = cv2.imread(image_path)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
faces = face_detection.detectMultiScale(gray, 1.3, 5)

export_count = 0
for (x, y, w, h) in faces:
    # Clamp the expanded crop to the image bounds: a negative start index
    # would wrap around in NumPy and yield an empty/garbage crop for faces
    # near the top or left border.
    face = frame[max(0, y - y_offset):(y + h + y_offset),
                 max(0, x - x_offset):(x + w + x_offset)]
    gray_face = gray[max(0, y - y_offset_emotion):(y + h + y_offset_emotion),
                     max(0, x - x_offset_emotion):(x + w + x_offset_emotion)]
    try:
        face = cv2.resize(face, (48, 48))
        gray_face = cv2.resize(gray_face, (48, 48))
    except cv2.error:
        # resize raises on an empty crop (face too close to the border) —
        # skip this detection rather than crash
        continue
    cv2.imwrite('./images/cutouts/' + str(export_count) + '.png', gray_face)
    export_count += 1

    # gender prediction: RGB crop -> (1, 48, 48, 3), normalized
    face = np.expand_dims(face, 0)
    face = preprocess_input(face)
    gender_label_arg = np.argmax(gender_classifier.predict(face))
    gender = gender_labels[gender_label_arg]

    # emotion prediction: grayscale crop -> (1, 48, 48, 1), normalized
    gray_face = preprocess_input(gray_face)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion = emotion_labels[emotion_label_arg]

    # color-code the annotation by predicted gender
    # (red for gender_labels[0], blue otherwise; frame is RGB at this point)
    if gender == gender_labels[0]:
        gender_color = (0, 0, 255)
    else:
        gender_color = (255, 0, 0)
    cv2.rectangle(frame, (x, y), (x + w, y + h), gender_color, 2)
    cv2.putText(frame, emotion, (x, y - 90), font,
                2, gender_color, 2, cv2.LINE_AA)
    cv2.putText(frame, gender, (x, y - 90 + 70), font,
                2, gender_color, 2, cv2.LINE_AA)

# convert back to BGR for OpenCV's writer, then save the annotated image
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imwrite('./images/predicted_test_image.png', frame)