index.py
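"""
Age prediction from a single image.

The script detects faces with OpenCV's DNN face detector (a res10 SSD Caffe
model), classifies each detected face into one of eight age brackets with a
Caffe age-classification network, and draws the predicted bracket and its
confidence on the frame.
"""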
import cv2
import numpy as np
# Age model: the prototxt describes the network, the caffemodel holds the weights
AGE_PROTO = 'weights/deploy_age.prototxt'
AGE_MODEL = 'weights/age_net.caffemodel'
# Mean pixel values the age model was trained with (subtracted during preprocessing)
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# The eight age brackets the age model can predict
AGE_INTERVALS = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)',
                 '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']
# Face detector: res10 SSD Caffe model with a 300x300 input
FACE_PROTO = "weights/deploy.prototxt.txt"
FACE_MODEL = "weights/res10_300x300_ssd_iter_140000_fp16.caffemodel"
# Maximum width used when resizing large input frames
frame_width = 1280
frame_height = 720
# cv2.dnn.readNetFromCaffe expects the .prototxt first, then the .caffemodel
face_net = cv2.dnn.readNetFromCaffe(FACE_PROTO, FACE_MODEL)
age_net = cv2.dnn.readNetFromCaffe(AGE_PROTO, AGE_MODEL)
def get_faces(frame, confidence_threshold=0.5):
    """Return the box coordinates (start_x, start_y, end_x, end_y) of all detected faces."""
    frame_h, frame_w = frame.shape[:2]
    # preprocess the frame: resize to 300x300 and subtract the detector's mean values
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104, 177.0, 123.0))
    face_net.setInput(blob)
    # each output row is [image_id, label, confidence, x1, y1, x2, y2] with coords in [0, 1]
    output = np.squeeze(face_net.forward())
    faces = []
    for i in range(output.shape[0]):
        confidence = output[i, 2]
        if confidence > confidence_threshold:
            # scale the relative coordinates to the actual frame size
            box = output[i, 3:7] * np.array([frame_w, frame_h, frame_w, frame_h])
            # convert to integers
            start_x, start_y, end_x, end_y = box.astype(int)
            # widen the box a little
            start_x, start_y, end_x, end_y = start_x - 10, start_y - 10, end_x + 10, end_y + 10
            # clamp the box to the frame boundaries
            start_x = max(start_x, 0)
            start_y = max(start_y, 0)
            end_x = min(end_x, frame_w)
            end_y = min(end_y, frame_h)
            # append to our list
            faces.append((start_x, start_y, end_x, end_y))
    return faces
def display_img(title, img):
    """Display an image on screen and keep it open until the user presses a key."""
    # Display image on screen
    cv2.imshow(title, img)
    # Maintain output until user presses a key
    cv2.waitKey(0)
    # Destroy windows when user presses a key
    cv2.destroyAllWindows()
def get_optimal_font_scale(text, width):
    """Determine the optimal font scale based on the hosting frame width."""
    for scale in reversed(range(0, 60, 1)):
        text_size = cv2.getTextSize(text, fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=scale/10, thickness=1)
        new_width = text_size[0][0]
        if new_width <= width:
            return scale/10
    return 1
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize an image to the given width or height, preserving the aspect ratio."""
    # initialize the dimensions of the image to be resized and grab the image size
    dim = None
    (h, w) = image.shape[:2]
    # if both the width and height are None, then return the original image
    if width is None and height is None:
        return image
    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the dimensions
        r = width / float(w)
        dim = (width, int(h * r))
    # resize the image
    return cv2.resize(image, dim, interpolation=inter)
def predict_age(input_path: str):
    """Predict the age of the faces showing in the image."""
    # Read input image
    img = cv2.imread(input_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {input_path}")
    # Take a copy of the initial image and resize it if it is wider than frame_width
    frame = img.copy()
    if frame.shape[1] > frame_width:
        frame = image_resize(frame, width=frame_width)
    faces = get_faces(frame)
    for i, (start_x, start_y, end_x, end_y) in enumerate(faces):
        face_img = frame[start_y: end_y, start_x: end_x]
        # preprocess the face crop before passing it through the age network
        blob = cv2.dnn.blobFromImage(
            image=face_img, scalefactor=1.0, size=(227, 227),
            mean=MODEL_MEAN_VALUES, swapRB=False
        )
        # Predict age
        age_net.setInput(blob)
        age_preds = age_net.forward()
        print("="*30, f"Face {i+1} Prediction Probabilities", "="*30)
        for j in range(age_preds[0].shape[0]):
            print(f"{AGE_INTERVALS[j]}: {age_preds[0, j]*100:.2f}%")
        # pick the most probable age bracket
        best = age_preds[0].argmax()
        age = AGE_INTERVALS[best]
        age_confidence_score = age_preds[0][best]
        # Draw the box and label
        label = f"Age: {age} - {age_confidence_score*100:.2f}%"
        print(label)
        # get the position where to put the text (keep it inside the frame)
        y_pos = start_y - 15
        while y_pos < 15:
            y_pos += 15
        # write the text into the frame
        cv2.putText(frame, label, (start_x, y_pos),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), thickness=2)
        # draw the rectangle around the face
        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), color=(255, 0, 0), thickness=2)
    # Display processed image
    display_img('Age Estimator', frame)
    # save the image if you want
    # cv2.imwrite("predicted_age.jpg", frame)
if __name__ == '__main__':
    # Parse the image path from the command line, falling back to 'input.jpg'
    import sys
    image_path = sys.argv[1] if len(sys.argv) > 1 else 'input.jpg'
    predict_age(image_path)
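# Example usage (a sketch, assuming the Caffe weights are present in ./weights
# and an image exists at the given path; the path below is hypothetical):
#   python index.py path/to/photo.jpg
# With no argument the script falls back to 'input.jpg' in the working directory.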