import cv2
from keras.models import load_model
import numpy as np

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.inference import load_image
from utils.preprocessor import preprocess_input

# parameters for loading data and images
image_path = 'IMG.jpg'
detection_model_path = '../trained_models/haarcascade_frontalface_default.xml'
emotion_model_path = '../trained_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
gender_model_path = '../trained_models/simple_CNN.81-0.96.hdf5'
emotion_labels = get_labels('fer2013')
gender_labels = get_labels('imdb')
font = cv2.FONT_HERSHEY_SIMPLEX

# hyper-parameters for bounding box shape
# (wider offsets such as (30, 60) / (20, 40) enlarge the crop around the face)
gender_offsets = (10, 10)
emotion_offsets = (0, 0)

# loading models
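# (compile=False loads weights for inference only, skipping the training setup)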
face_detection = load_detection_model(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
gender_classifier = load_model(gender_model_path, compile=False)

# getting input model shapes for inference
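# Keras input_shape is (batch, height, width, channels), so [1:3] is (height, width)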
emotion_target_size = emotion_classifier.input_shape[1:3]
gender_target_size = gender_classifier.input_shape[1:3]

# loading images
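# the detector and the emotion model consume grayscale; the gender model and
# the drawn output use the RGB copy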
rgb_image = load_image(image_path, grayscale=False)
gray_image = load_image(image_path, grayscale=True)
gray_image = np.squeeze(gray_image)  # drop the singleton channel axis
gray_image = gray_image.astype('uint8')  # Haar cascades expect 8-bit input

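# detect faces in the grayscale frame; each entry is an (x, y, width, height) box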
faces = detect_faces(face_detection, gray_image)
for face_coordinates in faces:
    x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
    rgb_face = rgb_image[y1:y2, x1:x2]

    x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
    gray_face = gray_image[y1:y2, x1:x2]

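    # resize each crop to the input size its classifier expects; an empty
    # out-of-frame crop makes cv2.resize raise, so that face is skipped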
    try:
        rgb_face = cv2.resize(rgb_face, gender_target_size)
        gray_face = cv2.resize(gray_face, emotion_target_size)
    except cv2.error:
        continue

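    # gender branch: normalize the RGB crop and add a batch axis for predict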
    rgb_face = preprocess_input(rgb_face, False)
    rgb_face = np.expand_dims(rgb_face, 0)
    gender_prediction = gender_classifier.predict(rgb_face)
    gender_label_arg = np.argmax(gender_prediction)
    gender_text = gender_labels[gender_label_arg]

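    # emotion branch: the grayscale input also needs an explicit channel axis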
    gray_face = preprocess_input(gray_face, True)
    gray_face = np.expand_dims(gray_face, 0)
    gray_face = np.expand_dims(gray_face, -1)
    emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
    emotion_text = emotion_labels[emotion_label_arg]

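    # color-code the annotations by predicted gender (RGB ordering here;
    # the image is converted to BGR only when written out)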
    if gender_text == gender_labels[0]:
        color = (0, 0, 255)
    else:
        color = (255, 0, 0)

    draw_bounding_box(face_coordinates, rgb_image, color)
    draw_text(face_coordinates, rgb_image, gender_text, color, 0, -20, 1, 2)
    draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -50, 1, 2)

bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
cv2.imwrite('../images/predicted_test_image4.png', bgr_image)
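# note: the relative paths assume the script is run from the repository's
# source directory, with models under ../trained_models and output in ../images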