
Commit 8c24176

Merge pull request #424 from adityagandhamal/facial-expressions-detection
Add facial expressions detection application
2 parents c25d288 + a2435d8 commit 8c24176

6 files changed: +1218 -0 lines changed
Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
# Facial expressions detection

This project detects human facial expressions using Deep Learning and Computer Vision with the help of Python.

The facial features for detection are extracted using [OpenCV](https://github.com/opencv/opencv) with the help of the Haar Cascade Classifier `haarcascade_frontalface_default.xml`. You can download the .xml file [here.](https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml)
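Loading the cascade and detecting faces takes only a couple of OpenCV calls. A minimal sketch (the image path is just a placeholder, and the .xml file is assumed to sit next to the script):

```python
import cv2

# Load the Haar cascade downloaded from the link above
cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Read a test image (placeholder path) and detect faces in a grayscale copy
img = cv2.imread("my_face.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

# Draw a box around every detected face
for x, y, w, h in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
```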
### Prerequisites

- #### TensorFlow 2.0

- #### Keras

- #### OpenCV

- #### Python >= 3.7

- #### NumPy
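The install step below relies on a `requirements.txt`, which isn't visible in this excerpt; a minimal file matching the prerequisites above might look like the following (the exact pins are an assumption):

```text
tensorflow>=2.0
keras
opencv-python
numpy
```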
### Here's what you can do to use this project to detect your facial expressions using the webcam:

1. [Clone](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/cloning-a-repository) this repo or download the zip folder.

2. Go to the directory where this repo was cloned or downloaded onto your local machine and open cmd.

3. Install the prerequisites by running `pip install -r requirements.txt` in the cmd.

4. Run the `detection_on_vid.py` file, try making different expressions such as Happy, Sad, Angry and Surprised faces, and notice the detections made.
### Here's a snippet

![expression_detection](https://user-images.githubusercontent.com/61016383/94587769-afa85680-02a0-11eb-897f-ffe8d88becbe.gif)


## *Author Name*

ADITYA GANDHAMAL
Binary file not shown.
Lines changed: 102 additions & 0 deletions
@@ -0,0 +1,102 @@
# IMPORTS

import cv2
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model


# LOADING THE MODEL

model = load_model("detection_model.h5")


def face_extraction(frame):

    ''' Detect faces in a frame and extract them '''

    faces = cascade_model.detectMultiScale(frame, 1.1, 5)

    # Keep the crop of the last face found (assumes at least one face is present)
    for x, y, w, h in faces:
        cropped_img = frame[y:y+h, x:x+w]

    return cropped_img


def image_processing(frame):

    ''' Preprocessing of the image for predictions '''

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # model expects grayscale input
    frame = cv2.resize(frame, (48, 48))               # 48x48 is the model's input size
    frame = image.img_to_array(frame)
    frame = frame / 255                               # scale pixel values to [0, 1]
    frame = np.expand_dims(frame, axis=0)             # add the batch dimension

    return frame


def detect_expressions(frame, detection_model):

    ''' Detect final expressions and return the predictions
        done by the detection_model '''

    cropped_frame = face_extraction(frame)

    test_frame = image_processing(cropped_frame)

    prediction = np.argmax(detection_model.predict(test_frame), axis=-1)

    return prediction


# LOAD IMAGE

img = cv2.imread("./test_images/Swift2.jpg")


# LOADING HAAR CASCADE CLASSIFIER

cascade_model = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faces = cascade_model.detectMultiScale(img, 1.1, 10)


font = cv2.FONT_ITALIC
for x, y, w, h in faces:
    cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)

    prediction = detect_expressions(img, model)

    # Class indices: 0 = Angry, 1 = Happy, 2 = Sad, anything else = Surprised
    if prediction == [0]:
        cv2.putText(img, "Angry", (x, y), font, 2, (0, 0, 255), 2)

    elif prediction == [1]:
        cv2.putText(img, "Happy", (x, y), font, 2, (0, 0, 255), 2)

    elif prediction == [2]:
        cv2.putText(img, "Sad", (x, y), font, 2, (0, 0, 255), 2)

    else:
        cv2.putText(img, "Surprised", (x, y), font, 2, (0, 0, 255), 2)


cv2.imshow("img", img)


# Cleaning

cv2.waitKey(0)
cv2.destroyAllWindows()
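One thing worth noting about the script above: `face_extraction` assumes at least one face is in the frame; if `detectMultiScale` finds nothing, `cropped_img` is never assigned and the call raises. (The webcam script below guards against this by wrapping its loop body in `try`/`except`.) A sketch of a more explicit variant, under a hypothetical name:

```python
def face_extraction_safe(frame, cascade):
    ''' Return the crop of the first detected face, or None when no face is found '''
    faces = cascade.detectMultiScale(frame, 1.1, 5)
    if len(faces) == 0:
        return None
    x, y, w, h = faces[0]
    return frame[y:y + h, x:x + w]
```

Callers would then skip prediction whenever the return value is None.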
Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
# IMPORTS

import cv2
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model


# LOADING THE MODEL

model = load_model("detection_model.h5")


def face_extraction(frame):

    ''' Detect faces in a frame and extract them '''

    faces = cascade_model.detectMultiScale(frame, 1.1, 5)

    # Keep the crop of the last face found (assumes at least one face is present)
    for x, y, w, h in faces:
        cropped_img = frame[y:y+h, x:x+w]

    return cropped_img


def image_processing(frame):

    ''' Preprocessing of the image for predictions '''

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   # model expects grayscale input
    frame = cv2.resize(frame, (48, 48))               # 48x48 is the model's input size
    frame = image.img_to_array(frame)
    frame = frame / 255                               # scale pixel values to [0, 1]
    frame = np.expand_dims(frame, axis=0)             # add the batch dimension

    return frame


def detect_expressions(frame, detection_model):

    ''' Detect final expressions and return the predictions
        done by the detection_model '''

    cropped_frame = face_extraction(frame)

    test_frame = image_processing(cropped_frame)

    prediction = np.argmax(detection_model.predict(test_frame), axis=-1)

    return prediction


# LOADING HAAR CASCADE CLASSIFIER

cascade_model = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")


# CAPTURE VIDEO (ON WEBCAM)

cap = cv2.VideoCapture(0)

while cap.isOpened():

    _, frame = cap.read()

    try:

        faces = cascade_model.detectMultiScale(frame, 1.1, 5)  # Detect faces in a frame

        for x, y, w, h in faces:

            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

            prediction = detect_expressions(frame, model)

            font = cv2.FONT_ITALIC

            # Class indices: 0 = Angry, 1 = Happy, 2 = Sad, anything else = Surprised
            if prediction == [0]:
                cv2.putText(frame, "Angry", (x, y), font, 1, (0, 0, 255), 2)

            elif prediction == [1]:
                cv2.putText(frame, "Happy", (x, y), font, 1, (0, 0, 255), 2)

            elif prediction == [2]:
                cv2.putText(frame, "Sad", (x, y), font, 1, (0, 0, 255), 2)

            else:
                cv2.putText(frame, "Surprised", (x, y), font, 1, (0, 0, 255), 2)

        cv2.imshow("frame", frame)

        # Press Esc to quit
        if cv2.waitKey(1) & 0xFF == 27:
            break

    except Exception:
        break


# Cleaning

cap.release()
cv2.destroyAllWindows()
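Both scripts branch on the raw prediction with an if/elif chain, with anything other than 0 (Angry), 1 (Happy) or 2 (Sad) labelled Surprised. A dictionary lookup is one way to keep that index-to-label mapping in a single place; a small sketch of that alternative:

```python
# Class indices as assumed by the scripts above
LABELS = {0: "Angry", 1: "Happy", 2: "Sad", 3: "Surprised"}

def label_for(prediction):
    ''' Turn the argmax output (a one-element array) into a display string '''
    return LABELS.get(int(prediction[0]), "Surprised")
```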
