Update app.py
Browse files
app.py
CHANGED
@@ -43,7 +43,7 @@ def eye_aspect_ratio(eye):
|
|
43 |
|
44 |
def detect_eyes(landmarks, img, threshold):
|
45 |
lm = landmarks
|
46 |
-
eyes = np.array(lm[
|
47 |
|
48 |
left_eye = eyes[0:6]
|
49 |
right_eye = eyes[6:12]
|
@@ -60,23 +60,35 @@ def detect_eyes(landmarks, img, threshold):
|
|
60 |
|
61 |
def proc_image(img, detector):
|
62 |
detected_faces = detector.detect_faces(img)
|
63 |
-
|
|
|
64 |
return img
|
|
|
65 |
detected_landmarks = detector.detect_landmarks(img, detected_faces)
|
|
|
|
|
|
|
|
|
|
|
66 |
detected_emotions = detector.detect_emotions(img, detected_faces, detected_landmarks)
|
67 |
-
|
68 |
-
eye_dict = {True: "Eyes Open", False: "Eyes Closed"}
|
69 |
|
70 |
em = detected_emotions[0]
|
71 |
em_labels = em.argmax(axis=1)
|
72 |
|
73 |
-
|
|
|
|
|
74 |
(x0, y0, x1, y1, p) = face
|
75 |
-
|
76 |
-
cv.
|
77 |
-
cv.putText(img,
|
78 |
-
|
79 |
-
|
|
|
|
|
|
|
|
|
80 |
|
81 |
def image_processing(frame):
|
82 |
return proc_image(img, detector) if recog else img
|
@@ -118,10 +130,10 @@ with stream:
|
|
118 |
frame = image_select(
|
119 |
label="Try the classifier on one of the provided examples!",
|
120 |
images=[
|
121 |
-
"ex0.jpg",
|
122 |
"ex1.jpg",
|
123 |
-
"
|
124 |
-
"
|
|
|
125 |
],
|
126 |
use_container_width= False
|
127 |
)
|
|
|
43 |
|
44 |
def detect_eyes(landmarks, img, threshold):
|
45 |
lm = landmarks
|
46 |
+
eyes = np.array(lm[36:48], np.int32)
|
47 |
|
48 |
left_eye = eyes[0:6]
|
49 |
right_eye = eyes[6:12]
|
|
|
60 |
|
61 |
def proc_image(img, detector):
    """Annotate every detected face in ``img`` with an emotion label, an
    eyes-open/closed caption, and a bounding box; also draws a face-count
    banner in the top-left corner.

    Parameters
    ----------
    img : image array (H x W x C), drawn on in place and returned.
    detector : object exposing ``detect_faces`` / ``detect_landmarks`` /
        ``detect_emotions`` — presumably a py-feat ``Detector``; confirm.

    Returns
    -------
    The same ``img``, annotated (unchanged when no face is found).
    """
    detected_faces = detector.detect_faces(img)
    faces_detected = len(detected_faces[0])
    # Nothing to annotate — hand the frame back untouched.
    if faces_detected < 1:
        return img

    detected_landmarks = detector.detect_landmarks(img, detected_faces)
    # NOTE(review): asserts are stripped under `python -O`; acceptable here as
    # internal sanity checks, not input validation.
    assert len(detected_landmarks[0]) == faces_detected, "Number of faces and landmarks are mismatched!"

    # Eye state per face (EAR threshold 0.20) mapped to a display caption.
    is_eye_open = [detect_eyes(face, img, 0.20) for face in detected_landmarks[0]]
    eye_dict = {True: "eyes open", False: "eyes closed"}

    detected_emotions = detector.detect_emotions(img, detected_faces, detected_landmarks)
    assert len(detected_emotions[0]) == faces_detected, "Number of faces and emotions are mismatched!"

    em = detected_emotions[0]
    # Index of the strongest emotion per face (row-wise argmax).
    em_labels = em.argmax(axis=1)

    # Hoisted loop invariants: text scale relative to a 704-px-tall reference
    # frame, and the face-count banner (previously redrawn identically once
    # per face — drawing it once yields the same pixels).
    res_scale = img.shape[0] / 704
    cv.putText(img, f"{faces_detected } face(s) found", (0, int(25 * res_scale * 1.5)),
               fontFace=0, color=(0, 255, 0), thickness=2, fontScale=res_scale)

    for face, has_open_eyes, label in zip(detected_faces[0],
                                          (eye_dict[eyes] for eyes in is_eye_open),
                                          em_labels):
        (x0, y0, x1, y1, p) = face  # p: detection confidence (unused)
        cv.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)),
                     color=(0, 0, 255), thickness=3)
        # Emotion label below the box, eye state above it.
        cv.putText(img, FEAT_EMOTION_COLUMNS[label],
                   (int(x0) - 10, int(y1 + 25 * res_scale * 1.5)),
                   fontFace=0, color=(0, 255, 0), thickness=2, fontScale=res_scale)
        cv.putText(img, has_open_eyes, (int(x0) - 10, int(y0) - 10),
                   fontFace=0, color=(0, 255, 0), thickness=2, fontScale=res_scale)
    return img
|
89 |
+
|
90 |
+
def extract_feat():
    """Placeholder feature extractor: returns the fixed list [1, 2, 3, 4, 5]."""
    return list(range(1, 6))
|
92 |
|
93 |
def image_processing(frame):
    # Run face/emotion annotation only when recognition is toggled on.
    # NOTE(review): the `frame` parameter is unused — this reads the
    # module-level `img`, `recog` and `detector` instead. Presumably `img`
    # aliases the current frame at module level; confirm against the caller,
    # or this should operate on `frame`.
    return proc_image(img, detector) if recog else img
|
|
|
130 |
# Thumbnail gallery of bundled example images (streamlit-image-select);
# `frame` receives the path of the image the user clicked.
frame = image_select(
    label="Try the classifier on one of the provided examples!",
    images=[
        "ex1.jpg",
        "ex4.jpg",
        "ex5.jpg",
        "ex6.jpg",
    ],
    use_container_width= False
)
|