SS8297 committed
Commit 86f9742 · 1 parent: 36bfa41

Update app.py

Files changed (1):
  app.py (+37 -11)
app.py CHANGED
@@ -9,6 +9,7 @@ import cv2 as cv
 import numpy as np
 import math
 import torch
+from torch import nn
 from PIL import Image
 from feat import Detector
 from feat.utils import FEAT_EMOTION_COLUMNS
@@ -22,7 +23,6 @@ get_resource_path = _get_resource_path
 os.environ["TWILIO_ACCOUNT_SID"] = "ACf1e76f3fd6e9cbca940decc4ed443c20"
 os.environ["TWILIO_AUTH_TOKEN"] = "5cadf5cc7120dd995f11b3dc57e46d52"
 
-
 def get_ice_servers():
     try:
         account_sid = os.environ["TWILIO_ACCOUNT_SID"]
@@ -37,6 +37,28 @@ def get_ice_servers():
 
     return token.ice_servers
 
+class MyNeuralNetwork(nn.Module):
+    def __init__(self, layers, dropout):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(70, layers[0]),
+            nn.LeakyReLU(),
+            nn.Dropout(p = dropout[0]),
+            nn.Linear(layers[0], layers[1]),
+            nn.LeakyReLU(),
+            nn.Dropout(p = dropout[1]),
+            nn.Linear(layers[1], layers[2]),
+            nn.LeakyReLU(),
+            nn.Dropout(p = dropout[2]),
+            nn.Linear(layers[2], layers[3]),
+            nn.LeakyReLU(),
+            nn.Dropout(p = dropout[3]),
+            nn.Linear(layers[3], 7),
+        )
+
+    def forward(self, inputs):
+        return self.net(inputs)
+
 def eye_aspect_ratio(eye):
 
     A = math.dist(eye[1], eye[5])
@@ -77,15 +99,18 @@ def proc_image(img, detector):
     is_eye_open = [detect_eyes(face, img, 0.20) for face in detected_landmarks[0]]
     eye_dict = {True: "eyes open", False: "eyes closed"}
 
-    detected_emotions = detector.detect_emotions(img, detected_faces, detected_landmarks)
-    assert len(detected_emotions[0]) == faces_detected, "Number of faces and emotions are mismatched!"
-
-    em = detected_emotions[0]
-    em_labels = em.argmax(axis=1)
-
-
+    device = (
+        "cuda"
+        if torch.cuda.is_available()
+        else "cpu"
+    )
 
-    for face, has_open_eyes, label in zip(detected_faces[0], (eye_dict[eyes] for eyes in is_eye_open), em_labels):
+    emo_model = torch.load("acc_96.8", map_location=device)
+    features = [torch.tensor(np.array(extract_features(*object)).astype(np.float32)).to(device) for object in zip(detected_landmarks[0], detected_faces[0])]
+    detected_emotions = [emo_model(facefeat).softmax(dim=0).argmax(dim=0).to("cpu") for facefeat in features]
+    assert len(detected_emotions) == faces_detected, "Number of faces and emotions are mismatched!"
+
+    for face, has_open_eyes, label in zip(detected_faces[0], (eye_dict[eyes] for eyes in is_eye_open), detected_emotions):
         (x0, y0, x1, y1, p) = face
         res_scale = img.shape[0]/704
         cv.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)), color = (0, 0, 255), thickness = 3)
@@ -94,8 +119,9 @@ def proc_image(img, detector):
         cv.putText(img, has_open_eyes, (int(x0)-10, int(y0)-10), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
     return img
 
-def extract_feat():
-    return [1,2,3,4,5]
+def extract_features(landmarks, face):
+    features = [math.dist(landmarks[33], landmark) for landmark in landmarks] + [face[2] - face[0], face[3] - face[1]]
+    return features
 
 def image_processing(frame):
     return proc_image(img, detector) if recog else img
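
Note: the checkpoint "acc_96.8" that proc_image now loads is not part of this commit. Below is a minimal sketch, assuming the model was trained elsewhere and pickled whole with torch.save, of how such a checkpoint could be produced and queried. The layer widths and dropout rates are illustrative assumptions; only the input size (70 = 68 landmark distances computed by extract_features plus box width and height) and the 7-class output are fixed by the diff.

# Illustrative only — hypothetical training-side counterpart to the loading code in proc_image.
import torch
from app import MyNeuralNetwork  # the class added in this commit

model = MyNeuralNetwork(layers=[128, 64, 32, 16], dropout=[0.3, 0.3, 0.2, 0.1])  # assumed sizes
# ... training on (70-feature, emotion-label) pairs would happen here ...
model.eval()
torch.save(model, "acc_96.8")                            # pickle the whole module
emo_model = torch.load("acc_96.8", map_location="cpu")   # mirrors the call in proc_image
                                                         # (PyTorch >= 2.6 needs weights_only=False)
dummy = torch.zeros(70)                                  # one feature vector from extract_features
print(emo_model(dummy).softmax(dim=0).argmax(dim=0))     # predicted emotion index, 0-6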