SS8297 committed on
Commit
01d24cc
·
1 Parent(s): 9aab27b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -8
app.py CHANGED
@@ -12,9 +12,10 @@ import torch
12
  from torch import nn
13
  from PIL import Image
14
  from feat import Detector
15
- from feat.utils import FEAT_EMOTION_COLUMNS
16
  from feat.utils.io import get_resource_path
17
 
 
 
18
  def _get_resource_path():
19
  return "/home/user/app/resources"
20
 
@@ -114,7 +115,7 @@ def proc_image(img, detector):
114
  (x0, y0, x1, y1, p) = face
115
  res_scale = img.shape[0]/704
116
  cv.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)), color = (0, 0, 255), thickness = 3)
117
- cv.putText(img, FEAT_EMOTION_COLUMNS[label], (int(x0)-10, int(y1+25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
118
  cv.putText(img, f"{faces_detected } face(s) found", (0, int(25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
119
  cv.putText(img, has_open_eyes, (int(x0)-10, int(y0)-10), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
120
  return img
@@ -124,7 +125,8 @@ def extract_features(landmarks, face):
124
  return features
125
 
126
  def image_processing(img):
127
- return proc_image(img, detector) if recog else img
 
128
 
129
  def video_frame_callback(frame):
130
  img = frame.to_ndarray(format="bgr24")
@@ -170,6 +172,7 @@ with stream:
170
  media_stream_constraints={"video": True, "audio": False},
171
  async_processing=True,
172
  )
 
173
  else:
174
  pic = st.container()
175
  frame = image_select(
@@ -183,8 +186,4 @@ with stream:
183
  use_container_width= False
184
  )
185
  img = np.array(Image.open(frame))
186
- pic.image(image_processing(img), use_column_width = "always")
187
-
188
-
189
- recog = st.toggle(":green[Emotion recogntion]", key = "stream", value = True)
190
-
 
12
  from torch import nn
13
  from PIL import Image
14
  from feat import Detector
 
15
  from feat.utils.io import get_resource_path
16
 
17
+ CLASS_LABELS = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
18
+
19
  def _get_resource_path():
20
  return "/home/user/app/resources"
21
 
 
115
  (x0, y0, x1, y1, p) = face
116
  res_scale = img.shape[0]/704
117
  cv.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)), color = (0, 0, 255), thickness = 3)
118
+ cv.putText(img, CLASS_LABELS[label], (int(x0)-10, int(y1+25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
119
  cv.putText(img, f"{faces_detected } face(s) found", (0, int(25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
120
  cv.putText(img, has_open_eyes, (int(x0)-10, int(y0)-10), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
121
  return img
 
125
  return features
126
 
127
  def image_processing(img):
128
+ ann = proc_image(img, detector) if recog else img
129
+ return ann
130
 
131
  def video_frame_callback(frame):
132
  img = frame.to_ndarray(format="bgr24")
 
172
  media_stream_constraints={"video": True, "audio": False},
173
  async_processing=True,
174
  )
175
+ recog = st.toggle(":green[Emotion recogntion]", key = "stream", value = True)
176
  else:
177
  pic = st.container()
178
  frame = image_select(
 
186
  use_container_width= False
187
  )
188
  img = np.array(Image.open(frame))
189
+ pic.image(image_processing(img), use_column_width = "always")