SS8297 committed on
Commit
76ca5c2
·
1 Parent(s): ba0c2f9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -131
app.py CHANGED
@@ -12,134 +12,4 @@ from feat.utils import FEAT_EMOTION_COLUMNS
12
  import torch
13
  from PIL import Image
14
 
15
- os.environ["TWILIO_ACCOUNT_SID"] = "ACf1e76f3fd6e9cbca940decc4ed443c20"
16
- os.environ["TWILIO_AUTH_TOKEN"] = "56a1d1ee494933269fe042706392ac9f"
17
-
18
-
19
- def get_ice_servers():
20
- try:
21
- account_sid = os.environ["TWILIO_ACCOUNT_SID"]
22
- auth_token = os.environ["TWILIO_AUTH_TOKEN"]
23
- except KeyError:
24
- logger.warning("TURN credentials are not set. Fallback to a free STUN server from Google.")
25
- return [{"urls": ["stun:stun.l.google.com:19302"]}]
26
-
27
- client = Client(account_sid, auth_token)
28
-
29
- token = client.tokens.create()
30
-
31
- return token.ice_servers
32
-
33
- def eye_aspect_ratio(eye):
34
-
35
- A = math.dist(eye[1], eye[5])
36
- B = math.dist(eye[2], eye[4])
37
-
38
- C = math.dist(eye[0], eye[3])
39
-
40
- ear = (A + B) / (2.0 * C)
41
-
42
- return ear
43
-
44
- def detect_eyes(landmarks, img, threshold):
45
- lm = landmarks
46
- eyes = np.array(lm[36:48], np.int32)
47
-
48
- left_eye = eyes[0:6]
49
- right_eye = eyes[6:12]
50
- ear = max(eye_aspect_ratio(left_eye), eye_aspect_ratio(right_eye))
51
- left_eye = left_eye.reshape((-1,1,2))
52
- right_eye = right_eye.reshape((-1,1,2))
53
- cv.polylines(img, [left_eye], True, (0, 255, 255))
54
- cv.polylines(img, [right_eye], True, (255, 0, 255))
55
-
56
- if (ear > threshold):
57
- return True
58
- else:
59
- return False
60
-
61
- def proc_image(img, detector):
62
- detected_faces = detector.detect_faces(img)
63
- faces_detected = len(detected_faces[0])
64
- if ( faces_detected < 1):
65
- return img
66
-
67
- detected_landmarks = detector.detect_landmarks(img, detected_faces)
68
- assert len(detected_landmarks[0]) == faces_detected, "Number of faces and landsmarks are mismatched!"
69
-
70
- is_eye_open = [detect_eyes(face, img, 0.20) for face in detected_landmarks[0]]
71
- eye_dict = {True: "eyes open", False: "eyes closed"}
72
-
73
- detected_emotions = detector.detect_emotions(img, detected_faces, detected_landmarks)
74
- assert len(detected_emotions[0]) == faces_detected, "Number of faces and emotions are mismatched!"
75
-
76
- em = detected_emotions[0]
77
- em_labels = em.argmax(axis=1)
78
-
79
-
80
-
81
- for face, has_open_eyes, label in zip(detected_faces[0], (eye_dict[eyes] for eyes in is_eye_open), em_labels):
82
- (x0, y0, x1, y1, p) = face
83
- res_scale = img.shape[0]/704
84
- cv.rectangle(img, (int(x0), int(y0)), (int(x1), int(y1)), color = (0, 0, 255), thickness = 3)
85
- cv.putText(img, FEAT_EMOTION_COLUMNS[label], (int(x0)-10, int(y1+25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
86
- cv.putText(img, f"{faces_detected } face(s) found", (0, int(25*res_scale*1.5)), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
87
- cv.putText(img, has_open_eyes, (int(x0)-10, int(y0)-10), fontFace = 0, color = (0, 255, 0), thickness = 2, fontScale = res_scale)
88
- return img
89
-
90
- def extract_feat():
91
- return [1,2,3,4,5]
92
-
93
- def image_processing(frame):
94
- return proc_image(img, detector) if recog else img
95
-
96
- def video_frame_callback(frame):
97
- img = frame.to_ndarray(format="bgr24")
98
-
99
- ann = proc_image(img, detector) if recog else img
100
-
101
- return av.VideoFrame.from_ndarray(ann, format="bgr24")
102
-
103
- detector = Detector(face_model="retinaface", landmark_model= "pfld", au_model = "xgb", emotion_model="resmasknet")
104
- source = "Webcam"
105
- recog = True
106
-
107
- source = st.radio(
108
- label = "Image source for emotion recognition",
109
- options = ["Webcam", "Images"],
110
- horizontal = True,
111
- label_visibility = "collapsed",
112
- args = (source, )
113
- )
114
-
115
- has_cam = True if (source == "Webcam") else False
116
-
117
- stream = st.container()
118
- with stream:
119
- if has_cam:
120
- webrtc_streamer(
121
- key="example",
122
- mode=WebRtcMode.SENDRECV,
123
- video_frame_callback=video_frame_callback,
124
- rtc_configuration={ "iceServers": get_ice_servers() },
125
- media_stream_constraints={"video": True, "audio": False},
126
- async_processing=True,
127
- )
128
- else:
129
- pic = st.container()
130
- frame = image_select(
131
- label="Try the classifier on one of the provided examples!",
132
- images=[
133
- "ex1.jpg",
134
- "ex4.jpg",
135
- "ex5.jpg",
136
- "ex6.jpg",
137
- ],
138
- use_container_width= False
139
- )
140
- img = np.array(Image.open(frame))
141
- pic.image(image_processing(img), width = 704)
142
-
143
-
144
- recog = st.toggle(":green[Emotion recogntion]", key = "stream", value = True)
145
-
 
12
  import torch
13
  from PIL import Image
14
 
15
+ st.text(f"{os.getcwd()}")