Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -7,6 +7,15 @@ import easyocr
|
|
7 |
import numpy as np
|
8 |
import pandas as pd
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
# ——— Load and preprocess NRC EmoLex ——————————————————————————————————
|
11 |
# Make sure this filename matches exactly what you’ve uploaded
|
12 |
EMOLEX_PATH = "NRC-Emotion-Lexicon-Wordlevel-v0.92.txt"
|
@@ -84,6 +93,7 @@ THRESHOLDS = {
|
|
84 |
# ——— 3) Initialize EasyOCR reader ————————————————————————————————————————————
|
85 |
ocr_reader = easyocr.Reader(["en"], gpu=False)
|
86 |
|
|
|
87 |
# ——— 4) Emotional-Tone Tagging —————————————————————————————————————————————
|
88 |
def get_emotional_tone_tag(emotion_profile, patterns, text_lower):
|
89 |
"""
|
@@ -105,117 +115,118 @@ def get_emotional_tone_tag(emotion_profile, patterns, text_lower):
|
|
105 |
emo: sum(EMOLEX.get(w, {}).get(emo, 0) for w in words)
|
106 |
for emo in ["anger","joy","sadness","fear","disgust"]
|
107 |
}
|
|
|
108 |
# 0. Support override
|
109 |
if lex_counts["joy"] > 0 and any(k in text_lower for k in ["support","hope","grace"]):
|
110 |
return "supportive"
|
111 |
|
112 |
-
# 1. Performative Regret
|
113 |
if sadness > 0.4 \
|
114 |
and (lex_counts["sadness"] > 0 or any(p in patterns for p in ["blame shifting","guilt tripping","recovery phase"])):
|
115 |
return "performative regret"
|
116 |
|
117 |
-
# 2. Coercive Warmth
|
118 |
if (joy > 0.3 or sadness > 0.4) \
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
|
123 |
-
# 3. Cold Invalidation
|
124 |
if (neutral + disgust) > 0.5 \
|
125 |
and lex_counts["disgust"] > 0 \
|
126 |
and any(p in patterns for p in ["dismissiveness","projection","obscure language"]):
|
127 |
-
|
128 |
|
129 |
-
# 4. Genuine Vulnerability
|
130 |
if (sadness + fear) > 0.5 \
|
131 |
and lex_counts["sadness"] > 0 and lex_counts["fear"] > 0 \
|
132 |
and all(p == "recovery phase" for p in patterns):
|
133 |
-
|
134 |
|
135 |
-
# 5. Emotional Threat
|
136 |
if (anger + disgust) > 0.5 \
|
137 |
and (lex_counts["anger"] > 0 or lex_counts["disgust"] > 0) \
|
138 |
and any(p in patterns for p in ["control","threat","insults","dismissiveness"]):
|
139 |
-
|
140 |
|
141 |
-
# 6. Weaponized Sadness
|
142 |
if sadness > 0.6 \
|
143 |
and lex_counts["sadness"] > 0 \
|
144 |
and any(p in patterns for p in ["guilt tripping","projection"]):
|
145 |
-
|
146 |
|
147 |
-
# 7. Toxic Resignation
|
148 |
if neutral > 0.5 \
|
149 |
and any(p in patterns for p in ["dismissiveness","obscure language"]) \
|
150 |
and lex_counts["disgust"] == 0:
|
151 |
-
|
152 |
|
153 |
-
# 8. Indignant Reproach
|
154 |
if anger > 0.5 \
|
155 |
and lex_counts["anger"] > 0 \
|
156 |
and any(p in patterns for p in ["guilt tripping","contradictory statements"]):
|
157 |
-
|
158 |
|
159 |
-
# 9. Confrontational
|
160 |
if anger > 0.6 \
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
|
165 |
-
# 10. Passive Aggression
|
166 |
if neutral > 0.6 \
|
167 |
and lex_counts["disgust"] > 0 \
|
168 |
and any(p in patterns for p in ["dismissiveness","projection"]):
|
169 |
-
|
170 |
|
171 |
-
# 11. Sarcastic Mockery
|
172 |
if joy > 0.3 \
|
173 |
and lex_counts["joy"] > 0 \
|
174 |
and "insults" in patterns:
|
175 |
-
|
176 |
|
177 |
-
# 12. Menacing Threat
|
178 |
if fear > 0.3 \
|
179 |
and lex_counts["fear"] > 0 \
|
180 |
and "threat" in patterns:
|
181 |
-
|
182 |
|
183 |
-
# 13. Pleading Concern
|
184 |
if sadness > 0.3 \
|
185 |
and lex_counts["sadness"] > 0 \
|
186 |
and any(k in text_lower for k in APOLOGY_KEYWORDS) \
|
187 |
and not patterns:
|
188 |
-
|
189 |
|
190 |
-
# 14. Fear-mongering
|
191 |
if (fear + disgust) > 0.5 \
|
192 |
and lex_counts["fear"] > 0 \
|
193 |
and "projection" in patterns:
|
194 |
-
|
195 |
|
196 |
-
# 15. Disbelieving Accusation
|
197 |
if surprise > 0.3 \
|
198 |
and lex_counts["surprise"] > 0 \
|
199 |
and "blame shifting" in patterns:
|
200 |
-
|
201 |
|
202 |
-
# 16. Empathetic Solidarity
|
203 |
if joy > 0.2 and sadness > 0.2 \
|
204 |
and lex_counts["joy"] > 0 and lex_counts["sadness"] > 0 \
|
205 |
and not patterns:
|
206 |
-
|
207 |
|
208 |
-
# 17. Assertive Boundary
|
209 |
if anger > 0.4 \
|
210 |
-
|
211 |
-
|
212 |
-
|
213 |
|
214 |
-
# 18. Stonewalling
|
215 |
if neutral > 0.7 \
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
|
220 |
return None
|
221 |
|
|
|
7 |
import numpy as np
|
8 |
import pandas as pd
|
9 |
|
10 |
+
def _load_mpqa_lexicon(path="subj_lexicon.tff"):
    """Parse the MPQA subjectivity lexicon (.tff) into a word-keyed dict.

    Each data line looks like:
        type=weaksubj len=1 word1=abandon pos1=verb stemmed1=y priorpolarity=negative

    Args:
        path: location of the .tff lexicon file.

    Returns:
        dict mapping word -> (strength, polarity), where strength is
        "strongsubj"/"weaksubj" and polarity is e.g. "positive"/"negative".
        As in the original code, a word appearing on several lines keeps
        the last line's values.
    """
    lexicon = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            # split("=", 1) tolerates values that themselves contain "=";
            # tokens with no "=" at all (the distributed file has a few
            # stray ones) are skipped instead of crashing dict() with a
            # ValueError.
            fields = dict(tok.split("=", 1) for tok in line.split() if "=" in tok)
            try:
                word = fields["word1"]
                strength = fields["type"]           # “strongsubj” vs “weaksubj”
                polarity = fields["priorpolarity"]  # “positive” or “negative”
            except KeyError:
                # Blank or malformed line — skip it rather than abort the
                # entire lexicon load.
                continue
            lexicon[word] = (strength, polarity)
    return lexicon


mpqa = _load_mpqa_lexicon()
|
18 |
+
|
19 |
# ——— Load and preprocess NRC EmoLex ——————————————————————————————————
# NOTE(review): presumably the word-level association file of the NRC
# Emotion Lexicon v0.92 — confirm the format matches the loader that
# consumes this path.
# Make sure this filename matches exactly what you’ve uploaded
EMOLEX_PATH = "NRC-Emotion-Lexicon-Wordlevel-v0.92.txt"
|
|
|
93 |
# ——— 3) Initialize EasyOCR reader ————————————————————————————————————————————
# Single module-level EasyOCR instance for English, CPU-only (gpu=False),
# created once at import time and shared by all OCR calls.
ocr_reader = easyocr.Reader(["en"], gpu=False)
95 |
|
96 |
+
|
97 |
# ——— 4) Emotional-Tone Tagging —————————————————————————————————————————————
|
98 |
def get_emotional_tone_tag(emotion_profile, patterns, text_lower):
|
99 |
"""
|
|
|
115 |
emo: sum(EMOLEX.get(w, {}).get(emo, 0) for w in words)
|
116 |
for emo in ["anger","joy","sadness","fear","disgust"]
|
117 |
}
|
118 |
+
|
119 |
# 0. Support override
|
120 |
if lex_counts["joy"] > 0 and any(k in text_lower for k in ["support","hope","grace"]):
|
121 |
return "supportive"
|
122 |
|
123 |
+
# 1. Performative Regret
|
124 |
if sadness > 0.4 \
|
125 |
and (lex_counts["sadness"] > 0 or any(p in patterns for p in ["blame shifting","guilt tripping","recovery phase"])):
|
126 |
return "performative regret"
|
127 |
|
128 |
+
# 2. Coercive Warmth
|
129 |
if (joy > 0.3 or sadness > 0.4) \
|
130 |
+
and (lex_counts["joy"] > 0 or lex_counts["sadness"] > 0) \
|
131 |
+
and any(p in patterns for p in ["control","gaslighting"]):
|
132 |
+
return "coercive warmth"
|
133 |
|
134 |
+
# 3. Cold Invalidation
|
135 |
if (neutral + disgust) > 0.5 \
|
136 |
and lex_counts["disgust"] > 0 \
|
137 |
and any(p in patterns for p in ["dismissiveness","projection","obscure language"]):
|
138 |
+
return "cold invalidation"
|
139 |
|
140 |
+
# 4. Genuine Vulnerability
|
141 |
if (sadness + fear) > 0.5 \
|
142 |
and lex_counts["sadness"] > 0 and lex_counts["fear"] > 0 \
|
143 |
and all(p == "recovery phase" for p in patterns):
|
144 |
+
return "genuine vulnerability"
|
145 |
|
146 |
+
# 5. Emotional Threat
|
147 |
if (anger + disgust) > 0.5 \
|
148 |
and (lex_counts["anger"] > 0 or lex_counts["disgust"] > 0) \
|
149 |
and any(p in patterns for p in ["control","threat","insults","dismissiveness"]):
|
150 |
+
return "emotional threat"
|
151 |
|
152 |
+
# 6. Weaponized Sadness
|
153 |
if sadness > 0.6 \
|
154 |
and lex_counts["sadness"] > 0 \
|
155 |
and any(p in patterns for p in ["guilt tripping","projection"]):
|
156 |
+
return "weaponized sadness"
|
157 |
|
158 |
+
# 7. Toxic Resignation
|
159 |
if neutral > 0.5 \
|
160 |
and any(p in patterns for p in ["dismissiveness","obscure language"]) \
|
161 |
and lex_counts["disgust"] == 0:
|
162 |
+
return "toxic resignation"
|
163 |
|
164 |
+
# 8. Indignant Reproach
|
165 |
if anger > 0.5 \
|
166 |
and lex_counts["anger"] > 0 \
|
167 |
and any(p in patterns for p in ["guilt tripping","contradictory statements"]):
|
168 |
+
return "indignant reproach"
|
169 |
|
170 |
+
# 9. Confrontational
|
171 |
if anger > 0.6 \
|
172 |
+
and lex_counts["anger"] > 0 \
|
173 |
+
and patterns:
|
174 |
+
return "confrontational"
|
175 |
|
176 |
+
# 10. Passive Aggression
|
177 |
if neutral > 0.6 \
|
178 |
and lex_counts["disgust"] > 0 \
|
179 |
and any(p in patterns for p in ["dismissiveness","projection"]):
|
180 |
+
return "passive aggression"
|
181 |
|
182 |
+
# 11. Sarcastic Mockery
|
183 |
if joy > 0.3 \
|
184 |
and lex_counts["joy"] > 0 \
|
185 |
and "insults" in patterns:
|
186 |
+
return "sarcastic mockery"
|
187 |
|
188 |
+
# 12. Menacing Threat
|
189 |
if fear > 0.3 \
|
190 |
and lex_counts["fear"] > 0 \
|
191 |
and "threat" in patterns:
|
192 |
+
return "menacing threat"
|
193 |
|
194 |
+
# 13. Pleading Concern
|
195 |
if sadness > 0.3 \
|
196 |
and lex_counts["sadness"] > 0 \
|
197 |
and any(k in text_lower for k in APOLOGY_KEYWORDS) \
|
198 |
and not patterns:
|
199 |
+
return "pleading concern"
|
200 |
|
201 |
+
# 14. Fear-mongering
|
202 |
if (fear + disgust) > 0.5 \
|
203 |
and lex_counts["fear"] > 0 \
|
204 |
and "projection" in patterns:
|
205 |
+
return "fear-mongering"
|
206 |
|
207 |
+
# 15. Disbelieving Accusation
|
208 |
if surprise > 0.3 \
|
209 |
and lex_counts["surprise"] > 0 \
|
210 |
and "blame shifting" in patterns:
|
211 |
+
return "disbelieving accusation"
|
212 |
|
213 |
+
# 16. Empathetic Solidarity
|
214 |
if joy > 0.2 and sadness > 0.2 \
|
215 |
and lex_counts["joy"] > 0 and lex_counts["sadness"] > 0 \
|
216 |
and not patterns:
|
217 |
+
return "empathetic solidarity"
|
218 |
|
219 |
+
# 17. Assertive Boundary
|
220 |
if anger > 0.4 \
|
221 |
+
and lex_counts["anger"] > 0 \
|
222 |
+
and "control" in patterns:
|
223 |
+
return "assertive boundary"
|
224 |
|
225 |
+
# 18. Stonewalling
|
226 |
if neutral > 0.7 \
|
227 |
+
and lex_counts["disgust"] == 0 \
|
228 |
+
and not patterns:
|
229 |
+
return "stonewalling"
|
230 |
|
231 |
return None
|
232 |
|