SamanthaStorm committed · verified
Commit 666c665 · 1 Parent(s): cf7ed0f

Upload app.py

Files changed (1)
  1. app.py +796 -0
app.py ADDED
@@ -0,0 +1,796 @@
import gradio as gr
import torch
import numpy as np
from transformers import pipeline, RobertaForSequenceClassification, RobertaTokenizer
from motif_tagging import detect_motifs
import re
import matplotlib.pyplot as plt
import io
from PIL import Image
from datetime import datetime
from transformers import pipeline as hf_pipeline  # prevent name collision with gradio pipeline

# Note: emotion_pipeline is defined just below; it is only looked up when this function is called.
def get_emotion_profile(text):
    emotions = emotion_pipeline(text)
    if isinstance(emotions, list) and isinstance(emotions[0], list):
        emotions = emotions[0]
    return {e['label'].lower(): round(e['score'], 3) for e in emotions}

# Emotion model (no retraining needed)
emotion_pipeline = hf_pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,
    truncation=True
)
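
# Example (illustrative; the scores below are hypothetical): get_emotion_profile
# returns a dict of lowercase emotion label -> rounded score, e.g.
#   get_emotion_profile("I can't believe you did this")
#   -> {"anger": 0.41, "disgust": 0.12, "fear": 0.08, "joy": 0.01,
#       "neutral": 0.22, "sadness": 0.14, "surprise": 0.02}
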
# --- Timeline Visualization Function ---
def generate_abuse_score_chart(dates, scores, labels):
    # labels is accepted for symmetry with the caller but is not used yet
    import matplotlib.pyplot as plt
    import io
    from PIL import Image
    from datetime import datetime
    import re

    # Determine if all entries are valid dates
    if all(re.match(r"\d{4}-\d{2}-\d{2}", d) for d in dates):
        parsed_x = [datetime.strptime(d, "%Y-%m-%d") for d in dates]
        x_labels = [d.strftime("%Y-%m-%d") for d in parsed_x]
    else:
        parsed_x = list(range(1, len(dates) + 1))
        x_labels = [f"Message {i+1}" for i in range(len(dates))]

    fig, ax = plt.subplots(figsize=(8, 3))
    ax.plot(parsed_x, scores, marker='o', linestyle='-', color='darkred', linewidth=2)

    for x, y in zip(parsed_x, scores):
        ax.text(x, y + 2, f"{int(y)}%", ha='center', fontsize=8, color='black')

    ax.set_xticks(parsed_x)
    ax.set_xticklabels(x_labels)
    ax.set_xlabel("")  # no axis label
    ax.set_ylabel("Abuse Score (%)")
    ax.set_ylim(0, 105)
    ax.grid(True)
    plt.tight_layout()

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    return Image.open(buf)
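
# Example (illustrative): the chart helper accepts either ISO dates or arbitrary
# placeholders; anything that is not YYYY-MM-DD falls back to "Message N" tick labels.
#   generate_abuse_score_chart(["2024-01-01", "2024-01-08"], [35, 62], ["control", "insults"])
#   generate_abuse_score_chart(["Message 1", "Message 2"], [35, 62], ["control", "insults"])
# Both calls return a PIL image of the line plot.
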
# --- Abuse Model ---
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "SamanthaStorm/tether-multilabel-v3"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)

LABELS = [
    "recovery", "control", "gaslighting", "guilt tripping", "dismissiveness", "blame shifting",
    "nonabusive", "projection", "insults", "contradictory statements", "obscure language"
]

THRESHOLDS = {
    "recovery": 0.4,
    "control": 0.45,
    "gaslighting": 0.25,
    "guilt tripping": 0.20,
    "dismissiveness": 0.25,
    "blame shifting": 0.25,
    "projection": 0.25,
    "insults": 0.05,
    "contradictory statements": 0.25,
    "obscure language": 0.25,
    "nonabusive": 1.0
}

PATTERN_WEIGHTS = {
    "recovery": 0.7,
    "control": 1.4,
    "gaslighting": 1.50,
    "guilt tripping": 1.2,
    "dismissiveness": 0.9,
    "blame shifting": 0.8,
    "projection": 0.5,
    "insults": 1.4,
    "contradictory statements": 1.0,
    "obscure language": 0.9,
    "nonabusive": 0.0
}

ESCALATION_RISKS = {
    "blame shifting": "low",
    "contradictory statements": "moderate",
    "control": "high",
    "dismissiveness": "moderate",
    "gaslighting": "moderate",
    "guilt tripping": "moderate",
    "insults": "moderate",
    "obscure language": "low",
    "projection": "low",
    "recovery phase": "low"
}

RISK_STAGE_LABELS = {
    1: "🌀 Risk Stage: Tension-Building\nThis message reflects rising emotional pressure or subtle control attempts.",
    2: "🔥 Risk Stage: Escalation\nThis message includes direct or aggressive patterns, suggesting active harm.",
    3: "🌧️ Risk Stage: Reconciliation\nThis message reflects a reset attempt—apologies or emotional repair without accountability.",
    4: "🌸 Risk Stage: Calm / Honeymoon\nThis message appears supportive but may follow prior harm, minimizing it."
}
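
# How the tables above interact (sketch): a label counts as detected when its sigmoid
# score exceeds THRESHOLDS[label] (analyze_single_message below adds +0.05 to each
# threshold when the sentiment reads as supportive), and PATTERN_WEIGHTS[label] then
# weights that label inside compute_abuse_score. For example (hypothetical scores):
#   {"control": 0.52, "insults": 0.04}
#   -> "control" passes (0.52 > 0.45) and enters the weighted average with weight 1.4;
#      "insults" does not (0.04 < 0.05).
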
ESCALATION_QUESTIONS = [
    ("Partner has access to firearms or weapons", 4),
    ("Partner threatened to kill you", 3),
    ("Partner threatened you with a weapon", 3),
    ("Partner has ever choked you, even if you considered it consensual at the time", 4),
    ("Partner injured or threatened your pet(s)", 3),
    ("Partner has broken your things, punched or kicked walls, or thrown things", 2),
    ("Partner forced or coerced you into unwanted sexual acts", 3),
    ("Partner threatened to take away your children", 2),
    ("Violence has increased in frequency or severity", 3),
    ("Partner monitors your calls/GPS/social media", 2)
]

DARVO_PATTERNS = [
    "blame shifting",            # "You're the reason this happens"
    "projection",                # "You're the abusive one"
    "deflection",                # "This isn't about that"
    "dismissiveness",            # "You're overreacting"
    "insults",                   # Personal attacks that redirect attention
    "aggression",                # Escalates tone to destabilize
    "recovery phase",            # Sudden affection following aggression
    "contradictory statements"   # "I never said that" immediately followed by a version of what they said
]

DARVO_MOTIFS = [
    "I never said that.", "You’re imagining things.", "That never happened.",
    "You’re making a big deal out of nothing.", "It was just a joke.", "You’re too sensitive.",
    "I don’t know what you’re talking about.", "You’re overreacting.", "I didn’t mean it that way.",
    "You’re twisting my words.", "You’re remembering it wrong.", "You’re always looking for something to complain about.",
    "You’re just trying to start a fight.", "I was only trying to help.", "You’re making things up.",
    "You’re blowing this out of proportion.", "You’re being paranoid.", "You’re too emotional.",
    "You’re always so dramatic.", "You’re just trying to make me look bad.",

    "You’re crazy.", "You’re the one with the problem.", "You’re always so negative.",
    "You’re just trying to control me.", "You’re the abusive one.", "You’re trying to ruin my life.",
    "You’re just jealous.", "You’re the one who needs help.", "You’re always playing the victim.",
    "You’re the one causing all the problems.", "You’re just trying to make me feel guilty.",
    "You’re the one who can’t let go of the past.", "You’re the one who’s always angry.",
    "You’re the one who’s always complaining.", "You’re the one who’s always starting arguments.",
    "You’re the one who’s always making things worse.", "You’re the one who’s always making me feel bad.",
    "You’re the one who’s always making me look like the bad guy.",
    "You’re the one who’s always making me feel like a failure.",
    "You’re the one who’s always making me feel like I’m not good enough.",

    "I can’t believe you’re doing this to me.", "You’re hurting me.",
    "You’re making me feel like a terrible person.", "You’re always blaming me for everything.",
    "You’re the one who’s abusive.", "You’re the one who’s controlling.", "You’re the one who’s manipulative.",
    "You’re the one who’s toxic.", "You’re the one who’s gaslighting me.",
    "You’re the one who’s always putting me down.", "You’re the one who’s always making me feel bad.",
    "You’re the one who’s always making me feel like I’m not good enough.",
    "You’re the one who’s always making me feel like I’m the problem.",
    "You’re the one who’s always making me feel like I’m the bad guy.",
    "You’re the one who’s always making me feel like I’m the villain.",
    "You’re the one who’s always making me feel like I’m the one who needs to change.",
    "You’re the one who’s always making me feel like I’m the one who’s wrong.",
    "You’re the one who’s always making me feel like I’m the one who’s crazy.",
    "You’re the one who’s always making me feel like I’m the one who’s abusive.",
    "You’re the one who’s always making me feel like I’m the one who’s toxic."
]

def get_emotional_tone_tag(emotions, sentiment, patterns, abuse_score):
    sadness = emotions.get("sadness", 0)
    joy = emotions.get("joy", 0)
    neutral = emotions.get("neutral", 0)
    disgust = emotions.get("disgust", 0)
    anger = emotions.get("anger", 0)
    fear = emotions.get("fear", 0)

    # 1. Performative Regret
    if (
        sadness > 0.4 and
        any(p in patterns for p in ["blame shifting", "guilt tripping", "recovery phase"]) and
        (sentiment == "undermining" or abuse_score > 40)
    ):
        return "performative regret"

    # 2. Coercive Warmth
    if (
        (joy > 0.3 or sadness > 0.4) and
        any(p in patterns for p in ["control", "gaslighting"]) and
        sentiment == "undermining"
    ):
        return "coercive warmth"

    # 3. Cold Invalidation
    if (
        (neutral + disgust) > 0.5 and
        any(p in patterns for p in ["dismissiveness", "projection", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "cold invalidation"

    # 4. Genuine Vulnerability
    if (
        (sadness + fear) > 0.5 and
        sentiment == "supportive" and
        all(p in ["recovery phase"] for p in patterns)
    ):
        return "genuine vulnerability"

    # 5. Emotional Threat
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["control", "insults", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "emotional threat"

    # 6. Weaponized Sadness
    if (
        sadness > 0.6 and
        any(p in patterns for p in ["guilt tripping", "projection"]) and
        sentiment == "undermining"
    ):
        return "weaponized sadness"

    # 7. Toxic Resignation
    if (
        neutral > 0.5 and
        any(p in patterns for p in ["dismissiveness", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "toxic resignation"

    # 8. Aggressive Dismissal
    if (
        anger > 0.5 and
        any(p in patterns for p in ["aggression", "insults", "control"]) and
        sentiment == "undermining"
    ):
        return "aggressive dismissal"

    # 9. Deflective Hostility
    if (
        (0.2 < anger < 0.7 or 0.2 < disgust < 0.7) and
        any(p in patterns for p in ["deflection", "projection"]) and
        sentiment == "undermining"
    ):
        return "deflective hostility"

    # 10. Mocking Detachment
    if (
        (neutral + joy) > 0.5 and
        any(p in patterns for p in ["mockery", "insults", "projection"]) and
        sentiment == "undermining"
    ):
        return "mocking detachment"

    # 11. Contradictory Gaslight
    if (
        (joy + anger + sadness) > 0.5 and
        any(p in patterns for p in ["gaslighting", "contradictory statements"]) and
        sentiment == "undermining"
    ):
        return "contradictory gaslight"

    # 12. Calculated Neutrality
    if (
        neutral > 0.6 and
        any(p in patterns for p in ["obscure language", "deflection", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "calculated neutrality"

    # 13. Forced Accountability Flip
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["blame shifting", "manipulation", "projection"]) and
        sentiment == "undermining"
    ):
        return "forced accountability flip"

    # 14. Conditional Affection
    if (
        joy > 0.4 and
        any(p in patterns for p in ["apology baiting", "control", "recovery phase"]) and
        sentiment == "undermining"
    ):
        return "conditional affection"

    # Accountability-flip variant (covers deflection alongside blame shifting/projection)
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["blame shifting", "projection", "deflection"]) and
        sentiment == "undermining"
    ):
        return "forced accountability flip"

    # Emotional Instability Fallback
    if (
        (anger + sadness + disgust) > 0.6 and
        sentiment == "undermining"
    ):
        return "emotional instability"

    return None
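
# Example (illustrative, hypothetical inputs): a high-sadness message flagged for
# guilt tripping with undermining sentiment maps to "performative regret":
#   get_emotional_tone_tag(
#       {"sadness": 0.55, "joy": 0.02, "neutral": 0.20, "disgust": 0.10, "anger": 0.05, "fear": 0.03},
#       "undermining", ["guilt tripping"], 45
#   )  # -> "performative regret"
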
def detect_contradiction(message):
    patterns = [
        (r"\b(i love you).{0,15}(i hate you|you ruin everything)", re.IGNORECASE),
        (r"\b(i’m sorry).{0,15}(but you|if you hadn’t)", re.IGNORECASE),
        (r"\b(i’m trying).{0,15}(you never|why do you)", re.IGNORECASE),
        (r"\b(do what you want).{0,15}(you’ll regret it|i always give everything)", re.IGNORECASE),
        (r"\b(i don’t care).{0,15}(you never think of me)", re.IGNORECASE),
        (r"\b(i guess i’m just).{0,15}(the bad guy|worthless|never enough)", re.IGNORECASE)
    ]
    return any(re.search(p, message, flags) for p, flags in patterns)
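
# Example (illustrative): each regex pair looks for a pivot within about 15 characters,
# so "I love you but I hate you sometimes" matches the first pattern:
#   detect_contradiction("I love you but I hate you sometimes")  # -> True
#   detect_contradiction("I love you")                           # -> False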

def calculate_darvo_score(patterns, sentiment_before, sentiment_after, motifs_found, contradiction_flag=False):
    # Count all detected DARVO-related patterns
    pattern_hits = sum(1 for p in patterns if p.lower() in DARVO_PATTERNS)

    # Sentiment delta
    sentiment_shift_score = max(0.0, sentiment_after - sentiment_before)

    # Match against DARVO motifs more loosely
    motif_hits = sum(
        any(phrase.lower() in motif.lower() or motif.lower() in phrase.lower()
            for phrase in DARVO_MOTIFS)
        for motif in motifs_found
    )
    motif_score = motif_hits / max(len(DARVO_MOTIFS), 1)

    # Contradiction still binary
    contradiction_score = 1.0 if contradiction_flag else 0.0

    # Final DARVO score
    return round(min(
        0.3 * pattern_hits +
        0.3 * sentiment_shift_score +
        0.25 * motif_score +
        0.15 * contradiction_score, 1.0
    ), 3)
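
# Worked example (hypothetical inputs): two DARVO-aligned patterns, a 0.4 sentiment
# shift, no motif matches, and a detected contradiction:
#   0.3*2 + 0.3*0.4 + 0.25*0 + 0.15*1 = 0.87 (already under the 1.0 cap)
#   calculate_darvo_score(["blame shifting", "projection"], 0.0, 0.4, [], True)  # -> 0.87
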
def detect_weapon_language(text):
    weapon_keywords = [
        "knife", "knives", "stab", "cut you", "cutting",
        "gun", "shoot", "rifle", "firearm", "pistol",
        "bomb", "blow up", "grenade", "explode",
        "weapon", "armed", "loaded", "kill you", "take you out"
    ]
    text_lower = text.lower()
    return any(word in text_lower for word in weapon_keywords)

def get_risk_stage(patterns, sentiment):
    if "insults" in patterns:
        return 2
    elif "recovery phase" in patterns:
        return 3
    elif "control" in patterns or "guilt tripping" in patterns:
        return 1
    elif sentiment == "supportive" and any(p in patterns for p in ["projection", "dismissiveness"]):
        return 4
    return 1

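# Example (illustrative): "insults" takes precedence over the later checks, so
#   get_risk_stage(["insults", "control"], "undermining")  # -> 2
#   get_risk_stage(["recovery phase"], "supportive")       # -> 3
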
def generate_risk_snippet(abuse_score, top_label, escalation_score, stage):
    import re

    # Extract aggression score if aggression is detected
    if isinstance(top_label, str) and "aggression" in top_label.lower():
        try:
            match = re.search(r"\(?(\d+)\%?\)?", top_label)
            aggression_score = int(match.group(1)) / 100 if match else 0
        except Exception:
            aggression_score = 0
    else:
        aggression_score = 0

    # Revised risk logic
    if abuse_score >= 85 or escalation_score >= 16:
        risk_level = "high"
    elif abuse_score >= 60 or escalation_score >= 8 or aggression_score >= 0.25:
        risk_level = "moderate"
    elif stage == 2 and abuse_score >= 40:
        risk_level = "moderate"
    else:
        risk_level = "low"

    if isinstance(top_label, str) and " – " in top_label:
        pattern_label, pattern_score = top_label.split(" – ")
    else:
        pattern_label = str(top_label) if top_label is not None else "Unknown"
        pattern_score = ""

    WHY_FLAGGED = {
        "control": "This message may reflect efforts to restrict someone’s autonomy, even if it's framed as concern or care.",
        "gaslighting": "This message could be manipulating someone into questioning their perception or feelings.",
        "dismissiveness": "This message may include belittling, invalidating, or ignoring the other person’s experience.",
        "insults": "Direct insults often appear in escalating abusive dynamics and can erode emotional safety.",
        "blame shifting": "This message may redirect responsibility to avoid accountability, especially during conflict.",
        "guilt tripping": "This message may induce guilt in order to control or manipulate behavior.",
        "recovery phase": "This message may be part of a tension-reset cycle, appearing kind but avoiding change.",
        "projection": "This message may involve attributing the abuser’s own behaviors to the victim.",
        "contradictory statements": "This message may contain internal contradictions used to confuse, destabilize, or deflect responsibility.",
        "obscure language": "This message may use overly formal, vague, or complex language to obscure meaning or avoid accountability.",
        "default": "This message contains language patterns that may affect safety, clarity, or emotional autonomy."
    }

    explanation = WHY_FLAGGED.get(pattern_label.lower(), WHY_FLAGGED["default"])

    base = f"\n\n🛑 Risk Level: {risk_level.capitalize()}\n"
    base += f"This message shows strong indicators of **{pattern_label}**. "

    if risk_level == "high":
        base += "The language may reflect patterns of emotional control, even when expressed in soft or caring terms.\n"
    elif risk_level == "moderate":
        base += "There are signs of emotional pressure or verbal aggression that may escalate if repeated.\n"
    else:
        base += "The message does not strongly indicate abuse, but it's important to monitor for patterns.\n"

    base += f"\n💡 *Why this might be flagged:*\n{explanation}\n"
    base += f"\nDetected Pattern: **{pattern_label} ({pattern_score})**\n"
    base += "🧠 You can review the pattern in context. This tool highlights possible dynamics—not judgments."
    return base

# --- Step X: Detect Immediate Danger Threats ---
THREAT_MOTIFS = [
    "i'll kill you", "i'm going to hurt you", "you're dead", "you won't survive this",
    "i'll break your face", "i'll bash your head in", "i'll snap your neck",
    "i'll come over there and make you shut up", "i'll knock your teeth out",
    "you're going to bleed", "you want me to hit you?", "i won't hold back next time",
    "i swear to god i'll beat you", "next time, i won't miss", "i'll make you scream",
    "i know where you live", "i'm outside", "i'll be waiting", "i saw you with him",
    "you can't hide from me", "i'm coming to get you", "i'll find you", "i know your schedule",
    "i watched you leave", "i followed you home", "you'll regret this", "you'll be sorry",
    "you're going to wish you hadn't", "you brought this on yourself", "don't push me",
    "you have no idea what i'm capable of", "you better watch yourself",
    "i don't care what happens to you anymore", "i'll make you suffer", "you'll pay for this",
    "i'll never let you go", "you're nothing without me", "if you leave me, i'll kill myself",
    "i'll ruin you", "i'll tell everyone what you did", "i'll make sure everyone knows",
    "i'm going to destroy your name", "you'll lose everyone", "i'll expose you",
    "your friends will hate you", "i'll post everything", "you'll be cancelled",
    "you'll lose everything", "i'll take the house", "i'll drain your account",
    "you'll never see a dime", "you'll be broke when i'm done", "i'll make sure you lose your job",
    "i'll take your kids", "i'll make sure you have nothing", "you can't afford to leave me",
    "don't make me do this", "you know what happens when i'm mad", "you're forcing my hand",
    "if you just behaved, this wouldn't happen", "this is your fault",
    "you're making me hurt you", "i warned you", "you should have listened"
]

def compute_abuse_score(matched_scores, sentiment):
    if not matched_scores:
        return 0

    # Weighted average of the patterns that passed their thresholds
    weighted_total = sum(score * weight for _, score, weight in matched_scores)
    weight_sum = sum(weight for _, _, weight in matched_scores)
    base_score = (weighted_total / weight_sum) * 100

    # Boost for pattern count
    pattern_count = len(matched_scores)
    scale = 1.0 + 0.25 * max(0, pattern_count - 1)  # 1.25x for 2 patterns, 1.5x for 3, and so on
    scaled_score = base_score * scale

    # Pattern floors
    FLOORS = {
        "control": 40,
        "gaslighting": 30,
        "insults": 25,
        "aggression": 40
    }
    floor = max(FLOORS.get(label, 0) for label, _, _ in matched_scores)
    adjusted_score = max(scaled_score, floor)

    # Sentiment tweak
    if sentiment == "undermining" and adjusted_score < 50:
        adjusted_score += 10

    return min(adjusted_score, 100)

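# Worked example (hypothetical scores): matched_scores of
#   [("control", 0.6, 1.4), ("insults", 0.3, 1.4)] with "undermining" sentiment:
#   base   = (0.6*1.4 + 0.3*1.4) / (1.4 + 1.4) * 100 = 45.0
#   scaled = 45.0 * 1.25 = 56.25     (two patterns)
#   floor  = max(40, 25) = 40        -> adjusted stays 56.25
#   no sentiment bump (56.25 >= 50)  -> final score 56.25 (capped at 100)
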
def analyze_single_message(text, thresholds):
    motif_hits, matched_phrases = detect_motifs(text)

    # Get emotion profile
    emotion_profile = get_emotion_profile(text)
    sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)

    # Get model scores
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
        scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()

    # Sentiment override if neutral is high while critical thresholds are passed
    if emotion_profile.get("neutral", 0) > 0.85 and any(
        scores[LABELS.index(l)] > thresholds[l]
        for l in ["control", "blame shifting"]
    ):
        sentiment = "undermining"
    else:
        sentiment = "undermining" if sentiment_score > 0.25 else "supportive"

    weapon_flag = detect_weapon_language(text)

    adjusted_thresholds = {
        k: v + 0.05 if sentiment == "supportive" else v
        for k, v in thresholds.items()
    }

    contradiction_flag = detect_contradiction(text)

    threshold_labels = [
        label for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, 0)
    motifs = [phrase for _, phrase in matched_phrases]

    darvo_score = calculate_darvo_score(
        threshold_labels,
        sentiment_before=0.0,
        sentiment_after=sentiment_score,
        motifs_found=motifs,
        contradiction_flag=contradiction_flag
    )

    top_patterns = sorted(
        [(label, score) for label, score in zip(LABELS, scores)],
        key=lambda x: x[1],
        reverse=True
    )[:2]

    # Post-threshold validation: strip recovery if it occurs with undermining sentiment
    if "recovery" in threshold_labels and tone_tag == "forced accountability flip":
        threshold_labels.remove("recovery")
        top_patterns = [p for p in top_patterns if p[0] != "recovery"]
        print("⚠️ Removing 'recovery' due to undermining sentiment (not genuine repair)")

    matched_scores = [
        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
        for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]

    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
    abuse_score = abuse_score_raw

    # Risk stage logic
    stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
    if weapon_flag and stage < 2:
        stage = 2
    if weapon_flag:
        abuse_score_raw = min(abuse_score_raw + 25, 100)

    abuse_score = min(
        abuse_score_raw,
        100 if "control" in threshold_labels else 95
    )

    # Tone tag must be recomputed after the abuse score is finalized
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)

    # ---- Profanity + Anger Override Logic ----
    profane_words = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole", "dick", "whore", "slut"}
    tokens = set(text.lower().split())
    has_profane = any(word in tokens for word in profane_words)

    anger_score = emotion_profile.get("anger", 0)  # emotion keys are lowercased in get_emotion_profile
    short_text = len(tokens) <= 10
    insult_score = next((s for l, s in top_patterns if l == "insults"), 0)

    if has_profane and anger_score > 0.75 and short_text:
        print("⚠️ Profanity + Anger Override Triggered")
        top_patterns = sorted(top_patterns, key=lambda x: x[1], reverse=True)
        if top_patterns[0][0] != "insults":
            top_patterns.insert(0, ("insults", insult_score))
        if "insults" not in threshold_labels:
            threshold_labels.append("insults")
        top_patterns = [("insults", insult_score)] + [p for p in top_patterns if p[0] != "insults"]

    # Debug
    print(f"Emotional Tone Tag: {tone_tag}")
    print("Emotion Profile:")
    for emotion, score in emotion_profile.items():
        print(f"  {emotion.capitalize():10}: {score}")
    print("\n--- Debug Info ---")
    print(f"Text: {text}")
    print(f"Sentiment (via emotion): {sentiment} (score: {round(sentiment_score, 3)})")
    print("Abuse Pattern Scores:")
    for label, score in zip(LABELS, scores):
        passed = "✅" if score > adjusted_thresholds[label] else "❌"
        print(f"  {label:25} → {score:.3f} {passed}")
    print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
    print(f"Motifs: {motifs}")
    print(f"Contradiction: {contradiction_flag}")
    print("------------------\n")

    return abuse_score, threshold_labels, top_patterns, {"label": sentiment}, stage, darvo_score, tone_tag

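# Shape of the return value (illustrative; the numbers are hypothetical):
#   (abuse_score, threshold_labels, top_patterns, {"label": sentiment}, stage, darvo_score, tone_tag)
#   e.g. (62.5, ["blame shifting"], [("blame shifting", 0.41), ("control", 0.33)],
#         {"label": "undermining"}, 1, 0.45, "performative regret")
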
def analyze_composite(msg1, msg2, msg3, *answers_and_none):
    from collections import Counter

    none_selected_checked = answers_and_none[-1]
    responses_checked = any(answers_and_none[:-1])
    none_selected = not responses_checked and none_selected_checked

    escalation_score = None
    if not none_selected:
        escalation_score = sum(w for (_, w), a in zip(ESCALATION_QUESTIONS, answers_and_none[:-1]) if a)

    messages = [msg1, msg2, msg3]
    active = [(m, f"Message {i+1}") for i, m in enumerate(messages) if m.strip()]
    if not active:
        return "Please enter at least one message."

    # Flag any threat phrases present in the messages
    import re

    def normalize(text):
        import unicodedata
        text = text.lower().strip()
        text = unicodedata.normalize("NFKD", text)  # handles curly quotes
        text = text.replace("’", "'")  # smart to straight
        return re.sub(r"[^a-z0-9 ]", "", text)

    def detect_threat_motifs(message, motif_list):
        norm_msg = normalize(message)
        return [
            motif for motif in motif_list
            if normalize(motif) in norm_msg
        ]

    # Collect matches per message
    immediate_threats = [detect_threat_motifs(m, THREAT_MOTIFS) for m, _ in active]
    flat_threats = [t for sublist in immediate_threats for t in sublist]
    threat_risk = "Yes" if flat_threats else "No"

    results = [(analyze_single_message(m, THRESHOLDS.copy()), d) for m, d in active]

    abuse_scores = [r[0][0] for r in results]
    stages = [r[0][4] for r in results]
    darvo_scores = [r[0][5] for r in results]
    tone_tags = [r[0][6] for r in results]
    dates_used = [r[1] for r in results]

    predicted_labels = [label for r in results for label, _ in r[0][2]]
    high = {'control'}
    moderate = {'gaslighting', 'dismissiveness', 'obscure language', 'insults', 'contradictory statements', 'guilt tripping'}
    low = {'blame shifting', 'projection', 'recovery phase'}
    counts = {'high': 0, 'moderate': 0, 'low': 0}
    for label in predicted_labels:
        if label in high:
            counts['high'] += 1
        elif label in moderate:
            counts['moderate'] += 1
        elif label in low:
            counts['low'] += 1

    # Pattern escalation logic
    pattern_escalation_risk = "Low"
    if counts['high'] >= 2 and counts['moderate'] >= 2:
        pattern_escalation_risk = "Critical"
    elif (counts['high'] >= 2 and counts['moderate'] >= 1) or (counts['moderate'] >= 3) or (counts['high'] >= 1 and counts['moderate'] >= 2):
        pattern_escalation_risk = "High"
    elif (counts['moderate'] == 2) or (counts['high'] == 1 and counts['moderate'] == 1) or (counts['moderate'] == 1 and counts['low'] >= 2) or (counts['high'] == 1 and sum(counts.values()) == 1):
        pattern_escalation_risk = "Moderate"

    checklist_escalation_risk = "Unknown" if escalation_score is None else (
        "Critical" if escalation_score >= 20 else
        "Moderate" if escalation_score >= 10 else
        "Low"
    )

    escalation_bump = 0
    for result, _ in results:
        abuse_score, _, _, sentiment, stage, darvo_score, tone_tag = result
        if darvo_score > 0.65:
            escalation_bump += 3
        if tone_tag in ["forced accountability flip", "emotional threat"]:
            escalation_bump += 2
        if abuse_score > 80:
            escalation_bump += 2
        if stage == 2:
            escalation_bump += 3

    def rank(label):
        return {"Low": 0, "Moderate": 1, "High": 2, "Critical": 3, "Unknown": 0}.get(label, 0)

    combined_score = rank(pattern_escalation_risk) + rank(checklist_escalation_risk) + escalation_bump
    escalation_risk = (
        "Critical" if combined_score >= 6 else
        "High" if combined_score >= 4 else
        "Moderate" if combined_score >= 2 else
        "Low"
    )

    if escalation_score is None:
        escalation_text = "🚫 **Escalation Potential: Unknown** (Checklist not completed)\n⚠️ This section was not completed. Escalation potential is estimated using message data only.\n"
        hybrid_score = 0
    else:
        hybrid_score = escalation_score + escalation_bump
        escalation_text = f"📈 **Escalation Potential: {escalation_risk} ({hybrid_score}/29)**\n"
        escalation_text += "📋 This score combines your safety checklist answers *and* detected high-risk behavior.\n"
        escalation_text += f"• Pattern Risk: {pattern_escalation_risk}\n"
        escalation_text += f"• Checklist Risk: {checklist_escalation_risk}\n"
        escalation_text += f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"

    # Composite Abuse Score
    composite_abuse_scores = []
    for result, _ in results:
        _, _, top_patterns, sentiment, _, _, _ = result
        matched_scores = [(label, score, PATTERN_WEIGHTS.get(label, 1.0)) for label, score in top_patterns]
        final_score = compute_abuse_score(matched_scores, sentiment["label"])
        composite_abuse_scores.append(final_score)
    composite_abuse = int(round(sum(composite_abuse_scores) / len(composite_abuse_scores)))

    most_common_stage = max(set(stages), key=stages.count)
    stage_text = RISK_STAGE_LABELS[most_common_stage]

    # Derive the top label for each message
    top_labels = [r[0][1][0] if r[0][1] else r[0][2][0][0] for r in results]

    avg_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
    darvo_blurb = ""
    if avg_darvo > 0.25:
        level = "moderate" if avg_darvo < 0.65 else "high"
        darvo_blurb = f"\n\n🎭 **DARVO Score: {avg_darvo}** → This indicates a **{level} likelihood** of narrative reversal (DARVO), where the speaker may be denying, attacking, or reversing blame."

    out = f"Abuse Intensity: {composite_abuse}%\n"
    out += "📊 This reflects the strength and severity of detected abuse patterns in the message(s).\n\n"
    out += generate_risk_snippet(composite_abuse, top_labels[0], hybrid_score, most_common_stage)
    out += f"\n\n{stage_text}"
    out += darvo_blurb
    out += "\n\n🎭 **Emotional Tones Detected:**\n"
    for i, tone in enumerate(tone_tags):
        out += f"• Message {i+1}: *{tone or 'none'}*\n"

    # --- Add Immediate Danger Threats section
    if flat_threats:
        out += "\n\n🚨 **Immediate Danger Threats Detected:**\n"
        for t in set(flat_threats):
            out += f"• \"{t}\"\n"
        out += "\n⚠️ These phrases may indicate an imminent risk to physical safety."
    else:
        out += "\n\n🧩 **Immediate Danger Threats:** None explicitly detected.\n"
        out += "This does *not* rule out risk, but no direct threat phrases were matched."

    pattern_labels = [r[0][2][0][0] for r in results]
    timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, pattern_labels)
    out += "\n\n" + escalation_text

    return out, timeline_image

textbox_inputs = [gr.Textbox(label=f"Message {i+1}") for i in range(3)]
quiz_boxes = [gr.Checkbox(label=q) for q, _ in ESCALATION_QUESTIONS]
none_box = gr.Checkbox(label="None of the above")

iface = gr.Interface(
    fn=analyze_composite,
    inputs=textbox_inputs + quiz_boxes + [none_box],
    outputs=[
        gr.Textbox(label="Results"),
        gr.Image(label="Abuse Score Timeline", type="pil")
    ],
    title="Abuse Pattern Detector + Escalation Quiz",
    description="Enter up to three messages that concern you. For the most accurate results, enter messages sent during a recent period that felt emotionally intense or 'off.'",
    allow_flagging="manual"
)

if __name__ == "__main__":
    iface.launch()
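
# Running this locally (an assumption about the environment, not stated in the commit):
#   pip install gradio torch transformers matplotlib pillow
#   python app.py
# motif_tagging must be importable (e.g., a motif_tagging.py next to app.py) so
# `from motif_tagging import detect_motifs` resolves, and the first run downloads
# the two Hugging Face models referenced above.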