import gradio as gr
import torch
import re
import io
import unicodedata
import matplotlib.pyplot as plt
from PIL import Image
from datetime import datetime
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers import pipeline as hf_pipeline  # prevent name collision with gradio pipeline
from motif_tagging import detect_motifs
def get_emotion_profile(text):
    emotions = emotion_pipeline(text)
    if isinstance(emotions, list) and isinstance(emotions[0], list):
        emotions = emotions[0]
    return {e['label'].lower(): round(e['score'], 3) for e in emotions}
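# Example output (illustrative scores only — actual values depend on the model):
# get_emotion_profile("You never listen to me.")
# -> {"anger": 0.61, "disgust": 0.12, "sadness": 0.11, "neutral": 0.08, "fear": 0.05, "joy": 0.02}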
# Emotion model (no retraining needed)
emotion_pipeline = hf_pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=6,
    truncation=True
)
# --- Timeline Visualization Function ---
def generate_abuse_score_chart(dates, scores, labels):
    # Determine if all entries are valid dates
    if all(re.match(r"\d{4}-\d{2}-\d{2}", d) for d in dates):
        parsed_x = [datetime.strptime(d, "%Y-%m-%d") for d in dates]
        x_labels = [d.strftime("%Y-%m-%d") for d in parsed_x]
    else:
        parsed_x = list(range(1, len(dates) + 1))
        x_labels = [f"Message {i+1}" for i in range(len(dates))]
    fig, ax = plt.subplots(figsize=(8, 3))
    ax.plot(parsed_x, scores, marker='o', linestyle='-', color='darkred', linewidth=2)
    for x, y in zip(parsed_x, scores):
        ax.text(x, y + 2, f"{int(y)}%", ha='center', fontsize=8, color='black')
    ax.set_xticks(parsed_x)
    ax.set_xticklabels(x_labels)
    ax.set_xlabel("")  # No axis label
    ax.set_ylabel("Abuse Score (%)")
    ax.set_ylim(0, 105)
    ax.grid(True)
    plt.tight_layout()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    plt.close(fig)  # release the figure so repeated calls don't leak memory
    return Image.open(buf)
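# Usage (illustrative): returns a PIL.Image suitable for gr.Image(type="pil").
# Note that the `labels` argument is accepted for call compatibility but is not
# currently used when drawing the chart.
# generate_abuse_score_chart(["2024-01-01", "2024-01-05"], [45, 70], ["control", "insults"])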
# --- Abuse Model ---
model_name = "SamanthaStorm/tether-multilabel-v3"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
LABELS = [
    "recovery", "control", "gaslighting", "guilt tripping", "dismissiveness", "blame shifting",
    "nonabusive", "projection", "insults", "contradictory statements", "obscure language"
]
THRESHOLDS = {
    "recovery": 0.4,
    "control": 0.45,
    "gaslighting": 0.25,
    "guilt tripping": 0.20,
    "dismissiveness": 0.25,
    "blame shifting": 0.25,
    "projection": 0.25,
    "insults": 0.05,
    "contradictory statements": 0.25,
    "obscure language": 0.25,
    "nonabusive": 1.0
}
PATTERN_WEIGHTS = {
    "recovery": 0.7,
    "control": 1.4,
    "gaslighting": 1.5,
    "guilt tripping": 1.2,
    "dismissiveness": 0.9,
    "blame shifting": 0.8,
    "projection": 0.5,
    "insults": 1.4,
    "contradictory statements": 1.0,
    "obscure language": 0.9,
    "nonabusive": 0.0
}
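# How the weights are used (see compute_abuse_score below): each matched label's
# sigmoid probability is multiplied by its weight before averaging, so e.g. a
# gaslighting score of 0.50 contributes 0.50 * 1.5 = 0.75 to the weighted sum,
# while the same 0.50 on projection contributes only 0.50 * 0.5 = 0.25.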
ESCALATION_RISKS = {
    "blame shifting": "low",
    "contradictory statements": "moderate",
    "control": "high",
    "dismissiveness": "moderate",
    "gaslighting": "moderate",
    "guilt tripping": "moderate",
    "insults": "moderate",
    "obscure language": "low",
    "projection": "low",
    "recovery": "low"  # keyed to match LABELS ("recovery", not "recovery phase")
}
RISK_STAGE_LABELS = {
    1: "🌀 Risk Stage: Tension-Building\nThis message reflects rising emotional pressure or subtle control attempts.",
    2: "🔥 Risk Stage: Escalation\nThis message includes direct or aggressive patterns, suggesting active harm.",
    3: "🌧️ Risk Stage: Reconciliation\nThis message reflects a reset attempt—apologies or emotional repair without accountability.",
    4: "🌸 Risk Stage: Calm / Honeymoon\nThis message appears supportive but may follow prior harm, minimizing it."
}
ESCALATION_QUESTIONS = [
    ("Partner has access to firearms or weapons", 4),
    ("Partner threatened to kill you", 3),
    ("Partner threatened you with a weapon", 3),
    ("Partner has ever choked you, even if you considered it consensual at the time", 4),
    ("Partner injured or threatened your pet(s)", 3),
    ("Partner has broken your things, punched or kicked walls, or thrown things", 2),
    ("Partner forced or coerced you into unwanted sexual acts", 3),
    ("Partner threatened to take away your children", 2),
    ("Violence has increased in frequency or severity", 3),
    ("Partner monitors your calls/GPS/social media", 2)
]
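# Checklist scoring: each checked item adds its weight, so the checklist maximum is
# 4 + 3 + 3 + 4 + 3 + 2 + 3 + 2 + 3 + 2 = 29 (the denominator shown in the results text).
# Example: checking only "access to firearms" (4) and "monitors your calls" (2) yields 6.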
def get_emotional_tone_tag(emotions, sentiment, patterns, abuse_score):
    # NOTE: `patterns` holds LABELS entries, so checks use "recovery" (not "recovery phase").
    sadness = emotions.get("sadness", 0)
    joy = emotions.get("joy", 0)
    neutral = emotions.get("neutral", 0)
    disgust = emotions.get("disgust", 0)
    anger = emotions.get("anger", 0)
    fear = emotions.get("fear", 0)
    # 1. Performative Regret
    if (
        sadness > 0.4 and
        any(p in patterns for p in ["blame shifting", "guilt tripping", "recovery"]) and
        (sentiment == "undermining" or abuse_score > 40)
    ):
        return "performative regret"
    # 2. Coercive Warmth
    if (
        (joy > 0.3 or sadness > 0.4) and
        any(p in patterns for p in ["control", "gaslighting"]) and
        sentiment == "undermining"
    ):
        return "coercive warmth"
    # 3. Cold Invalidation
    if (
        (neutral + disgust) > 0.5 and
        any(p in patterns for p in ["dismissiveness", "projection", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "cold invalidation"
    # 4. Genuine Vulnerability
    if (
        (sadness + fear) > 0.5 and
        sentiment == "supportive" and
        all(p == "recovery" for p in patterns)
    ):
        return "genuine vulnerability"
    # 5. Emotional Threat
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["control", "insults", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "emotional threat"
    # 6. Weaponized Sadness
    if (
        sadness > 0.6 and
        any(p in patterns for p in ["guilt tripping", "projection"]) and
        sentiment == "undermining"
    ):
        return "weaponized sadness"
    # 7. Toxic Resignation
    if (
        neutral > 0.5 and
        any(p in patterns for p in ["dismissiveness", "obscure language"]) and
        sentiment == "undermining"
    ):
        return "toxic resignation"
    # 8. Aggressive Dismissal
    if (
        anger > 0.5 and
        any(p in patterns for p in ["aggression", "insults", "control"]) and
        sentiment == "undermining"
    ):
        return "aggressive dismissal"
    # 9. Deflective Hostility
    if (
        (0.2 < anger < 0.7 or 0.2 < disgust < 0.7) and
        any(p in patterns for p in ["deflection", "projection"]) and
        sentiment == "undermining"
    ):
        return "deflective hostility"
    # 10. Mocking Detachment
    if (
        (neutral + joy) > 0.5 and
        any(p in patterns for p in ["mockery", "insults", "projection"]) and
        sentiment == "undermining"
    ):
        return "mocking detachment"
    # 11. Contradictory Gaslight
    if (
        (joy + anger + sadness) > 0.5 and
        any(p in patterns for p in ["gaslighting", "contradictory statements"]) and
        sentiment == "undermining"
    ):
        return "contradictory gaslight"
    # 12. Calculated Neutrality
    if (
        neutral > 0.6 and
        any(p in patterns for p in ["obscure language", "deflection", "dismissiveness"]) and
        sentiment == "undermining"
    ):
        return "calculated neutrality"
    # 13. Forced Accountability Flip (the original had a second, near-duplicate check
    # further down that added "deflection"; the pattern lists are merged here)
    if (
        (anger + disgust) > 0.5 and
        any(p in patterns for p in ["blame shifting", "manipulation", "projection", "deflection"]) and
        sentiment == "undermining"
    ):
        return "forced accountability flip"
    # 14. Conditional Affection
    if (
        joy > 0.4 and
        any(p in patterns for p in ["apology baiting", "control", "recovery"]) and
        sentiment == "undermining"
    ):
        return "conditional affection"
    # Emotional Instability Fallback
    if (
        (anger + sadness + disgust) > 0.6 and
        sentiment == "undermining"
    ):
        return "emotional instability"
    return None
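# Example (illustrative): with emotions {"sadness": 0.55, "fear": 0.10, ...}, sentiment
# "undermining", patterns ["guilt tripping"], and abuse_score 62, rule 1 fires and the
# function returns "performative regret". Rules are checked in order; the first match wins.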
# 🔄 New DARVO score model (regression-based)
from torch.nn.functional import sigmoid
# Load your trained DARVO regressor from Hugging Face Hub
darvo_model = AutoModelForSequenceClassification.from_pretrained("SamanthaStorm/tether-darvo-regressor-v1")
darvo_tokenizer = AutoTokenizer.from_pretrained("SamanthaStorm/tether-darvo-regressor-v1", use_fast=False)
darvo_model.eval()
def predict_darvo_score(text):
    inputs = darvo_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = darvo_model(**inputs).logits
        score = sigmoid(logits).item()
    return round(score, 4)  # Rounded for display/output
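# Example (the returned value is illustrative only, not a real model output):
# predict_darvo_score("I only said that because you attacked me first")
# might return something like 0.71, i.e. a high DARVO likelihood.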
def detect_weapon_language(text):
    weapon_keywords = [
        "knife", "knives", "stab", "cut you", "cutting",
        "gun", "shoot", "rifle", "firearm", "pistol",
        "bomb", "blow up", "grenade", "explode",
        "weapon", "armed", "loaded", "kill you", "take you out"
    ]
    text_lower = text.lower()
    return any(word in text_lower for word in weapon_keywords)
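# Note: matching is substring-based, so it catches "Don't make me get the gun" (True)
# but also benign uses like "shoot me a text" (a known trade-off of keyword matching).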
def get_risk_stage(patterns, sentiment):
    if "insults" in patterns:
        return 2
    elif "recovery" in patterns:  # LABELS uses "recovery", not "recovery phase"
        return 3
    elif "control" in patterns or "guilt tripping" in patterns:
        return 1
    elif sentiment == "supportive" and any(p in patterns for p in ["projection", "dismissiveness"]):
        return 4
    return 1
def generate_risk_snippet(abuse_score, top_label, escalation_score, stage):
    # Extract aggression score if aggression is detected
    if isinstance(top_label, str) and "aggression" in top_label.lower():
        try:
            match = re.search(r"\(?(\d+)\%?\)?", top_label)
            aggression_score = int(match.group(1)) / 100 if match else 0
        except (ValueError, AttributeError):
            aggression_score = 0
    else:
        aggression_score = 0
    # Revised risk logic
    if abuse_score >= 85 or escalation_score >= 16:
        risk_level = "high"
    elif abuse_score >= 60 or escalation_score >= 8 or aggression_score >= 0.25:
        risk_level = "moderate"
    elif stage == 2 and abuse_score >= 40:
        risk_level = "moderate"
    else:
        risk_level = "low"
    if isinstance(top_label, str) and " – " in top_label:
        pattern_label, pattern_score = top_label.split(" – ")
    else:
        pattern_label = str(top_label) if top_label is not None else "Unknown"
        pattern_score = ""
    WHY_FLAGGED = {
        "control": "This message may reflect efforts to restrict someone’s autonomy, even if it's framed as concern or care.",
        "gaslighting": "This message could be manipulating someone into questioning their perception or feelings.",
        "dismissiveness": "This message may include belittling, invalidating, or ignoring the other person’s experience.",
        "insults": "Direct insults often appear in escalating abusive dynamics and can erode emotional safety.",
        "blame shifting": "This message may redirect responsibility to avoid accountability, especially during conflict.",
        "guilt tripping": "This message may induce guilt in order to control or manipulate behavior.",
        "recovery": "This message may be part of a tension-reset cycle, appearing kind but avoiding change.",
        "projection": "This message may involve attributing the abuser’s own behaviors to the victim.",
        "contradictory statements": "This message may contain internal contradictions used to confuse, destabilize, or deflect responsibility.",
        "obscure language": "This message may use overly formal, vague, or complex language to obscure meaning or avoid accountability.",
        "default": "This message contains language patterns that may affect safety, clarity, or emotional autonomy."
    }
    explanation = WHY_FLAGGED.get(pattern_label.lower(), WHY_FLAGGED["default"])
    base = f"\n\n🛑 Risk Level: {risk_level.capitalize()}\n"
    base += f"This message shows strong indicators of **{pattern_label}**. "
    if risk_level == "high":
        base += "The language may reflect patterns of emotional control, even when expressed in soft or caring terms.\n"
    elif risk_level == "moderate":
        base += "There are signs of emotional pressure or verbal aggression that may escalate if repeated.\n"
    else:
        base += "The message does not strongly indicate abuse, but it's important to monitor for patterns.\n"
    base += f"\n💡 *Why this might be flagged:*\n{explanation}\n"
    base += f"\nDetected Pattern: **{pattern_label} ({pattern_score})**\n"
    base += "🧠 You can review the pattern in context. This tool highlights possible dynamics—not judgments."
    return base
# --- Step X: Detect Immediate Danger Threats ---
THREAT_MOTIFS = [
    "i'll kill you", "i’m going to hurt you", "you’re dead", "you won't survive this",
    "i’ll break your face", "i'll bash your head in", "i’ll snap your neck",
    "i’ll come over there and make you shut up", "i'll knock your teeth out",
    "you’re going to bleed", "you want me to hit you?", "i won’t hold back next time",
    "i swear to god i’ll beat you", "next time, i won’t miss", "i’ll make you scream",
    "i know where you live", "i'm outside", "i’ll be waiting", "i saw you with him",
    "you can’t hide from me", "i’m coming to get you", "i'll find you", "i know your schedule",
    "i watched you leave", "i followed you home", "you'll regret this", "you’ll be sorry",
    "you’re going to wish you hadn’t", "you brought this on yourself", "don’t push me",
    "you have no idea what i’m capable of", "you better watch yourself",
    "i don’t care what happens to you anymore", "i’ll make you suffer", "you’ll pay for this",
    "i’ll never let you go", "you’re nothing without me", "if you leave me, i’ll kill myself",
    "i'll ruin you", "i'll tell everyone what you did", "i’ll make sure everyone knows",
    "i’m going to destroy your name", "you’ll lose everyone", "i’ll expose you",
    "your friends will hate you", "i’ll post everything", "you’ll be cancelled",
    "you’ll lose everything", "i’ll take the house", "i’ll drain your account",
    "you’ll never see a dime", "you’ll be broke when i’m done", "i’ll make sure you lose your job",
    "i’ll take your kids", "i’ll make sure you have nothing", "you can’t afford to leave me",
    "don't make me do this", "you know what happens when i’m mad", "you’re forcing my hand",
    "if you just behaved, this wouldn’t happen", "this is your fault",
    "you’re making me hurt you", "i warned you", "you should have listened"
]
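# Matching against THREAT_MOTIFS is substring-based after normalization (see
# `normalize` inside analyze_composite): lowercased, unicode-normalized, smart
# quotes straightened, punctuation stripped. So "I'll find you." in a message
# matches the motif "i'll find you".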
def compute_abuse_score(matched_scores, sentiment):
    if not matched_scores:
        return 0
    # Weighted average of passed patterns
    weighted_total = sum(score * weight for _, score, weight in matched_scores)
    weight_sum = sum(weight for _, _, weight in matched_scores)
    base_score = (weighted_total / weight_sum) * 100
    # Boost for pattern count
    pattern_count = len(matched_scores)
    scale = 1.0 + 0.25 * max(0, pattern_count - 1)  # 1.25x for 2 patterns, 1.5x for 3, and so on
    scaled_score = base_score * scale
    # Pattern floors
    FLOORS = {
        "control": 40,
        "gaslighting": 30,
        "insults": 25,
        "aggression": 40
    }
    floor = max(FLOORS.get(label, 0) for label, _, _ in matched_scores)
    adjusted_score = max(scaled_score, floor)
    # Sentiment tweak
    if sentiment == "undermining" and adjusted_score < 50:
        adjusted_score += 10
    return min(adjusted_score, 100)
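# Worked example: matched_scores = [("control", 0.60, 1.4), ("insults", 0.40, 1.4)]
#   weighted average = (0.60*1.4 + 0.40*1.4) / (1.4 + 1.4) = 0.50 -> base_score 50.0
#   two patterns     -> scale 1.25                              -> scaled_score 62.5
#   floors           -> max(40, 25) = 40, so max(62.5, 40) = 62.5
#   the "undermining" +10 applies only below 50, so the final score is 62.5.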
def analyze_single_message(text, thresholds):
    motif_hits, matched_phrases = detect_motifs(text)
    # Get emotion profile
    emotion_profile = get_emotion_profile(text)
    sentiment_score = emotion_profile.get("anger", 0) + emotion_profile.get("disgust", 0)
    # Get model scores
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
        scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
    # Sentiment override if neutral is high while critical thresholds are passed
    if emotion_profile.get("neutral", 0) > 0.85 and any(
        scores[LABELS.index(l)] > thresholds[l]
        for l in ["control", "blame shifting"]
    ):
        sentiment = "undermining"
    else:
        sentiment = "undermining" if sentiment_score > 0.25 else "supportive"
    weapon_flag = detect_weapon_language(text)
    adjusted_thresholds = {
        k: v + 0.05 if sentiment == "supportive" else v
        for k, v in thresholds.items()
    }
    darvo_score = predict_darvo_score(text)
    threshold_labels = [
        label for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]
    top_patterns = sorted(
        [(label, score) for label, score in zip(LABELS, scores)],
        key=lambda x: x[1],
        reverse=True
    )[:2]
    matched_scores = [
        (label, score, PATTERN_WEIGHTS.get(label, 1.0))
        for label, score in zip(LABELS, scores)
        if score > adjusted_thresholds[label]
    ]
    abuse_score_raw = compute_abuse_score(matched_scores, sentiment)
    abuse_score = abuse_score_raw
    # Risk stage logic
    stage = get_risk_stage(threshold_labels, sentiment) if threshold_labels else 1
    if weapon_flag and stage < 2:
        stage = 2
    if weapon_flag:
        abuse_score_raw = min(abuse_score_raw + 25, 100)
    abuse_score = min(
        abuse_score_raw,
        100 if "control" in threshold_labels else 95
    )
    # Tag must happen after abuse score is finalized
    tone_tag = get_emotional_tone_tag(emotion_profile, sentiment, threshold_labels, abuse_score)
    # Post-threshold validation: strip "recovery" if the tone tag shows it is not genuine
    # repair. (Moved here from before the tone tag was computed, where referencing
    # `tone_tag` raised a NameError.)
    if "recovery" in threshold_labels and tone_tag == "forced accountability flip":
        threshold_labels.remove("recovery")
        top_patterns = [p for p in top_patterns if p[0] != "recovery"]
        print("⚠️ Removing 'recovery' due to undermining sentiment (not genuine repair)")
    # ---- Profanity + Anger Override Logic ----
    profane_words = {"fuck", "fucking", "bitch", "shit", "cunt", "ho", "asshole", "dick", "whore", "slut"}
    tokens = set(text.lower().split())
    has_profane = any(word in tokens for word in profane_words)
    anger_score = emotion_profile.get("anger", 0)  # keys are lowercased; "Anger" never matched
    short_text = len(tokens) <= 10
    insult_score = next((s for l, s in top_patterns if l == "insults"), 0)
    if has_profane and anger_score > 0.75 and short_text:
        print("⚠️ Profanity + Anger Override Triggered")
        # Force "insults" to the front of top_patterns and into threshold_labels
        if "insults" not in threshold_labels:
            threshold_labels.append("insults")
        top_patterns = [("insults", insult_score)] + [p for p in top_patterns if p[0] != "insults"]
    # Debug
    print(f"Emotional Tone Tag: {tone_tag}")
    print("Emotion Profile:")
    for emotion, score in emotion_profile.items():
        print(f"  {emotion.capitalize():10}: {score}")
    print("\n--- Debug Info ---")
    print(f"Text: {text}")
    print(f"Sentiment (via emotion): {sentiment} (score: {round(sentiment_score, 3)})")
    print("Abuse Pattern Scores:")
    for label, score in zip(LABELS, scores):
        passed = "✅" if score > adjusted_thresholds[label] else "❌"
        print(f"  {label:25} → {score:.3f} {passed}")
    print(f"Matched for score: {[(l, round(s, 3)) for l, s, _ in matched_scores]}")
    print(f"Abuse Score Raw: {round(abuse_score_raw, 1)}")
    print("------------------\n")
    return abuse_score, threshold_labels, top_patterns, {"label": sentiment}, stage, darvo_score, tone_tag
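# Shape of the returned tuple (values are illustrative, not real outputs):
# (62.5, ["control"], [("control", 0.61), ("gaslighting", 0.33)],
#  {"label": "undermining"}, 1, 0.4012, "coercive warmth")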
def analyze_composite(msg1, msg2, msg3, *answers_and_none):
    none_selected_checked = answers_and_none[-1]
    responses_checked = any(answers_and_none[:-1])
    none_selected = not responses_checked and none_selected_checked
    escalation_score = None
    if not none_selected:
        escalation_score = sum(w for (_, w), a in zip(ESCALATION_QUESTIONS, answers_and_none[:-1]) if a)
    messages = [msg1, msg2, msg3]
    active = [(m, f"Message {i+1}") for i, m in enumerate(messages) if m.strip()]
    if not active:
        # The Interface declares two outputs (text, image), so return both
        return "Please enter at least one message.", None
    # Flag any threat phrases present in the messages
    def normalize(text):
        text = text.lower().strip()
        text = unicodedata.normalize("NFKD", text)  # normalize unicode forms
        text = text.replace("’", "'")  # smart quote to straight
        return re.sub(r"[^a-z0-9 ]", "", text)
    def detect_threat_motifs(message, motif_list):
        norm_msg = normalize(message)
        return [
            motif for motif in motif_list
            if normalize(motif) in norm_msg
        ]
    # Collect matches per message
    immediate_threats = [detect_threat_motifs(m, THREAT_MOTIFS) for m, _ in active]
    flat_threats = [t for sublist in immediate_threats for t in sublist]
    threat_risk = "Yes" if flat_threats else "No"
    results = [(analyze_single_message(m, THRESHOLDS.copy()), d) for m, d in active]
    abuse_scores = [r[0][0] for r in results]
    stages = [r[0][4] for r in results]
    darvo_scores = [r[0][5] for r in results]
    tone_tags = [r[0][6] for r in results]
    dates_used = [r[1] for r in results]
    predicted_labels = [label for r in results for label, _ in r[0][2]]
    high = {'control'}
    moderate = {'gaslighting', 'dismissiveness', 'obscure language', 'insults', 'contradictory statements', 'guilt tripping'}
    low = {'blame shifting', 'projection', 'recovery'}  # "recovery" matches LABELS
    counts = {'high': 0, 'moderate': 0, 'low': 0}
    for label in predicted_labels:
        if label in high:
            counts['high'] += 1
        elif label in moderate:
            counts['moderate'] += 1
        elif label in low:
            counts['low'] += 1
    # Pattern escalation logic
    pattern_escalation_risk = "Low"
    if counts['high'] >= 2 and counts['moderate'] >= 2:
        pattern_escalation_risk = "Critical"
    elif (
        (counts['high'] >= 2 and counts['moderate'] >= 1)
        or (counts['moderate'] >= 3)
        or (counts['high'] >= 1 and counts['moderate'] >= 2)
    ):
        pattern_escalation_risk = "High"
    elif (
        (counts['moderate'] == 2)
        or (counts['high'] == 1 and counts['moderate'] == 1)
        or (counts['moderate'] == 1 and counts['low'] >= 2)
        or (counts['high'] == 1 and sum(counts.values()) == 1)
    ):
        pattern_escalation_risk = "Moderate"
    checklist_escalation_risk = "Unknown" if escalation_score is None else (
        "Critical" if escalation_score >= 20 else
        "Moderate" if escalation_score >= 10 else
        "Low"
    )
    escalation_bump = 0
    for result, _ in results:
        abuse_score, _, _, sentiment, stage, darvo_score, tone_tag = result
        if darvo_score > 0.65:
            escalation_bump += 3
        if tone_tag in ["forced accountability flip", "emotional threat"]:
            escalation_bump += 2
        if abuse_score > 80:
            escalation_bump += 2
        if stage == 2:
            escalation_bump += 3
    def rank(label):
        return {"Low": 0, "Moderate": 1, "High": 2, "Critical": 3, "Unknown": 0}.get(label, 0)
    combined_score = rank(pattern_escalation_risk) + rank(checklist_escalation_risk) + escalation_bump
    escalation_risk = (
        "Critical" if combined_score >= 6 else
        "High" if combined_score >= 4 else
        "Moderate" if combined_score >= 2 else
        "Low"
    )
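    # Worked example: pattern risk "High" (2) + checklist risk "Moderate" (1) plus a
    # single stage-2 message (+3) gives combined_score 6, which maps to "Critical".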
    if escalation_score is None:
        escalation_text = "🚫 **Escalation Potential: Unknown** (Checklist not completed)\n⚠️ This section was not completed. Escalation potential is estimated using message data only.\n"
        hybrid_score = 0
    else:
        hybrid_score = escalation_score + escalation_bump
        escalation_text = f"📈 **Escalation Potential: {escalation_risk} ({hybrid_score}/29)**\n"
        escalation_text += "📋 This score combines your safety checklist answers *and* detected high-risk behavior.\n"
        escalation_text += f"• Pattern Risk: {pattern_escalation_risk}\n"
        escalation_text += f"• Checklist Risk: {checklist_escalation_risk}\n"
        escalation_text += f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"
    # Composite Abuse Score
    composite_abuse_scores = []
    for result, _ in results:
        _, _, top_patterns, sentiment, _, _, _ = result
        matched_scores = [(label, score, PATTERN_WEIGHTS.get(label, 1.0)) for label, score in top_patterns]
        final_score = compute_abuse_score(matched_scores, sentiment["label"])
        composite_abuse_scores.append(final_score)
    composite_abuse = int(round(sum(composite_abuse_scores) / len(composite_abuse_scores)))
    most_common_stage = max(set(stages), key=stages.count)
    stage_text = RISK_STAGE_LABELS[most_common_stage]
    # Derive top label list for each message
    top_labels = [r[0][1][0] if r[0][1] else r[0][2][0][0] for r in results]
    avg_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
    darvo_blurb = ""
    if avg_darvo > 0.25:
        level = "moderate" if avg_darvo < 0.65 else "high"
        darvo_blurb = f"\n\n🎭 **DARVO Score: {avg_darvo}** → This indicates a **{level} likelihood** of narrative reversal (DARVO), where the speaker may be denying, attacking, or reversing blame."
out = f"Abuse Intensity: {composite_abuse}%\n" | |
out += "📊 This reflects the strength and severity of detected abuse patterns in the message(s).\n\n" | |
out += generate_risk_snippet(composite_abuse, top_labels[0], hybrid_score, most_common_stage) | |
out += f"\n\n{stage_text}" | |
out += darvo_blurb | |
out += "\n\n🎭 **Emotional Tones Detected:**\n" | |
for i, tone in enumerate(tone_tags): | |
out += f"• Message {i+1}: *{tone or 'none'}*\n" | |
# --- Add Immediate Danger Threats section | |
if flat_threats: | |
out += "\n\n🚨 **Immediate Danger Threats Detected:**\n" | |
for t in set(flat_threats): | |
out += f"• \"{t}\"\n" | |
out += "\n⚠️ These phrases may indicate an imminent risk to physical safety." | |
else: | |
out += "\n\n🧩 **Immediate Danger Threats:** None explicitly detected.\n" | |
out += "This does *not* rule out risk, but no direct threat phrases were matched." | |
pattern_labels = [r[0][2][0][0] for r in results] | |
timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, pattern_labels) | |
out += "\n\n" + escalation_text | |
return out, timeline_image | |
textbox_inputs = [gr.Textbox(label=f"Message {i+1}") for i in range(3)]
quiz_boxes = [gr.Checkbox(label=q) for q, _ in ESCALATION_QUESTIONS]
none_box = gr.Checkbox(label="None of the above")
iface = gr.Interface(
    fn=analyze_composite,
    inputs=textbox_inputs + quiz_boxes + [none_box],
    outputs=[
        gr.Textbox(label="Results"),
        gr.Image(label="Abuse Score Timeline", type="pil")
    ],
    title="Abuse Pattern Detector + Escalation Quiz",
    description="Enter up to three messages that concern you. For the most accurate results, enter messages that happened during a recent time period that felt emotionally intense or 'off.'",
    allow_flagging="manual"
)
if __name__ == "__main__":
    iface.launch()