import io

import gradio as gr
import pytesseract
import torch
from PIL import Image
from transformers import pipeline as hf_pipeline, AutoModelForSequenceClassification, AutoTokenizer
# ——— 1) Emotion Pipeline ————————————————————————————————————————————————
emotion_pipeline = hf_pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,
    truncation=True
)
def get_emotion_profile(text):
    """
    Returns a dict of { emotion_label: score } for the input text.
    """
    results = emotion_pipeline(text)
    # some pipelines return [[…]]
    if isinstance(results, list) and isinstance(results[0], list):
        results = results[0]
    return {r["label"].lower(): round(r["score"], 3) for r in results}
# ——— 2) Abuse-patterns Model ——————————————————————————————————————————————
model_name = "SamanthaStorm/tether-multilabel-v3"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
LABELS = [
    "blame shifting", "contradictory statements", "control", "dismissiveness",
    "gaslighting", "guilt tripping", "insults", "obscure language",
    "projection", "recovery phase", "threat"
]

THRESHOLDS = {
    "blame shifting": 0.28, "contradictory statements": 0.27, "control": 0.08, "dismissiveness": 0.32,
    "gaslighting": 0.27, "guilt tripping": 0.31, "insults": 0.10, "obscure language": 0.55,
    "projection": 0.09, "recovery phase": 0.33, "threat": 0.15
}
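# How these thresholds are applied (sketch of the logic used in analyze_message
# below): the model emits one logit per label, sigmoid turns each into an
# independent 0-1 score, and a label counts as "active" only if its score
# clears its own per-label cutoff:
#   scores = torch.sigmoid(model(**tokenizer(text, return_tensors="pt")).logits).squeeze(0)
#   active = [lab for lab, sc in zip(LABELS, scores) if sc >= THRESHOLDS[lab]]
# Low cutoffs such as "control" (0.08) fire easily, while "obscure language"
# (0.55) needs a much more confident prediction.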
# ——— 3) Emotional-tone tagging (no abuse_score / DARVO) —————————————————————————————
def get_tone_tag(emotion_profile, patterns):
    anger = emotion_profile.get("anger", 0)
    disgust = emotion_profile.get("disgust", 0)
    sadness = emotion_profile.get("sadness", 0)
    joy = emotion_profile.get("joy", 0)
    neutral = emotion_profile.get("neutral", 0)
    fear = emotion_profile.get("fear", 0)

    # 1) Vulnerable: sadness high + recovery-phase
    if sadness > 0.4 and "recovery phase" in patterns:
        return "vulnerable"

    # 2) Supportive: joy very high + no other patterns (or only recovery-phase)
    if joy > 0.5 and (not patterns or patterns == ["recovery phase"]):
        return "supportive"

    # 3) Confrontational: anger/disgust high + aggressive patterns
    if (anger + disgust) > 0.5 and any(p in patterns for p in ["insults", "control", "threat"]):
        return "confrontational"

    # 4) Manipulative: neutral high + classic manipulation patterns
    if neutral > 0.4 and any(p in patterns for p in ["gaslighting", "dismissiveness", "projection", "guilt tripping", "blame shifting"]):
        return "manipulative"

    # 5) Feigned Warmth: joy high but manipulative patterns present
    if joy > 0.5 and any(p in patterns for p in ["gaslighting", "dismissiveness", "projection", "guilt tripping", "blame shifting"]):
        return "feigned warmth"

    # 6) Defensive: anger high + contradictory statements
    if anger > 0.4 and "contradictory statements" in patterns:
        return "defensive"

    # 7) Neutral: pure neutral dominates all
    if neutral > max(anger, disgust, sadness, joy, fear):
        return "neutral"

    return None
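# Illustrative tone tagging (hypothetical profiles; rules are checked top-down,
# the first match wins, otherwise None is returned):
#   get_tone_tag({"sadness": 0.62, "neutral": 0.20}, ["recovery phase"])
#     -> "vulnerable"     (rule 1: high sadness + recovery phase)
#   get_tone_tag({"neutral": 0.55, "anger": 0.10}, ["gaslighting"])
#     -> "manipulative"   (rule 4: high neutral + manipulation pattern)
#   get_tone_tag({"joy": 0.70}, [])
#     -> "supportive"     (rule 2: high joy, no patterns)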
# ——— 4) Single-message analysis ——————————————————————————————————————————————
def analyze_message(text):
    """
    Runs emotion profiling and the abuse-pattern classifier on one message.
    Returns a dict with:
      - emotion_profile: { emotion: score }
      - active_patterns: [ labels above their threshold ]
      - tone_tag: emotional-tone label from get_tone_tag (or None)
    """
    emotion_profile = get_emotion_profile(text)

    # get raw model scores
    toks = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        logits = model(**toks).logits.squeeze(0)
    scores = torch.sigmoid(logits).cpu().numpy()

    # pick up all labels whose score >= threshold
    active = [lab for lab, sc in zip(LABELS, scores) if sc >= THRESHOLDS[lab]]

    # derive the emotional-tone tag from the profile and the active patterns
    tone_tag = get_tone_tag(emotion_profile, active)

    return {
        "emotion_profile": emotion_profile,
        "active_patterns": active,
        "tone_tag": tone_tag
    }
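# Illustrative end-to-end result (hypothetical values):
#   analyze_message("You're imagining things. That never happened.")
#   -> {"emotion_profile": {"anger": 0.12, "neutral": 0.51, "sadness": 0.18, ...},
#       "active_patterns": ["gaslighting"],
#       "tone_tag": "manipulative"}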
# ——— 5) Composite wrapper (handles .txt or image + text boxes) ——————————————————————
def analyze_composite(uploaded_file, *texts):
    outputs = []

    if uploaded_file is not None:
        raw = uploaded_file.read()
        name = uploaded_file.name.lower()
        if name.endswith((".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".gif")):
            img = Image.open(io.BytesIO(raw))
            content = pytesseract.image_to_string(img)
        else:
            try:
                content = raw.decode("utf-8")
            except UnicodeDecodeError:
                content = raw.decode("latin-1")
        r = analyze_message(content)
        outputs.append(
            "── Uploaded File ──\n"
            f"Emotion Profile : {r['emotion_profile']}\n"
            f"Active Patterns : {r['active_patterns']}\n"
            f"Emotional Tone : {r['tone_tag']}\n"
        )

    for idx, txt in enumerate(texts, start=1):
        if not txt:
            continue
        r = analyze_message(txt)
        outputs.append(
            f"── Message {idx} ──\n"
            f"Emotion Profile : {r['emotion_profile']}\n"
            f"Active Patterns : {r['active_patterns']}\n"
            f"Emotional Tone : {r['tone_tag']}\n"
        )

    return "\n".join(outputs) if outputs else "Please enter at least one message."
# ——— 6) Gradio interface ————————————————————————————————————————————————
message_inputs = [gr.Textbox(label=f"Message {i+1}") for i in range(3)]
iface = gr.Interface(
    fn=analyze_composite,
    inputs=[
        gr.File(file_types=[".txt", ".png", ".jpg", ".jpeg"],
                label="Upload text or image")
    ] + message_inputs,
    outputs=gr.Textbox(label="Analysis"),
    title="Tether Analyzer (with Tone Tags)",
    description="Extracts emotion profiles, active abuse patterns, and an emotional tone tag (no abuse score or DARVO)."
)
if __name__ == "__main__":
    iface.launch()