Update app.py
app.py
CHANGED
@@ -1,7 +1,9 @@
 import gradio as gr
 import torch
 from transformers import pipeline as hf_pipeline, AutoModelForSequenceClassification, AutoTokenizer
-
+from PIL import Image
+import pytesseract
+import io
 
 # ——— 1) Emotion Pipeline ————————————————————————————————————————————————
 emotion_pipeline = hf_pipeline(
@@ -12,11 +14,7 @@ emotion_pipeline = hf_pipeline(
 )
 
 def get_emotion_profile(text):
-    """
-    Returns a dict of { emotion_label: score } for the input text.
-    """
     results = emotion_pipeline(text)
-    # some pipelines return [[…]]
     if isinstance(results, list) and isinstance(results[0], list):
         results = results[0]
     return {r["label"].lower(): round(r["score"], 3) for r in results}
@@ -39,6 +37,7 @@ THRESHOLDS = {
     "projection": 0.09, "recovery phase": 0.33, "threat": 0.15
 }
 
+
 # ——— 3) Emotional-tone tagging (no abuse_score / DARVO) —————————————————————————————
 def get_emotional_tone_tag(emotion_profile, patterns):
     anger = emotion_profile.get("anger", 0)
@@ -60,7 +59,7 @@ def get_emotional_tone_tag(emotion_profile, patterns):
     if (anger + disgust) > 0.5 and any(p in patterns for p in ["insults", "control", "threat"]):
         return "confrontational"
 
-    # 4) Manipulative: neutral high +
+    # 4) Manipulative: neutral high + manipulation patterns
    if neutral > 0.4 and any(p in patterns for p in ["gaslighting", "dismissiveness", "projection", "guilt tripping", "blame shifting"]):
         return "manipulative"
 
@@ -79,38 +78,38 @@ def get_emotional_tone_tag(emotion_profile, patterns):
     return None
 
 
-# ———
+# ——— 4) Single-message analysis —————————————————————————————————————————————
 def analyze_message(text):
-    """
-    Runs emotion profiling, and the abuse-pattern classifier.
-    Returns a dict with:
-      - emotion_profile: { emotion: score }
-      - active_patterns: [ labels above their threshold ]
-    """
-
+    # 1) emotion profiling
     emotion_profile = get_emotion_profile(text)
 
-    #
+    # 2) abuse-pattern classification
     toks = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
         logits = model(**toks).logits.squeeze(0)
     scores = torch.sigmoid(logits).cpu().numpy()
 
-    #
-
+    # 3) identify active patterns
+    active_patterns = [
+        label for label, prob in zip(LABELS, scores)
+        if prob >= THRESHOLDS[label]
+    ]
 
+    # 4) tone tagging
     tone_tag = get_emotional_tone_tag(emotion_profile, active_patterns)
+
     return {
-
-
-
-    }
+        "emotion_profile": emotion_profile,
+        "active_patterns": active_patterns,
+        "tone_tag": tone_tag
+    }
 
 
+# ——— 5) Composite wrapper (handles .txt or image + text boxes) ——————————————————————
 def analyze_composite(uploaded_file, *texts):
     outputs = []
 
-    #
+    # file upload
     if uploaded_file is not None:
         raw = uploaded_file.read()
         name = uploaded_file.name.lower()
@@ -126,21 +125,21 @@ def analyze_composite(uploaded_file, *texts):
         r = analyze_message(content)
         outputs.append(
             "── Uploaded File ──\n"
-            f"Emotion Profile
-            f"Active Patterns
-            f"Emotional Tone
+            f"Emotion Profile : {r['emotion_profile']}\n"
+            f"Active Patterns : {r['active_patterns']}\n"
+            f"Emotional Tone : {r['tone_tag']}\n"
         )
 
-    #
+    # text inputs
     for idx, txt in enumerate(texts, start=1):
         if not txt:
             continue
         r = analyze_message(txt)
         outputs.append(
             f"── Message {idx} ──\n"
-            f"Emotion Profile
-            f"Active Patterns
-            f"Emotional Tone
+            f"Emotion Profile : {r['emotion_profile']}\n"
+            f"Active Patterns : {r['active_patterns']}\n"
+            f"Emotional Tone : {r['tone_tag']}\n"
         )
 
     if not outputs:
@@ -148,19 +147,17 @@ def analyze_composite(uploaded_file, *texts):
 
     return "\n".join(outputs)
 
+
 # ——— 6) Gradio interface ————————————————————————————————————————————————
 message_inputs = [gr.Textbox(label=f"Message {i+1}") for i in range(3)]
 
 iface = gr.Interface(
     fn=analyze_composite,
-    inputs=[
-        gr.File(file_types=[".txt", ".png", ".jpg", ".jpeg"],
-                label="Upload text or image")
-    ] + message_inputs,
+    inputs=[gr.File(file_types=[".txt", ".png", ".jpg", ".jpeg"], label="Upload text or image")] + message_inputs,
     outputs=gr.Textbox(label="Analysis"),
-    title="Tether Analyzer (
-    description="
+    title="Tether Analyzer (streamlined)",
+    description="Emotion profiling, pattern tags, and tone tagging—no motif detection, no abuse score or DARVO."
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
|