SamanthaStorm committed on
Commit 2efdba9 · verified · 1 Parent(s): fda55e9

Update app.py

Files changed (1)
  1. app.py +123 -121
app.py CHANGED
@@ -4,151 +4,153 @@ import numpy as np
  from transformers import AutoModelForSequenceClassification, AutoTokenizer
  from transformers import RobertaForSequenceClassification, RobertaTokenizer

- # Load custom fine-tuned sentiment model
- sentiment_model = AutoModelForSequenceClassification.from_pretrained("SamanthaStorm/tether-sentiment")
- sentiment_tokenizer = AutoTokenizer.from_pretrained("SamanthaStorm/tether-sentiment")

- # Load abuse pattern model
- model_name = "SamanthaStorm/abuse-pattern-detector-v2"
  model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
  tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)

  LABELS = [
-     "gaslighting", "mockery", "dismissiveness", "control", "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
-     "contradictory_statements", "manipulation", "deflection", "insults", "obscure_formal", "recovery_phase", "non_abusive",
-     "suicidal_threat", "physical_threat", "extreme_control"
  ]

  THRESHOLDS = {
-     "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.30, "control": 0.43, "guilt_tripping": 0.19,
-     "apology_baiting": 0.45, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
-     "manipulation": 0.25, "deflection": 0.30, "insults": 0.34, "obscure_formal": 0.25, "recovery_phase": 0.25,
-     "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.36
  }

  PATTERN_LABELS = LABELS[:15]
  DANGER_LABELS = LABELS[15:18]

  EXPLANATIONS = {
-     "gaslighting": "Gaslighting involves making someone question their own reality or perceptions...",
-     "blame_shifting": "Blame-shifting is when one person redirects the responsibility...",
-     "projection": "Projection involves accusing the victim of behaviors the abuser exhibits.",
-     "dismissiveness": "Dismissiveness is belittling or disregarding another person's feelings.",
-     "mockery": "Mockery ridicules someone in a hurtful, humiliating way.",
-     "recovery_phase": "Recovery phase dismisses someone's emotional healing process.",
-     "insults": "Insults are derogatory remarks aimed at degrading someone.",
-     "apology_baiting": "Apology-baiting manipulates victims into apologizing for abuser's behavior.",
-     "deflection": "Deflection avoids accountability by redirecting blame.",
-     "control": "Control restricts autonomy through manipulation or coercion.",
-     "extreme_control": "Extreme control dominates decisions and behaviors entirely.",
-     "physical_threat": "Physical threats signal risk of bodily harm.",
-     "suicidal_threat": "Suicidal threats manipulate others using self-harm threats.",
-     "guilt_tripping": "Guilt-tripping uses guilt to manipulate someone's actions.",
-     "manipulation": "Manipulation deceives to influence or control outcomes.",
-     "non_abusive": "Non-abusive language is respectful and free of coercion.",
-     "obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
  }

  def custom_sentiment(text):
-     inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
-     with torch.no_grad():
-         outputs = sentiment_model(**inputs)
-     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
-     label_idx = torch.argmax(probs).item()
-     label_map = {0: "supportive", 1: "undermining"}
-     label = label_map[label_idx]
-     score = probs[0][label_idx].item()
-     return {"label": label, "score": score}

  def calculate_abuse_level(scores, thresholds):
-     triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
-     return round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0

  def interpret_abuse_level(score):
-     if score > 80: return "Extreme / High Risk"
-     elif score > 60: return "Severe / Harmful Pattern Present"
-     elif score > 40: return "Likely Abuse"
-     elif score > 20: return "Mild Concern"
-     return "Very Low / Likely Safe"

  def analyze_messages(input_text, risk_flags):
-     input_text = input_text.strip()
-     if not input_text:
-         return "Please enter a message for analysis."
-
-     sentiment = custom_sentiment(input_text)
-     sentiment_label = sentiment['label']
-     sentiment_score = sentiment['score']
-
-     adjusted_thresholds = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "undermining" else THRESHOLDS.copy()
-
-     inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
-     with torch.no_grad():
-         outputs = model(**inputs)
-     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
-
-     pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
-     danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
-
-     contextual_flags = risk_flags if risk_flags else []
-     if len(contextual_flags) >= 2:
-         danger_flag_count += 1
-
-     critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
-     high_risk_context = any(flag in contextual_flags for flag in critical_flags)
-     if high_risk_context:
-         danger_flag_count += 1
-
-     non_abusive_score = scores[LABELS.index('non_abusive')]
-     if non_abusive_score > adjusted_thresholds['non_abusive']:
-         return "This message is classified as non-abusive."
-
-     abuse_level = calculate_abuse_level(scores, adjusted_thresholds)
-     abuse_description = interpret_abuse_level(abuse_level)
-
-     if danger_flag_count >= 2:
-         resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
-     else:
-         resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
-
-     scored_patterns = [
-         (label, score) for label, score in zip(PATTERN_LABELS, scores[:15])
-         if label != "non_abusive"
-     ]
-     top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
-
-     top_pattern_explanations = "\n".join([
-         f"\u2022 {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
-         for label, _ in top_patterns
-     ])
-
-     result = (
-         f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
-         f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
-         f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
-         "Resources: " + resources + "\n\n"
-         f"Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)"
-     )
-
-     if contextual_flags:
-         result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag}" for flag in contextual_flags])
-     if high_risk_context:
-         result += "\n\n🚨 These responses suggest a high-risk situation. Consider seeking immediate help or safety planning resources."
-
-     return result

  iface = gr.Interface(
-     fn=analyze_messages,
-     inputs=[
-         gr.Textbox(lines=10, placeholder="Enter message here..."),
-         gr.CheckboxGroup(label="Do any of these apply to your situation?", choices=[
-             "They've threatened harm", "They isolate me", "I've changed my behavior out of fear",
-             "They monitor/follow me", "I feel unsafe when alone with them"
-         ])
-     ],
-     outputs=[gr.Textbox(label="Analysis Result")],
-     title="Abuse Pattern Detector"
  )

- if __name__ == "__main__":
-     iface.launch()

  from transformers import AutoModelForSequenceClassification, AutoTokenizer
  from transformers import RobertaForSequenceClassification, RobertaTokenizer

+ # Load custom fine-tuned sentiment model
+ sentiment_model = AutoModelForSequenceClassification.from_pretrained("SamanthaStorm/tether-sentiment")
+ sentiment_tokenizer = AutoTokenizer.from_pretrained("SamanthaStorm/tether-sentiment")
+
+ # Load abuse pattern model
+ model_name = "SamanthaStorm/abuse-pattern-detector-v2"
  model = RobertaForSequenceClassification.from_pretrained(model_name, trust_remote_code=True)
  tokenizer = RobertaTokenizer.from_pretrained(model_name, trust_remote_code=True)

  LABELS = [
+     "gaslighting", "mockery", "dismissiveness", "control", "guilt_tripping", "apology_baiting", "blame_shifting", "projection",
+     "contradictory_statements", "manipulation", "deflection", "insults", "obscure_formal", "recovery_phase", "non_abusive",
+     "suicidal_threat", "physical_threat", "extreme_control"
  ]

  THRESHOLDS = {
+     "gaslighting": 0.25, "mockery": 0.15, "dismissiveness": 0.30, "control": 0.43, "guilt_tripping": 0.19,
+     "apology_baiting": 0.45, "blame_shifting": 0.23, "projection": 0.50, "contradictory_statements": 0.25,
+     "manipulation": 0.25, "deflection": 0.30, "insults": 0.34, "obscure_formal": 0.25, "recovery_phase": 0.25,
+     "non_abusive": 2.0, "suicidal_threat": 0.45, "physical_threat": 0.02, "extreme_control": 0.36
  }

  PATTERN_LABELS = LABELS[:15]
  DANGER_LABELS = LABELS[15:18]

  EXPLANATIONS = {
+     "gaslighting": "Gaslighting involves making someone question their own reality or perceptions...",
+     "blame_shifting": "Blame-shifting is when one person redirects the responsibility...",
+     "projection": "Projection involves accusing the victim of behaviors the abuser exhibits.",
+     "dismissiveness": "Dismissiveness is belittling or disregarding another person's feelings.",
+     "mockery": "Mockery ridicules someone in a hurtful, humiliating way.",
+     "recovery_phase": "Recovery phase dismisses someone's emotional healing process.",
+     "insults": "Insults are derogatory remarks aimed at degrading someone.",
+     "apology_baiting": "Apology-baiting manipulates victims into apologizing for abuser's behavior.",
+     "deflection": "Deflection avoids accountability by redirecting blame.",
+     "control": "Control restricts autonomy through manipulation or coercion.",
+     "extreme_control": "Extreme control dominates decisions and behaviors entirely.",
+     "physical_threat": "Physical threats signal risk of bodily harm.",
+     "suicidal_threat": "Suicidal threats manipulate others using self-harm threats.",
+     "guilt_tripping": "Guilt-tripping uses guilt to manipulate someone's actions.",
+     "manipulation": "Manipulation deceives to influence or control outcomes.",
+     "non_abusive": "Non-abusive language is respectful and free of coercion.",
+     "obscure_formal": "Obscure/formal language manipulates through confusion or superiority."
  }

  def custom_sentiment(text):
+     inputs = sentiment_tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+     with torch.no_grad():
+         outputs = sentiment_model(**inputs)
+     probs = torch.nn.functional.softmax(outputs.logits, dim=1)
+     label_idx = torch.argmax(probs).item()
+
+     label_map = {0: "supportive", 1: "undermining"}
+     label = label_map[label_idx]
+
+     score = probs[0][label_idx].item()
+     return {"label": label, "score": score}

  def calculate_abuse_level(scores, thresholds):
+     triggered_scores = [score for label, score in zip(LABELS, scores) if score > thresholds[label]]
+     return round(np.mean(triggered_scores) * 100, 2) if triggered_scores else 0.0

  def interpret_abuse_level(score):
+     if score > 80: return "Extreme / High Risk"
+     elif score > 60: return "Severe / Harmful Pattern Present"
+     elif score > 40: return "Likely Abuse"
+     elif score > 20: return "Mild Concern"
+     return "Very Low / Likely Safe"

  def analyze_messages(input_text, risk_flags):
+     input_text = input_text.strip()
+     if not input_text:
+         return "Please enter a message for analysis."
+
+     sentiment = custom_sentiment(input_text)
+     sentiment_label = sentiment['label']
+     sentiment_score = sentiment['score']
+
+     adjusted_thresholds = {k: v * 0.8 for k, v in THRESHOLDS.items()} if sentiment_label == "undermining" else THRESHOLDS.copy()
+
+     inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
+     with torch.no_grad():
+         outputs = model(**inputs)
+     scores = torch.sigmoid(outputs.logits.squeeze(0)).numpy()
+
+     pattern_count = sum(score > adjusted_thresholds[label] for label, score in zip(PATTERN_LABELS, scores[:15]))
+     danger_flag_count = sum(score > adjusted_thresholds[label] for label, score in zip(DANGER_LABELS, scores[15:18]))
+
+     contextual_flags = risk_flags if risk_flags else []
+     if len(contextual_flags) >= 2:
+         danger_flag_count += 1
+
+     critical_flags = ["They've threatened harm", "They monitor/follow me", "I feel unsafe when alone with them"]
+     high_risk_context = any(flag in contextual_flags for flag in critical_flags)
+
+     non_abusive_score = scores[LABELS.index('non_abusive')]
+     if non_abusive_score > adjusted_thresholds['non_abusive']:
+         return "This message is classified as non-abusive."
+
+     abuse_level = calculate_abuse_level(scores, adjusted_thresholds)
+     abuse_description = interpret_abuse_level(abuse_level)
+
+     if danger_flag_count >= 2:
+         resources = "Immediate assistance recommended. Please seek professional help or contact emergency services."
+     else:
+         resources = "For more information on abuse patterns, consider reaching out to support groups or professional counselors."
+
+     scored_patterns = [
+         (label, score) for label, score in zip(LABELS, scores)
+         if label != "non_abusive" and score > adjusted_thresholds[label]
+     ]
+     top_patterns = sorted(scored_patterns, key=lambda x: x[1], reverse=True)[:2]
+
+     top_pattern_explanations = "\n".join([
+         f"\u2022 {label.replace('_', ' ').title()}: {EXPLANATIONS.get(label, 'No explanation available.')}"
+         for label, _ in top_patterns
+     ])
+
+     result = (
+         f"Abuse Risk Score: {abuse_level}% – {abuse_description}\n\n"
+         f"Most Likely Patterns:\n{top_pattern_explanations}\n\n"
+         f"⚠️ Critical Danger Flags Detected: {danger_flag_count} of 3\n"
+         "Resources: " + resources + "\n\n"
+         f"Sentiment: {sentiment_label.title()} (Confidence: {sentiment_score*100:.2f}%)"
+     )
+
+     if contextual_flags:
+         result += "\n\n⚠️ You indicated the following:\n" + "\n".join([f"• {flag}" for flag in contextual_flags])
+     if high_risk_context:
+         result += "\n\n🚨 These responses suggest a high-risk situation. Consider seeking immediate help or safety planning resources."
+
+     return result

  iface = gr.Interface(
+     fn=analyze_messages,
+     inputs=[
+         gr.Textbox(lines=10, placeholder="Enter message here..."),
+         gr.CheckboxGroup(label="Do any of these apply to your situation?", choices=[
+             "They've threatened harm", "They isolate me", "I've changed my behavior out of fear",
+             "They monitor/follow me", "I feel unsafe when alone with them"
+         ])
+     ],
+     outputs=[gr.Textbox(label="Analysis Result")],
+     title="Abuse Pattern Detector"
  )

+ if __name__ == "__main__":
+     iface.launch()
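
Not part of the commit: a minimal sketch of how the updated analyze_messages function could be exercised locally without launching the Gradio interface. It assumes the file above is saved as app.py in the working directory and that both Hugging Face models can be downloaded on first import; the sample message and checklist flag below are purely illustrative.

# sanity_check.py -- illustrative only, not part of this commit.
# Importing app runs its module-level code, which loads both models
# (SamanthaStorm/tether-sentiment and SamanthaStorm/abuse-pattern-detector-v2).
from app import analyze_messages

sample_message = "After everything I do for you, you still act like I'm the problem."
sample_flags = ["They monitor/follow me"]  # one of the CheckboxGroup strings defined in app.py

# analyze_messages returns a single formatted string: abuse risk score, the top
# two triggered patterns, danger-flag count, a resources note, and the sentiment
# label with its confidence.
print(analyze_messages(sample_message, sample_flags))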