SamanthaStorm committed on
Commit
e5da456
·
verified ·
1 Parent(s): 9c4d0f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -0
app.py CHANGED
@@ -187,6 +187,8 @@ def get_emotion_profile(text):
187
  "fear": 0.0
188
  }
189
 
 
 
190
  def get_emotional_tone_tag(text, sentiment, patterns, abuse_score):
191
  """Get emotional tone tag based on emotions and patterns"""
192
  emotions = get_emotion_profile(text)
@@ -198,6 +200,20 @@ def get_emotional_tone_tag(text, sentiment, patterns, abuse_score):
198
  anger = emotions.get("anger", 0)
199
  fear = emotions.get("fear", 0)
200
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  # 1. Performative Regret
202
  if (
203
  sadness > 0.3 and
@@ -977,7 +993,22 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
977
  logger.debug("\nπŸ“„ GENERATING FINAL REPORT")
978
  logger.debug("=" * 50)
979
  out = f"Abuse Intensity: {composite_abuse}%\n"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
980
  out += "πŸ“Š This reflects the strength and severity of detected abuse patterns in the message(s).\n\n"
 
981
 
982
  # Risk Level Assessment
983
  risk_level = (
 
187
  "fear": 0.0
188
  }
189
 
190
+
191
+
192
  def get_emotional_tone_tag(text, sentiment, patterns, abuse_score):
193
  """Get emotional tone tag based on emotions and patterns"""
194
  emotions = get_emotion_profile(text)
 
200
  anger = emotions.get("anger", 0)
201
  fear = emotions.get("fear", 0)
202
 
203
+ # Direct Threat (New)
204
+ text_lower = text.lower()
205
+ threat_indicators = [
206
+ "if you", "i'll make", "don't forget", "remember", "regret",
207
+ "i control", "i'll take", "you'll lose", "make sure",
208
+ "never see", "won't let"
209
+ ]
210
+ if (
211
+ any(indicator in text_lower for indicator in threat_indicators) and
212
+ any(p in patterns for p in ["control", "insults"]) and
213
+ (anger > 0.2 or disgust > 0.2 or abuse_score > 70)
214
+ ):
215
+ return "direct threat"
216
+
217
  # 1. Performative Regret
218
  if (
219
  sadness > 0.3 and
 
993
  logger.debug("\nπŸ“„ GENERATING FINAL REPORT")
994
  logger.debug("=" * 50)
995
  out = f"Abuse Intensity: {composite_abuse}%\n"
996
+ # Add detected patterns to output
997
+ if predicted_labels:
998
+ out += "πŸ” Detected Patterns:\n"
999
+ if high_patterns:
1000
+ patterns_str = ", ".join(f"{p} ({pattern_counts[p]}x)" for p in high_patterns)
1001
+ out += f"❗ High Severity: {patterns_str}\n"
1002
+ if moderate_patterns:
1003
+ patterns_str = ", ".join(f"{p} ({pattern_counts[p]}x)" for p in moderate_patterns)
1004
+ out += f"⚠️ Moderate Severity: {patterns_str}\n"
1005
+ if low_patterns:
1006
+ patterns_str = ", ".join(f"{p} ({pattern_counts[p]}x)" for p in low_patterns)
1007
+ out += f"πŸ“ Low Severity: {patterns_str}\n"
1008
+ out += "\n"
1009
+
1010
  out += "πŸ“Š This reflects the strength and severity of detected abuse patterns in the message(s).\n\n"
1011
+
1012
 
1013
  # Risk Level Assessment
1014
  risk_level = (