SamanthaStorm committed on
Commit
b79ebea
·
verified ·
1 Parent(s): bcfa072

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -2
app.py CHANGED
@@ -456,6 +456,11 @@ def analyze_single_message(text, thresholds):
456
 
457
  # Get tone using emotion-based approach
458
  tone_tag = get_emotional_tone_tag(text, sentiment, threshold_labels, abuse_score)
 
 
 
 
 
459
 
460
  # Set stage
461
  stage = 2 if explicit_abuse or abuse_score > 70 else 1
@@ -609,15 +614,34 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
609
  immediate_threats = [detect_threat_motifs(m, THREAT_MOTIFS) for m, _ in active]
610
  flat_threats = [t for sublist in immediate_threats for t in sublist]
611
  threat_risk = "Yes" if flat_threats else "No"
612
- # Analyze each message
613
  logger.debug("\nπŸ” INDIVIDUAL MESSAGE ANALYSIS")
614
  logger.debug("=" * 50)
615
  results = []
616
  for m, d in active:
617
  logger.debug(f"\nπŸ“ ANALYZING {d}")
618
- logger.debug("=" * 40)
619
  result = analyze_single_message(m, THRESHOLDS.copy())
 
 
 
 
 
 
 
 
620
  results.append((result, d))
 
 
 
 
 
 
 
 
 
 
 
621
 
622
  # Unpack results for cleaner logging
623
  abuse_score, patterns, matched_scores, sentiment, stage, darvo_score, tone = result
 
456
 
457
  # Get tone using emotion-based approach
458
  tone_tag = get_emotional_tone_tag(text, sentiment, threshold_labels, abuse_score)
459
+ # Check for the specific combination
460
+ highest_pattern = max(matched_scores, key=lambda x: x[1])[0] if matched_scores else None # Get highest pattern
461
+ if sentiment == "supportive" and tone_tag == "neutral" and highest_pattern == "obscure language":
462
+ logger.debug("Message classified as likely non-abusive (supportive, neutral, and obscure language). Returning low risk.")
463
+ return 0.0, [], [], {"label": "supportive"}, 1, 0.0, "neutral" # Return non-abusive values
464
 
465
  # Set stage
466
  stage = 2 if explicit_abuse or abuse_score > 70 else 1
 
614
  immediate_threats = [detect_threat_motifs(m, THREAT_MOTIFS) for m, _ in active]
615
  flat_threats = [t for sublist in immediate_threats for t in sublist]
616
  threat_risk = "Yes" if flat_threats else "No"
617
+ # Analyze each message
618
  logger.debug("\nπŸ” INDIVIDUAL MESSAGE ANALYSIS")
619
  logger.debug("=" * 50)
620
  results = []
621
  for m, d in active:
622
  logger.debug(f"\nπŸ“ ANALYZING {d}")
623
+ logger.debug("-" * 40) # Separator for each message
624
  result = analyze_single_message(m, THRESHOLDS.copy())
625
+
626
+ # Check for non-abusive classification and skip further analysis
627
+ if result[0] == 0.0 and result[1] == [] and result[3] == {"label": "supportive"} and result[4] == 1 and result[5] == 0.0 and result[6] == "neutral":
628
+ logger.debug(f"βœ“ {d} classified as non-abusive, skipping further analysis.")
629
+ # Option to include in final output (uncomment if needed):
630
+ # results.append(({"abuse_score": 0.0, "patterns": [], "sentiment": {"label": "supportive"}, "stage": 1, "darvo_score": 0.0, "tone": "neutral"}, d))
631
+ continue # Skip to the next message
632
+
633
  results.append((result, d))
634
+ # Log the detailed results for the current message (if not skipped)
635
+ abuse_score, patterns, matched_scores, sentiment, stage, darvo_score, tone = result
636
+ logger.debug(f"\nπŸ“Š Results for {d}:")
637
+ logger.debug(f" β€’ Abuse Score: {abuse_score:.1f}%")
638
+ logger.debug(f" β€’ DARVO Score: {darvo_score:.3f}")
639
+ logger.debug(f" β€’ Risk Stage: {stage}")
640
+ logger.debug(f" β€’ Sentiment: {sentiment['label']}")
641
+ logger.debug(f" β€’ Tone: {tone}")
642
+ if patterns:
643
+ logger.debug(" β€’ Patterns: " + ", ".join(patterns))
644
+
645
 
646
  # Unpack results for cleaner logging
647
  abuse_score, patterns, matched_scores, sentiment, stage, darvo_score, tone = result