Update app.py
app.py CHANGED
@@ -11,6 +11,8 @@ from datetime import datetime
from torch.nn.functional import sigmoid
from collections import Counter
import logging
+import traceback
+

# Set up logging
logging.basicConfig(level=logging.DEBUG)
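Note: the logging calls added below go through a module-level `logger`, which is not shown in this diff. A minimal sketch of the assumed setup elsewhere in app.py:

import logging
import traceback

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)  # assumed: app.py presumably defines its logger roughly like this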
@@ -490,32 +492,50 @@ def generate_abuse_score_chart(dates, scores, patterns):

def analyze_composite(msg1, msg2, msg3, *answers_and_none):
    """Analyze multiple messages and checklist responses"""
+    logger.debug("\n====== STARTING NEW ANALYSIS ======")
    try:
        # Process checklist responses
+        logger.debug("\n--- Checklist Processing ---")
        none_selected_checked = answers_and_none[-1]
        responses_checked = any(answers_and_none[:-1])
        none_selected = not responses_checked and none_selected_checked
+
+        logger.debug(f"None selected checked: {none_selected_checked}")
+        logger.debug(f"Responses checked: {responses_checked}")
+        logger.debug(f"None selected: {none_selected}")

        if none_selected:
            escalation_score = 0
            escalation_note = "Checklist completed: no danger items reported."
            escalation_completed = True
+            logger.debug("No items selected in checklist")
        elif responses_checked:
            escalation_score = sum(w for (_, w), a in zip(ESCALATION_QUESTIONS, answers_and_none[:-1]) if a)
            escalation_note = "Checklist completed."
            escalation_completed = True
+            logger.debug(f"Checklist completed with score: {escalation_score}")
+            # Log checked items
+            logger.debug("Checked items:")
+            for (q, w), a in zip(ESCALATION_QUESTIONS, answers_and_none[:-1]):
+                if a:
+                    logger.debug(f"• {q} (weight: {w})")
        else:
            escalation_score = None
            escalation_note = "Checklist not completed."
            escalation_completed = False
+            logger.debug("Checklist not completed")

        # Process messages
+        logger.debug("\n--- Message Processing ---")
        messages = [msg1, msg2, msg3]
        active = [(m, f"Message {i+1}") for i, m in enumerate(messages) if m.strip()]
+        logger.debug(f"Number of active messages: {len(active)}")
        if not active:
+            logger.debug("No messages provided")
            return "Please enter at least one message.", None

        # Detect threats
+        logger.debug("\n--- Threat Detection ---")
        def normalize(text):
            import unicodedata
            text = text.lower().strip()
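For context (not part of the diff): the checklist score is a weighted sum over (question, weight) pairs. A minimal runnable sketch, with hypothetical questions and weights since the real ESCALATION_QUESTIONS live elsewhere in app.py:

# Hypothetical entries; the real ESCALATION_QUESTIONS are defined elsewhere in app.py.
ESCALATION_QUESTIONS = [
    ("Threats of physical harm", 10),
    ("Behaviour has become more frequent or intense", 5),
    ("Monitoring of devices or location", 5),
]

answers_and_none = (True, False, True, False)  # three checkbox answers + trailing "none selected" box
responses = answers_and_none[:-1]
escalation_score = sum(w for (_, w), a in zip(ESCALATION_QUESTIONS, responses) if a)
print(escalation_score)  # 10 + 5 = 15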
@@ -532,8 +552,32 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
        flat_threats = [t for sublist in immediate_threats for t in sublist]
        threat_risk = "Yes" if flat_threats else "No"

+        if flat_threats:
+            logger.debug("Detected threats:")
+            for threat in flat_threats:
+                logger.debug(f"• {threat}")
+        else:
+            logger.debug("No explicit threats detected")
+
        # Analyze each message
-
+        logger.debug("\n--- Individual Message Analysis ---")
+        results = []
+        for m, d in active:
+            logger.debug(f"\nAnalyzing {d}:")
+            logger.debug("-" * 40)
+            result = analyze_single_message(m, THRESHOLDS.copy())
+            results.append((result, d))
+
+            # Log results for each message
+            abuse_score, patterns, matched_scores, sentiment, stage, darvo_score, tone = result
+            logger.debug(f"Results for {d}:")
+            logger.debug(f"• Abuse Score: {abuse_score}")
+            logger.debug(f"• Patterns: {patterns}")
+            logger.debug(f"• Matched Scores: {matched_scores}")
+            logger.debug(f"• Sentiment: {sentiment['label']}")
+            logger.debug(f"• Stage: {stage}")
+            logger.debug(f"• DARVO Score: {darvo_score}")
+            logger.debug(f"• Tone: {tone}")

        # Extract scores and metadata
        abuse_scores = [r[0][0] for r in results]
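The new loop unpacks a 7-tuple from analyze_single_message. For local testing (not part of the diff), a hypothetical stub with the same shape could look like:

def analyze_single_message_stub(text, thresholds):
    """Hypothetical stand-in that mirrors the 7-tuple the loop above unpacks."""
    return (
        42,                          # abuse_score
        ["dismissiveness"],          # patterns (predicted labels)
        [("dismissiveness", 0.71)],  # matched_scores
        {"label": "negative"},       # sentiment dict with a 'label' key
        1,                           # risk stage
        0.30,                        # darvo_score
        "emotional threat",          # tone tag
    )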
@@ -542,8 +586,11 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
        tone_tags = [r[0][6] for r in results]
        dates_used = [r[1] for r in results]

-
+        logger.debug("\n--- Pattern Analysis ---")
        predicted_labels = [label for r in results for label in r[0][1]]
+        logger.debug(f"All detected patterns: {predicted_labels}")
+        # Pattern severity analysis
+        logger.debug("\n--- Pattern Severity Analysis ---")
        high = {'control'}
        moderate = {'gaslighting', 'dismissiveness', 'obscure language', 'insults',
                    'contradictory statements', 'guilt tripping'}
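For context (not part of the diff): these severity tiers feed a counter that the next hunk increments. A minimal sketch, with the `low` tier and the `counts` initialisation assumed, since both fall outside the diff:

high = {'control'}
moderate = {'gaslighting', 'dismissiveness', 'obscure language', 'insults',
            'contradictory statements', 'guilt tripping'}
low = {'blame shifting', 'projection'}          # hypothetical members
counts = {'high': 0, 'moderate': 0, 'low': 0}   # assumed initialisation

for label in ['control', 'insults', 'projection']:   # example predicted_labels
    if label in high:
        counts['high'] += 1
    elif label in moderate:
        counts['moderate'] += 1
    elif label in low:
        counts['low'] += 1

print(counts)  # {'high': 1, 'moderate': 1, 'low': 1}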
@@ -553,65 +600,91 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
        for label in predicted_labels:
            if label in high:
                counts['high'] += 1
+                logger.debug(f"High severity pattern found: {label}")
            elif label in moderate:
                counts['moderate'] += 1
+                logger.debug(f"Moderate severity pattern found: {label}")
            elif label in low:
                counts['low'] += 1
+                logger.debug(f"Low severity pattern found: {label}")
+
+        logger.debug(f"Pattern counts - High: {counts['high']}, Moderate: {counts['moderate']}, Low: {counts['low']}")

        # Pattern escalation logic
+        logger.debug("\n--- Escalation Risk Assessment ---")
        if counts['high'] >= 2 and counts['moderate'] >= 2:
            pattern_escalation_risk = "Critical"
+            logger.debug("Critical risk: Multiple high and moderate patterns")
        elif (counts['high'] >= 2 and counts['moderate'] >= 1) or \
             (counts['moderate'] >= 3) or \
             (counts['high'] >= 1 and counts['moderate'] >= 2):
            pattern_escalation_risk = "High"
+            logger.debug("High risk: Significant pattern combination")
        elif (counts['moderate'] == 2) or \
             (counts['high'] == 1 and counts['moderate'] == 1) or \
             (counts['moderate'] == 1 and counts['low'] >= 2) or \
             (counts['high'] == 1 and sum(counts.values()) == 1):
            pattern_escalation_risk = "Moderate"
+            logger.debug("Moderate risk: Concerning pattern combination")
        else:
            pattern_escalation_risk = "Low"
+            logger.debug("Low risk: Limited pattern severity")

-        # Calculate escalation risk
+        # Calculate checklist escalation risk
+        logger.debug("\n--- Checklist Risk Assessment ---")
        checklist_escalation_risk = "Unknown" if escalation_score is None else (
            "Critical" if escalation_score >= 20 else
            "Moderate" if escalation_score >= 10 else
            "Low"
        )
+        logger.debug(f"Checklist escalation risk: {checklist_escalation_risk}")

        # Calculate escalation bump
+        logger.debug("\n--- Escalation Bump Calculation ---")
        escalation_bump = 0
-        for result,
+        for result, msg_id in results:
            abuse_score, _, _, sentiment, stage, darvo_score, tone_tag = result
+            logger.debug(f"\nChecking escalation factors for {msg_id}:")
            if darvo_score > 0.65:
                escalation_bump += 3
+                logger.debug("• +3 for high DARVO score")
            if tone_tag in ["forced accountability flip", "emotional threat"]:
                escalation_bump += 2
+                logger.debug("• +2 for concerning tone")
            if abuse_score > 80:
                escalation_bump += 2
+                logger.debug("• +2 for high abuse score")
            if stage == 2:
                escalation_bump += 3
+                logger.debug("• +3 for escalation stage")
+
+        logger.debug(f"Total escalation bump: +{escalation_bump}")

        # Calculate combined risk
+        logger.debug("\n--- Combined Risk Calculation ---")
        def rank(label):
            return {"Low": 0, "Moderate": 1, "High": 2, "Critical": 3, "Unknown": 0}.get(label, 0)

        combined_score = rank(pattern_escalation_risk) + rank(checklist_escalation_risk) + escalation_bump
+        logger.debug(f"Combined risk score: {combined_score}")
+
        escalation_risk = (
            "Critical" if combined_score >= 6 else
            "High" if combined_score >= 4 else
            "Moderate" if combined_score >= 2 else
            "Low"
        )
+        logger.debug(f"Final escalation risk: {escalation_risk}")

        # Build escalation text
+        logger.debug("\n--- Building Output Text ---")
        if escalation_score is None:
            escalation_text = (
                "🚫 **Escalation Potential: Unknown** (Checklist not completed)\n"
                "⚠️ This section was not completed. Escalation potential is estimated using message data only.\n"
            )
            hybrid_score = 0
+            logger.debug("Generated unknown escalation text")
        elif escalation_score == 0:
            escalation_text = (
                "✅ **Escalation Checklist Completed:** No danger items reported.\n"
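A worked example of the combined-risk step above, using hypothetical inputs (High pattern risk, Moderate checklist risk, and a +2 escalation bump):

def rank(label):
    return {"Low": 0, "Moderate": 1, "High": 2, "Critical": 3, "Unknown": 0}.get(label, 0)

combined_score = rank("High") + rank("Moderate") + 2   # 2 + 1 + 2 = 5
escalation_risk = (
    "Critical" if combined_score >= 6 else
    "High" if combined_score >= 4 else
    "Moderate" if combined_score >= 2 else
    "Low"
)
print(combined_score, escalation_risk)  # 5 High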
@@ -621,6 +694,7 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
                f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"
            )
            hybrid_score = escalation_bump
+            logger.debug("Generated no-risk escalation text")
        else:
            hybrid_score = escalation_score + escalation_bump
            escalation_text = (
@@ -630,15 +704,20 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
                f"• Checklist Risk: {checklist_escalation_risk}\n"
                f"• Escalation Bump: +{escalation_bump} (from DARVO, tone, intensity, etc.)"
            )
+            logger.debug(f"Generated escalation text with hybrid score: {hybrid_score}")

-        #
+        # Calculate composite abuse score
+        logger.debug("\n--- Final Scores ---")
        composite_abuse = int(round(sum(abuse_scores) / len(abuse_scores)))
+        logger.debug(f"Composite abuse score: {composite_abuse}")

        # Get most common stage
        most_common_stage = max(set(stages), key=stages.count)
        stage_text = RISK_STAGE_LABELS[most_common_stage]
+        logger.debug(f"Most common stage: {most_common_stage}")

-        # Build output
+        # Build final output
+        logger.debug("\n--- Generating Final Output ---")
        out = f"Abuse Intensity: {composite_abuse}%\n"
        out += "📊 This reflects the strength and severity of detected abuse patterns in the message(s).\n\n"
@@ -649,6 +728,7 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):
            "Moderate" if composite_abuse >= 50 or hybrid_score >= 10 else
            "Low"
        )
+        logger.debug(f"Final risk level: {risk_level}")

        risk_descriptions = {
            "Critical": (
@@ -678,40 +758,52 @@ def analyze_composite(msg1, msg2, msg3, *answers_and_none):

        # Add DARVO analysis
        avg_darvo = round(sum(darvo_scores) / len(darvo_scores), 3)
+        logger.debug(f"Average DARVO score: {avg_darvo}")
        if avg_darvo > 0.25:
            level = "moderate" if avg_darvo < 0.65 else "high"
            out += f"\n\n🎭 **DARVO Score: {avg_darvo}** → This indicates a **{level} likelihood** of narrative reversal (DARVO), where the speaker may be denying, attacking, or reversing blame."

        # Add emotional tones
+        logger.debug("\n--- Adding Emotional Tones ---")
        out += "\n\n🎭 **Emotional Tones Detected:**\n"
        for i, tone in enumerate(tone_tags):
            out += f"• Message {i+1}: *{tone or 'none'}*\n"
+            logger.debug(f"Message {i+1} tone: {tone}")

        # Add threats section
+        logger.debug("\n--- Adding Threat Analysis ---")
        if flat_threats:
            out += "\n\n🚨 **Immediate Danger Threats Detected:**\n"
            for t in set(flat_threats):
                out += f"• \"{t}\"\n"
            out += "\n⚠️ These phrases may indicate an imminent risk to physical safety."
+            logger.debug(f"Added {len(flat_threats)} threat warnings")
        else:
            out += "\n\n🧩 **Immediate Danger Threats:** None explicitly detected.\n"
            out += "This does *not* rule out risk, but no direct threat phrases were matched."
+            logger.debug("No threats to add")

        # Generate timeline
+        logger.debug("\n--- Generating Timeline ---")
        pattern_labels = [
            pats[0][0] if (pats := r[0][2]) else "none"
            for r in results
        ]
        timeline_image = generate_abuse_score_chart(dates_used, abuse_scores, pattern_labels)
+        logger.debug("Timeline generated successfully")

        # Add escalation text
        out += "\n\n" + escalation_text
+
+        logger.debug("\n====== ANALYSIS COMPLETE ======\n")
        return out, timeline_image

    except Exception as e:
        logger.error(f"Error in analyze_composite: {e}")
+        logger.error(f"Traceback: {traceback.format_exc()}")
        return "An error occurred during analysis.", None

+
# Gradio Interface Setup
def create_interface():
    try:
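The body of create_interface() is not part of this diff. A minimal sketch of how analyze_composite might be wired into Gradio, with placeholder labels (the real layout may differ):

import gradio as gr

def create_interface():
    message_boxes = [gr.Textbox(label=f"Message {i}") for i in (1, 2, 3)]
    checklist = [gr.Checkbox(label=q) for q, _ in ESCALATION_QUESTIONS]
    none_box = gr.Checkbox(label="None of the above")
    return gr.Interface(
        fn=analyze_composite,
        inputs=message_boxes + checklist + [none_box],
        outputs=[gr.Textbox(label="Analysis"), gr.Image(label="Abuse score timeline")],
    )

if __name__ == "__main__":
    create_interface().launch()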