LPX55
committed
Commit · f78b194
Parent(s): b0a1e2b
major(feat): per-agent logging system with AgentLogger class
- Introduced AgentLogger to manage logging for different agents (sketched below).
- Created log files for ensemble monitoring, weight optimization, system health, context intelligence, and forensic anomaly detection.
- Updated EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent, ContextualIntelligenceAgent, and ForensicAnomalyDetectionAgent to use AgentLogger for logging.
- Modified app.py to integrate agent logs into the Gradio interface.
- Added log reading functionality to display agent logs in the UI.
- Removed old logging setup and streamlined logging across agents for consistency.
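The new utils/agent_logger.py (+47 lines) is not expanded in this view. Below is a minimal sketch of what the class plausibly looks like, inferred from the call pattern agent_logger.log(<agent>, <level>, <message>) used throughout the agents, the `from utils.agent_logger import AgentLogger, AGENT_LOG_FILES` import in app.py, and the format of the committed log files; the handler setup, method bodies, and exact file mapping are assumptions, not the committed code.

# Hypothetical reconstruction of utils/agent_logger.py -- the real file is not shown in this diff.
import logging
import os

# Assumed mapping used by app.py's read_log_file(AGENT_LOG_FILES[...]) calls.
AGENT_LOG_FILES = {
    "ensemble_monitor": "agent_logs/ensemble_monitor.log",
    "weight_optimization": "agent_logs/weight_optimization.log",
    "system_health": "agent_logs/system_health.log",
    "context_intelligence": "agent_logs/context_intelligence.log",
    "forensic_anomaly_detection": "agent_logs/forensic_anomaly_detection.log",
}

class AgentLogger:
    """One logging.Logger per agent, each writing to its own file under agent_logs/."""

    def __init__(self):
        os.makedirs("agent_logs", exist_ok=True)
        self._loggers = {}
        # Matches the committed log format: "2025-07-26 11:29:16,042 - INFO - ... (agent_logger.py:37)"
        formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
        )
        for agent_name, path in AGENT_LOG_FILES.items():
            logger = logging.getLogger(f"agent.{agent_name}")
            logger.setLevel(logging.DEBUG)
            if not logger.handlers:  # avoid stacking handlers when AgentLogger is re-instantiated
                handler = logging.FileHandler(path)
                handler.setFormatter(formatter)
                logger.addHandler(handler)
            self._loggers[agent_name] = logger

    def log(self, agent_name: str, level: str, message: str):
        # e.g. agent_logger.log("ensemble_monitor", "warning", alert_msg)
        getattr(self._loggers[agent_name], level)(message)

Each agent module then creates a module-level agent_logger = AgentLogger() and routes every message through agent_logger.log(...), which is exactly the pattern visible in the diffs below.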
- .gitattributes +1 -0
- .gitignore +1 -1
- .python-version +0 -1
- agent_logs/context_intelligence.log +5 -0
- agent_logs/ensemble_monitor.log +46 -0
- agent_logs/forensic_anomaly_detection.log +5 -0
- agent_logs/gradio_log.txt +7 -0
- agent_logs/system_health.log +12 -0
- agent_logs/weight_optimization.log +8 -0
- agents/ensemble_team.py +19 -27
- agents/ensemble_weights.py +17 -15
- agents/smart_agents.py +19 -34
- app.py +110 -93
- temp_gradio_input.png +0 -3
- utils/agent_logger.py +47 -0
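On the UI side, full_prediction in app.py (diffed below) now yields ten values, the last five being the raw text of each agent log read via read_log_file(AGENT_LOG_FILES[...]). The output components that receive them fall outside the visible hunks, so the wiring below is only a sketch; every component name and label is hypothetical, apart from the Textbox signature, which mirrors the commented-out "Agent Logs" textbox removed from the old tab list.

import gradio as gr

def full_prediction(img):
    # Stand-in stub for the real generator in app.py (hypothetical, for this sketch only).
    yield None, [], [], "[]", "<div>Consensus: UNCERTAIN</div>", "", "", "", "", ""

with gr.Blocks() as sketch:
    img_input = gr.Image(label="Upload Image to Analyze", type="filepath")
    run_btn = gr.Button("Run Ensemble Prediction")
    augmented = gr.Image(label="Augmented Image")
    forensics_gallery = gr.Gallery(label="Forensic Outputs")
    results_table = gr.Dataframe(label="Model Predictions")
    results_json = gr.JSON(label="Raw Results")
    consensus = gr.HTML()
    # One read-only textbox per agent log, in the same order as the yield.
    agent_log_boxes = [
        gr.Textbox(label=f"{name} Log", interactive=False, lines=5, max_lines=20, autoscroll=True)
        for name in ["Context Intelligence", "Ensemble Monitor", "Weight Optimization",
                     "System Health", "Forensic Anomaly Detection"]
    ]
    run_btn.click(
        fn=full_prediction,
        inputs=[img_input],  # the real app also passes the confidence/rotate/noise/sharpen sliders
        outputs=[augmented, forensics_gallery, results_table, results_json, consensus, *agent_log_boxes],
    )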
.gitattributes
CHANGED
@@ -38,4 +38,5 @@ preview/127.0.0.1_7860__.png filter=lfs diff=lfs merge=lfs -text
 preview/2.png filter=lfs diff=lfs merge=lfs -text
 preview/3.png filter=lfs diff=lfs merge=lfs -text
 preview/4.png filter=lfs diff=lfs merge=lfs -text
+temp_gradio_input.png filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -13,4 +13,4 @@ models/*
 hf_inference_logs/*.json
 hf_inference_logs/*
 .gradio/flagged/*
-.venv
+.venv
.python-version
DELETED
@@ -1 +0,0 @@
-3.11
agent_logs/context_intelligence.log
ADDED
@@ -0,0 +1,5 @@
+2025-07-26 11:06:45,553 - INFO - Detected context tags: ['potentially_ai_generated'] (agent_logger.py:37)
+2025-07-26 11:29:16,042 - INFO - Initializing ContextualIntelligenceAgent. (agent_logger.py:37)
+2025-07-26 11:32:02,894 - INFO - Initializing ContextualIntelligenceAgent. (agent_logger.py:37)
+2025-07-26 11:35:25,364 - INFO - Initializing ContextualIntelligenceAgent. (agent_logger.py:37)
+2025-07-26 11:52:12,710 - INFO - Initializing ContextualIntelligenceAgent. (agent_logger.py:37)
agent_logs/ensemble_monitor.log
ADDED
@@ -0,0 +1,46 @@
+2025-07-26 11:08:13,463 - INFO - Cleaned forensic images types: [<class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>] (agent_logger.py:37)
+2025-07-26 11:29:16,039 - INFO - Initializing EnsembleMonitorAgent. (agent_logger.py:37)
+2025-07-26 11:32:02,885 - INFO - Initializing EnsembleMonitorAgent. (agent_logger.py:37)
+2025-07-26 11:32:10,166 - INFO - Monitoring prediction for model 'model_1'. Label: REAL, Confidence: 0.96, Time: 6.0362s (agent_logger.py:37)
+2025-07-26 11:32:10,166 - WARNING - ALERT: Model 'model_1' inference time exceeded 5.0s: 6.0362s (agent_logger.py:37)
+2025-07-26 11:32:10,167 - INFO - Updated metrics for 'model_1': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9558695554733276, 'total_inference_time': 6.036171197891235} (agent_logger.py:37)
+2025-07-26 11:35:25,360 - INFO - Initializing EnsembleMonitorAgent. (agent_logger.py:37)
+2025-07-26 11:35:29,213 - INFO - Monitoring prediction for model 'model_1'. Label: REAL, Confidence: 0.99, Time: 2.4478s (agent_logger.py:37)
+2025-07-26 11:35:29,214 - INFO - Updated metrics for 'model_1': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9877696633338928, 'total_inference_time': 2.447831630706787} (agent_logger.py:37)
+2025-07-26 11:35:32,846 - INFO - Monitoring prediction for model 'model_2'. Label: UNCERTAIN, Confidence: 0.56, Time: 3.4874s (agent_logger.py:37)
+2025-07-26 11:35:32,846 - INFO - Updated metrics for 'model_2': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.5572928190231323, 'total_inference_time': 3.4874162673950195} (agent_logger.py:37)
+2025-07-26 11:35:37,782 - INFO - Monitoring prediction for model 'model_3'. Label: AI, Confidence: 1.00, Time: 4.7423s (agent_logger.py:37)
+2025-07-26 11:35:37,783 - INFO - Updated metrics for 'model_3': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999939203262329, 'total_inference_time': 4.74231481552124} (agent_logger.py:37)
+2025-07-26 11:35:38,753 - INFO - Monitoring prediction for model 'model_4'. Label: REAL, Confidence: 1.00, Time: 0.7961s (agent_logger.py:37)
+2025-07-26 11:35:38,753 - INFO - Updated metrics for 'model_4': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999666213989258, 'total_inference_time': 0.7960679531097412} (agent_logger.py:37)
+2025-07-26 11:35:42,145 - INFO - Monitoring prediction for model 'model_5'. Label: REAL, Confidence: 0.80, Time: 3.2266s (agent_logger.py:37)
+2025-07-26 11:35:42,146 - INFO - Updated metrics for 'model_5': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.7969127297401428, 'total_inference_time': 3.2265756130218506} (agent_logger.py:37)
+2025-07-26 11:35:48,001 - INFO - Monitoring prediction for model 'model_6'. Label: AI, Confidence: 1.00, Time: 5.6884s (agent_logger.py:37)
+2025-07-26 11:35:48,002 - WARNING - ALERT: Model 'model_6' inference time exceeded 5.0s: 5.6884s (agent_logger.py:37)
+2025-07-26 11:35:48,002 - INFO - Updated metrics for 'model_6': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999886751174927, 'total_inference_time': 5.688395023345947} (agent_logger.py:37)
+2025-07-26 11:35:51,060 - INFO - Monitoring prediction for model 'model_7'. Label: AI, Confidence: 0.88, Time: 2.9033s (agent_logger.py:37)
+2025-07-26 11:35:51,060 - INFO - Updated metrics for 'model_7': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.8818904161453247, 'total_inference_time': 2.9033284187316895} (agent_logger.py:37)
+2025-07-26 11:36:00,757 - INFO - Monitoring prediction for model 'model_8'. Label: REAL, Confidence: 0.97, Time: 9.5506s (agent_logger.py:37)
+2025-07-26 11:36:00,757 - WARNING - ALERT: Model 'model_8' inference time exceeded 5.0s: 9.5506s (agent_logger.py:37)
+2025-07-26 11:36:00,757 - INFO - Updated metrics for 'model_8': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9748013019561768, 'total_inference_time': 9.550556182861328} (agent_logger.py:37)
+2025-07-26 11:52:12,706 - INFO - Initializing EnsembleMonitorAgent. (agent_logger.py:37)
+2025-07-26 11:52:20,481 - INFO - Monitoring prediction for model 'model_1'. Label: REAL, Confidence: 0.99, Time: 6.3035s (agent_logger.py:37)
+2025-07-26 11:52:20,482 - WARNING - ALERT: Model 'model_1' inference time exceeded 5.0s: 6.3035s (agent_logger.py:37)
+2025-07-26 11:52:20,483 - INFO - Updated metrics for 'model_1': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9877696633338928, 'total_inference_time': 6.303498029708862} (agent_logger.py:37)
+2025-07-26 11:52:23,521 - INFO - Monitoring prediction for model 'model_2'. Label: UNCERTAIN, Confidence: 0.56, Time: 2.8905s (agent_logger.py:37)
+2025-07-26 11:52:23,521 - INFO - Updated metrics for 'model_2': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.5572928190231323, 'total_inference_time': 2.8904731273651123} (agent_logger.py:37)
+2025-07-26 11:52:28,959 - INFO - Monitoring prediction for model 'model_3'. Label: AI, Confidence: 1.00, Time: 5.2830s (agent_logger.py:37)
+2025-07-26 11:52:28,960 - WARNING - ALERT: Model 'model_3' inference time exceeded 5.0s: 5.2830s (agent_logger.py:37)
+2025-07-26 11:52:28,960 - INFO - Updated metrics for 'model_3': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999939203262329, 'total_inference_time': 5.283019542694092} (agent_logger.py:37)
+2025-07-26 11:52:30,000 - INFO - Monitoring prediction for model 'model_4'. Label: REAL, Confidence: 1.00, Time: 0.8660s (agent_logger.py:37)
+2025-07-26 11:52:30,000 - INFO - Updated metrics for 'model_4': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999666213989258, 'total_inference_time': 0.8659720420837402} (agent_logger.py:37)
+2025-07-26 11:52:33,719 - INFO - Monitoring prediction for model 'model_5'. Label: REAL, Confidence: 0.80, Time: 3.1757s (agent_logger.py:37)
+2025-07-26 11:52:33,720 - INFO - Updated metrics for 'model_5': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.7969127297401428, 'total_inference_time': 3.175741195678711} (agent_logger.py:37)
+2025-07-26 11:52:39,124 - INFO - Monitoring prediction for model 'model_6'. Label: AI, Confidence: 1.00, Time: 5.2289s (agent_logger.py:37)
+2025-07-26 11:52:39,124 - WARNING - ALERT: Model 'model_6' inference time exceeded 5.0s: 5.2289s (agent_logger.py:37)
+2025-07-26 11:52:39,124 - INFO - Updated metrics for 'model_6': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9999886751174927, 'total_inference_time': 5.228850841522217} (agent_logger.py:37)
+2025-07-26 11:52:42,460 - INFO - Monitoring prediction for model 'model_7'. Label: AI, Confidence: 0.88, Time: 3.1972s (agent_logger.py:37)
+2025-07-26 11:52:42,461 - INFO - Updated metrics for 'model_7': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.8818904161453247, 'total_inference_time': 3.197218418121338} (agent_logger.py:37)
+2025-07-26 11:52:51,110 - INFO - Monitoring prediction for model 'model_8'. Label: REAL, Confidence: 0.97, Time: 8.5060s (agent_logger.py:37)
+2025-07-26 11:52:51,110 - WARNING - ALERT: Model 'model_8' inference time exceeded 5.0s: 8.5060s (agent_logger.py:37)
+2025-07-26 11:52:51,110 - INFO - Updated metrics for 'model_8': {'total_predictions': 1, 'correct_predictions': 0, 'total_confidence': 0.9748013019561768, 'total_inference_time': 8.50600266456604} (agent_logger.py:37)
agent_logs/forensic_anomaly_detection.log
ADDED
@@ -0,0 +1,5 @@
+2025-07-26 11:06:45,557 - INFO - Forensic anomaly detection: Potential anomalies detected: Gradient analysis shows unusual edge patterns.; MinMax processing reveals subtle pixel deviations. (agent_logger.py:37)
+2025-07-26 11:29:16,042 - INFO - Initializing ForensicAnomalyDetectionAgent. (agent_logger.py:37)
+2025-07-26 11:32:02,896 - INFO - Initializing ForensicAnomalyDetectionAgent. (agent_logger.py:37)
+2025-07-26 11:35:25,364 - INFO - Initializing ForensicAnomalyDetectionAgent. (agent_logger.py:37)
+2025-07-26 11:52:12,710 - INFO - Initializing ForensicAnomalyDetectionAgent. (agent_logger.py:37)
agent_logs/gradio_log.txt
ADDED
@@ -0,0 +1,7 @@
+2025-07-26 10:34:11,730 - INFO - The logs will be displayed in here. (app.py:94)
+2025-07-26 10:36:40,701 - INFO - The logs will be displayed in here. (app.py:94)
+2025-07-26 10:40:19,369 - INFO - The logs will be displayed in here. (app.py:94)
+2025-07-26 10:40:24,440 - INFO - The logs will be displayed in here. (app.py:94)
+2025-07-26 10:41:58,076 - INFO - Detected context tags: ['potentially_ai_generated'] (app.py:368)
+2025-07-26 10:41:58,089 - INFO - Forensic anomaly detection: Potential anomalies detected: Gradient analysis shows unusual edge patterns.; MinMax processing reveals subtle pixel deviations. (app.py:405)
+2025-07-26 10:42:02,958 - INFO - Cleaned forensic images types: [<class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>, <class 'PIL.Image.Image'>] (app.py:446)
agent_logs/system_health.log
ADDED
@@ -0,0 +1,12 @@
+2025-07-26 11:29:16,042 - INFO - Initializing SystemHealthAgent. (agent_logger.py:37)
+2025-07-26 11:29:16,043 - INFO - Monitoring system health... (agent_logger.py:37)
+2025-07-26 11:29:17,146 - INFO - System health metrics: CPU: 17.8%, Memory: 64.0%, GPU: [{'id': 0, 'name': 'NVIDIA GeForce RTX 3060 Ti', 'load': 0.08, 'memoryUtil': 0.232666015625, 'memoryTotal': 8192.0, 'memoryUsed': 1906.0}] (agent_logger.py:37)
+2025-07-26 11:32:02,894 - INFO - Initializing SystemHealthAgent. (agent_logger.py:37)
+2025-07-26 11:32:02,896 - INFO - Monitoring system health... (agent_logger.py:37)
+2025-07-26 11:32:04,010 - INFO - System health metrics: CPU: 19.3%, Memory: 64.3%, GPU: [{'id': 0, 'name': 'NVIDIA GeForce RTX 3060 Ti', 'load': 0.02, 'memoryUtil': 0.2371826171875, 'memoryTotal': 8192.0, 'memoryUsed': 1943.0}] (agent_logger.py:37)
+2025-07-26 11:35:25,363 - INFO - Initializing SystemHealthAgent. (agent_logger.py:37)
+2025-07-26 11:35:25,364 - INFO - Monitoring system health... (agent_logger.py:37)
+2025-07-26 11:35:26,464 - INFO - System health metrics: CPU: 18.6%, Memory: 76.0%, GPU: [{'id': 0, 'name': 'NVIDIA GeForce RTX 3060 Ti', 'load': 0.09, 'memoryUtil': 0.2296142578125, 'memoryTotal': 8192.0, 'memoryUsed': 1881.0}] (agent_logger.py:37)
+2025-07-26 11:52:12,709 - INFO - Initializing SystemHealthAgent. (agent_logger.py:37)
+2025-07-26 11:52:12,711 - INFO - Monitoring system health... (agent_logger.py:37)
+2025-07-26 11:52:13,837 - INFO - System health metrics: CPU: 18.5%, Memory: 66.2%, GPU: [{'id': 0, 'name': 'NVIDIA GeForce RTX 3060 Ti', 'load': 0.07, 'memoryUtil': 0.2266845703125, 'memoryTotal': 8192.0, 'memoryUsed': 1857.0}] (agent_logger.py:37)
agent_logs/weight_optimization.log
ADDED
@@ -0,0 +1,8 @@
+2025-07-26 11:29:16,041 - INFO - Initializing ModelWeightManager. Strongest model: simple_prediction (agent_logger.py:37)
+2025-07-26 11:29:16,042 - INFO - Initializing WeightOptimizationAgent. (agent_logger.py:37)
+2025-07-26 11:32:02,890 - INFO - Initializing ModelWeightManager. Strongest model: simple_prediction (agent_logger.py:37)
+2025-07-26 11:32:02,893 - INFO - Initializing WeightOptimizationAgent. (agent_logger.py:37)
+2025-07-26 11:35:25,363 - INFO - Initializing ModelWeightManager. Strongest model: simple_prediction (agent_logger.py:37)
+2025-07-26 11:35:25,363 - INFO - Initializing WeightOptimizationAgent. (agent_logger.py:37)
+2025-07-26 11:52:12,709 - INFO - Initializing ModelWeightManager. Strongest model: simple_prediction (agent_logger.py:37)
+2025-07-26 11:52:12,709 - INFO - Initializing WeightOptimizationAgent. (agent_logger.py:37)
agents/ensemble_team.py
CHANGED
@@ -1,4 +1,4 @@
-import
+from utils.agent_logger import AgentLogger
 import time
 import torch
 import psutil # Ensure psutil is imported here as well
@@ -6,16 +6,16 @@ import GPUtil
 from datetime import datetime, timedelta
 import gc # Import garbage collector
 
-
+agent_logger = AgentLogger()
 
 class EnsembleMonitorAgent:
     def __init__(self):
-
+        agent_logger.log("ensemble_monitor", "info", "Initializing EnsembleMonitorAgent.")
         self.performance_metrics = {}
         self.alerts = []
 
     def monitor_prediction(self, model_id, prediction_label, confidence_score, inference_time):
-
+        agent_logger.log("ensemble_monitor", "info", f"Monitoring prediction for model '{model_id}'. Label: {prediction_label}, Confidence: {confidence_score:.2f}, Time: {inference_time:.4f}s")
         if model_id not in self.performance_metrics:
             self.performance_metrics[model_id] = {
                 "total_predictions": 0,
@@ -23,28 +23,24 @@ class EnsembleMonitorAgent:
                 "total_confidence": 0.0,
                 "total_inference_time": 0.0
             }
-
         metrics = self.performance_metrics[model_id]
         metrics["total_predictions"] += 1
        metrics["total_confidence"] += confidence_score
        metrics["total_inference_time"] += inference_time
-
        # Example alert: model taking too long
        if inference_time > 5.0: # Threshold for slow inference
            alert_msg = f"ALERT: Model '{model_id}' inference time exceeded 5.0s: {inference_time:.4f}s"
            self.alerts.append(alert_msg)
-
-
+            agent_logger.log("ensemble_monitor", "warning", alert_msg)
        # Example alert: low confidence
        if confidence_score < 0.5: # Threshold for low confidence
            alert_msg = f"ALERT: Model '{model_id}' returned low confidence: {confidence_score:.2f}"
            self.alerts.append(alert_msg)
-
-
-        logger.info(f"Updated metrics for '{model_id}': {metrics}")
+            agent_logger.log("ensemble_monitor", "warning", alert_msg)
+        agent_logger.log("ensemble_monitor", "info", f"Updated metrics for '{model_id}': {metrics}")
 
    def get_performance_summary(self):
-
+        agent_logger.log("ensemble_monitor", "info", "Generating performance summary for all models.")
        summary = {}
        for model_id, metrics in self.performance_metrics.items():
            avg_confidence = metrics["total_confidence"] / metrics["total_predictions"] if metrics["total_predictions"] > 0 else 0
@@ -54,35 +50,33 @@
                "avg_inference_time": avg_inference_time,
                "total_predictions": metrics["total_predictions"]
            }
-
+        agent_logger.log("ensemble_monitor", "info", f"Performance summary: {summary}")
        return summary
 
class WeightOptimizationAgent:
    def __init__(self, weight_manager):
-
+        agent_logger.log("weight_optimization", "info", "Initializing WeightOptimizationAgent.")
        self.weight_manager = weight_manager
        self.prediction_history = []
        self.performance_window = timedelta(hours=24) # Evaluate performance over last 24 hours
 
    def analyze_performance(self, final_prediction, ground_truth=None):
-
+        agent_logger.log("weight_optimization", "info", f"Analyzing performance. Final prediction: {final_prediction}, Ground truth: {ground_truth}")
        timestamp = datetime.now()
        self.prediction_history.append({
            "timestamp": timestamp,
            "final_prediction": final_prediction,
            "ground_truth": ground_truth # Ground truth is often not available in real-time
        })
-
        # Keep history windowed
        self.prediction_history = [p for p in self.prediction_history if timestamp - p["timestamp"] < self.performance_window]
-
-
+        agent_logger.log("weight_optimization", "info", f"Prediction history length: {len(self.prediction_history)}")
        # In a real scenario, this would involve a more complex optimization logic
        # For now, it just logs the history length.
 
class SystemHealthAgent:
    def __init__(self):
-
+        agent_logger.log("system_health", "info", "Initializing SystemHealthAgent.")
        self.health_metrics = {
            "cpu_percent": 0,
            "memory_usage": {"total": 0, "available": 0, "percent": 0},
@@ -90,7 +84,7 @@ class SystemHealthAgent:
        }
 
    def monitor_system_health(self):
-
+        agent_logger.log("system_health", "info", "Monitoring system health...")
        self.health_metrics["cpu_percent"] = psutil.cpu_percent(interval=1)
        mem = psutil.virtual_memory()
        self.health_metrics["memory_usage"] = {
@@ -98,20 +92,18 @@
            "available": mem.available,
            "percent": mem.percent
        }
-
        # Holy moly, been at 99% for hours whoops
        if mem.percent > 90:
-
+            agent_logger.log("system_health", "warning", f"CRITICAL: System memory usage is at {mem.percent}%. Attempting to clear memory cache...")
            gc.collect()
-
+            agent_logger.log("system_health", "info", "Garbage collection triggered. Re-checking memory usage...")
            mem_after_gc = psutil.virtual_memory()
            self.health_metrics["memory_usage_after_gc"] = {
                "total": mem_after_gc.total,
                "available": mem_after_gc.available,
                "percent": mem_after_gc.percent
            }
-
-
+            agent_logger.log("system_health", "info", f"Memory usage after GC: {mem_after_gc.percent}%")
        gpu_info = []
        try:
            gpus = GPUtil.getGPUs()
@@ -125,7 +117,7 @@
                    "memoryUsed": gpu.memoryUsed
                })
        except Exception as e:
-
+            agent_logger.log("system_health", "warning", f"Could not retrieve GPU information: {e}")
            gpu_info.append({"error": str(e)})
        self.health_metrics["gpu_utilization"] = gpu_info
-
+        agent_logger.log("system_health", "info", f"System health metrics: CPU: {self.health_metrics['cpu_percent']}%, Memory: {self.health_metrics['memory_usage']['percent']}%, GPU: {gpu_info}")
agents/ensemble_weights.py
CHANGED
@@ -1,6 +1,7 @@
 import logging
 import torch
 from utils.registry import MODEL_REGISTRY # Import MODEL_REGISTRY
+from utils.agent_logger import AgentLogger
 
 logger = logging.getLogger(__name__)
 
@@ -39,7 +40,8 @@ class ContextualWeightOverrideAgent:
 
class ModelWeightManager:
    def __init__(self, strongest_model_id: str = None):
-
+        agent_logger = AgentLogger()
+        agent_logger.log("weight_optimization", "info", f"Initializing ModelWeightManager. Strongest model: {strongest_model_id}")
        # Dynamically initialize base_weights from MODEL_REGISTRY
        num_models = len(MODEL_REGISTRY)
        if num_models > 0:
@@ -74,9 +76,9 @@
 
    def adjust_weights(self, predictions, confidence_scores, context_tags: list[str] = None):
        """Dynamically adjust weights based on prediction patterns and optional context."""
-
+        agent_logger.log("weight_optimization", "info", "Adjusting model weights.")
        adjusted_weights = self.base_weights.copy()
-
+        agent_logger.log("weight_optimization", "info", f"Initial adjusted weights (copy of base): {adjusted_weights}")
 
        # 1. Apply contextual overrides first
        if context_tags:
@@ -84,34 +86,34 @@
            overrides = self.context_override_agent.get_overrides(context_tags)
            for model_id, multiplier in overrides.items():
                adjusted_weights[model_id] = adjusted_weights.get(model_id, 0.0) * multiplier
-
+            agent_logger.log("weight_optimization", "info", f"Adjusted weights after context overrides: {adjusted_weights}")
 
        # 2. Apply situation-based adjustments (consensus, conflict, confidence)
        # Check for consensus
        has_consensus = self._has_consensus(predictions)
        if has_consensus:
-
+            agent_logger.log("weight_optimization", "info", "Consensus detected. Boosting weights for consensus.")
            for model in adjusted_weights:
                adjusted_weights[model] *= self.situation_weights["consensus"]
-
+            agent_logger.log("weight_optimization", "info", f"Adjusted weights after consensus boost: {adjusted_weights}")
 
        # Check for conflicts
        has_conflicts = self._has_conflicts(predictions)
        if has_conflicts:
-
+            agent_logger.log("weight_optimization", "info", "Conflicts detected. Reducing weights for conflict.")
            for model in adjusted_weights:
                adjusted_weights[model] *= self.situation_weights["conflict"]
-
+            agent_logger.log("weight_optimization", "info", f"Adjusted weights after conflict reduction: {adjusted_weights}")
 
        # Adjust based on confidence
        logger.info("Adjusting weights based on model confidence scores.")
        for model, confidence in confidence_scores.items():
            if confidence > 0.8:
                adjusted_weights[model] *= self.situation_weights["high_confidence"]
-
+                agent_logger.log("weight_optimization", "info", f"Model '{model}' has high confidence ({confidence:.2f}). Weight boosted.")
            elif confidence < 0.5:
                adjusted_weights[model] *= self.situation_weights["low_confidence"]
-
+                agent_logger.log("weight_optimization", "info", f"Model '{model}' has low confidence ({confidence:.2f}). Weight reduced.")
        logger.info(f"Adjusted weights before normalization: {adjusted_weights}")
 
        normalized_weights = self._normalize_weights(adjusted_weights)
@@ -120,7 +122,7 @@
 
    def _has_consensus(self, predictions):
        """Check if models agree on prediction"""
-
+        agent_logger.log("weight_optimization", "info", "Checking for consensus among model predictions.")
        non_none_predictions = [p.get("Label") for p in predictions.values() if p is not None and isinstance(p, dict) and p.get("Label") is not None and p.get("Label") != "Error"]
        logger.debug(f"Non-none predictions for consensus check: {non_none_predictions}")
        result = len(non_none_predictions) > 0 and len(set(non_none_predictions)) == 1
@@ -129,7 +131,7 @@
 
    def _has_conflicts(self, predictions):
        """Check if models have conflicting predictions"""
-
+        agent_logger.log("weight_optimization", "info", "Checking for conflicts among model predictions.")
        non_none_predictions = [p.get("Label") for p in predictions.values() if p is not None and isinstance(p, dict) and p.get("Label") is not None and p.get("Label") != "Error"]
        logger.debug(f"Non-none predictions for conflict check: {non_none_predictions}")
        result = len(non_none_predictions) > 1 and len(set(non_none_predictions)) > 1
@@ -138,10 +140,10 @@
 
    def _normalize_weights(self, weights):
        """Normalize weights to sum to 1"""
-
+        agent_logger.log("weight_optimization", "info", "Normalizing weights.")
        total = sum(weights.values())
        if total == 0:
-
+            agent_logger.log("weight_optimization", "warning", "All weights became zero after adjustments. Reverting to equal base weights for registered models.")
            # Revert to equal weights for all *registered* models if total becomes zero
            num_registered_models = len(MODEL_REGISTRY)
            if num_registered_models > 0:
@@ -149,5 +151,5 @@
            else:
                return {} # No models registered
        normalized = {k: v/total for k, v in weights.items()}
-
+        agent_logger.log("weight_optimization", "info", f"Weights normalized. Total sum: {sum(normalized.values()):.2f}")
        return normalized
agents/smart_agents.py
CHANGED
@@ -1,14 +1,14 @@
-import
+from utils.agent_logger import AgentLogger
 import torch
 import numpy as np
 from PIL import Image # For image processing context
 # import smolagents # Removed unused import
 
-
+agent_logger = AgentLogger()
 
 class ContextualIntelligenceAgent:
     def __init__(self):
-
+        agent_logger.log("context_intelligence", "info", "Initializing ContextualIntelligenceAgent.")
        # This would be a more sophisticated model in a real scenario
        self.context_rules = {
            "high_resolution": {"min_width": 1920, "min_height": 1080, "tag": "high_resolution_image"},
@@ -23,48 +23,40 @@ class ContextualIntelligenceAgent:
        }
 
    def infer_context_tags(self, image_metadata: dict, model_predictions: dict) -> list[str]:
-
+        agent_logger.log("context_intelligence", "info", "Inferring context tags from image metadata and model predictions.")
        detected_tags = []
-
        # Analyze image metadata
        width = image_metadata.get("width", 0)
        height = image_metadata.get("height", 0)
        mode = image_metadata.get("mode", "RGB")
-
        if width >= self.context_rules["high_resolution"]["min_width"] and \
           height >= self.context_rules["high_resolution"]["min_height"]:
            detected_tags.append(self.context_rules["high_resolution"]["tag"])
-
-
+            agent_logger.log("context_intelligence", "debug", f"Detected tag: {self.context_rules['high_resolution']['tag']}")
        if width <= self.context_rules["low_resolution"]["max_width"] and \
           height <= self.context_rules["low_resolution"]["max_height"]:
            detected_tags.append(self.context_rules["low_resolution"]["tag"])
-
-
+            agent_logger.log("context_intelligence", "debug", f"Detected tag: {self.context_rules['low_resolution']['tag']}")
        if mode == self.context_rules["grayscale"]["mode"]:
            detected_tags.append(self.context_rules["grayscale"]["tag"])
-
-
+            agent_logger.log("context_intelligence", "debug", f"Detected tag: {self.context_rules['grayscale']['tag']}")
        # Analyze model predictions for general context
        for model_id, prediction in model_predictions.items():
            label = prediction.get("Label")
            ai_score = prediction.get("AI Score", 0.0)
            real_score = prediction.get("Real Score", 0.0)
-
            if label and "potentially_natural_scene" not in detected_tags:
                for keyword in self.context_rules["potentially_natural_scene"]["keywords"]:
                    if keyword in label and real_score >= self.context_rules["potentially_natural_scene"]["threshold"]:
                        detected_tags.append(self.context_rules["potentially_natural_scene"]["tag"])
-
+                        agent_logger.log("context_intelligence", "debug", f"Detected tag: {self.context_rules['potentially_natural_scene']['tag']}")
                        break # Only add once
-
            if label and "potentially_ai_generated" not in detected_tags:
                for keyword in self.context_rules["potentially_ai_generated"]["keywords"]:
                    if keyword in label and ai_score >= self.context_rules["potentially_ai_generated"]["threshold"]:
                        detected_tags.append(self.context_rules["potentially_ai_generated"]["tag"])
-
+                        agent_logger.log("context_intelligence", "debug", f"Detected tag: {self.context_rules['potentially_ai_generated']['tag']}")
                        break # Only add once
-
        # Simulate simple scene detection based on general consensus if available
        # This is a very basic simulation; a real system would use a separate scene classification model
        if "potentially_natural_scene" in detected_tags and "potentially_ai_generated" not in detected_tags:
@@ -73,15 +65,14 @@
                if real_score > 0.8: # Placeholder for actual image feature analysis
                    detected_tags.append(self.context_rules["outdoor"]["tag"])
                    detected_tags.append(self.context_rules["sunny"]["tag"])
-
-
-        logger.info(f"Inferred context tags: {detected_tags}")
+                    agent_logger.log("context_intelligence", "debug", f"Simulated tags: {self.context_rules['outdoor']['tag']},{self.context_rules['sunny']['tag']}")
+        agent_logger.log("context_intelligence", "info", f"Inferred context tags: {detected_tags}")
        return detected_tags
 
 
class ForensicAnomalyDetectionAgent:
    def __init__(self):
-
+        agent_logger.log("forensic_anomaly_detection", "info", "Initializing ForensicAnomalyDetectionAgent.")
        self.anomaly_thresholds = {
            "ELA": {"min_anomalies": 3, "max_error_std": 20}, # Example thresholds
            "gradient": {"min_sharp_edges": 500},
@@ -89,38 +80,32 @@
        }
 
    def analyze_forensic_outputs(self, forensic_output_descriptions: list[str]) -> dict:
-
+        agent_logger.log("forensic_anomaly_detection", "info", "Analyzing forensic outputs for anomalies.")
        anomalies_detected = []
        summary_message = "No significant anomalies detected."
-
        # Example: Check for ELA anomalies (simplified)
        ela_anomalies = [desc for desc in forensic_output_descriptions if "ELA analysis" in desc and "enhanced contrast" in desc]
        if len(ela_anomalies) > self.anomaly_thresholds["ELA"]["min_anomalies"]:
            anomalies_detected.append("Multiple ELA passes indicate potential inconsistencies.")
-
-
+            agent_logger.log("forensic_anomaly_detection", "warning", "Detected multiple ELA passes indicating potential inconsistencies.")
        # Example: Check for gradient anomalies (simplified)
        gradient_anomalies = [desc for desc in forensic_output_descriptions if "Gradient processing" in desc]
        if len(gradient_anomalies) > 1 and "Highlights edges and transitions" in gradient_anomalies[0]:
            # This is a placeholder for actual image analysis, e.g., checking standard deviation of gradients
            anomalies_detected.append("Gradient analysis shows unusual edge patterns.")
-
-
+            agent_logger.log("forensic_anomaly_detection", "warning", "Detected unusual edge patterns from gradient analysis.")
        # Example: Check for MinMax anomalies (simplified)
        minmax_anomalies = [desc for desc in forensic_output_descriptions if "MinMax processing" in desc]
        if len(minmax_anomalies) > 1 and "Deviations in local pixel values" in minmax_anomalies[0]:
            # Placeholder for actual analysis of minmax output, e.g., deviation variance
            anomalies_detected.append("MinMax processing reveals subtle pixel deviations.")
-
-
+            agent_logger.log("forensic_anomaly_detection", "warning", "Detected subtle pixel deviations from MinMax processing.")
        if "Bit Plane extractor" in str(forensic_output_descriptions):
            anomalies_detected.append("Bit Plane extraction performed.")
-
-
+            agent_logger.log("forensic_anomaly_detection", "info", "Bit Plane extraction performed.")
        if anomalies_detected:
            summary_message = "Potential anomalies detected: " + "; ".join(anomalies_detected)
-
+            agent_logger.log("forensic_anomaly_detection", "warning", f"Forensic anomaly detection summary: {summary_message}")
        else:
-
-
+            agent_logger.log("forensic_anomaly_detection", "info", f"Forensic anomaly detection summary: {summary_message}")
        return {"anomalies": anomalies_detected, "summary": summary_message}
app.py
CHANGED
@@ -41,9 +41,11 @@ os.environ['HF_HUB_CACHE'] = './models'
 
 
 # --- Gradio Log Handler ---
-
-
-
 
 LOCAL_LOG_DIR = "./hf_inference_logs"
 HF_DATASET_NAME="aiwithoutborders-xyz/degentic_rd0"
@@ -73,32 +75,6 @@ class CustomFormatter(logging.Formatter):
        return formatter.format(record)
 
 
-formatter = CustomFormatter()
-
-log_file = "/tmp/gradio_log.txt"
-Path(log_file).touch()
-
-ch = logging.FileHandler(log_file)
-ch.setLevel(logging.DEBUG)
-ch.setFormatter(formatter)
-
-logger = logging.getLogger("gradio_log")
-logger.setLevel(logging.DEBUG)
-for handler in logger.handlers:
-    logger.removeHandler(handler)
-logger.addHandler(ch)
-
-
-logger.info("The logs will be displayed in here.")
-
-
-def create_log_handler(level):
-    def l(text):
-        getattr(logger, level)(text)
-
-    return l
-
-
 
 # Custom JSON Encoder to handle numpy types
 class NumpyEncoder(json.JSONEncoder):
@@ -270,7 +246,7 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
        logger.warning(f"Could not convert original numpy image to PIL for gallery: {e}")
 
    # Yield initial state with augmented image and empty model predictions
-    yield img_pil, cleaned_forensics_images, table_rows, "[]", "<div style='font-size: 2.2em; font-weight: bold;padding: 10px;'>Consensus: <span style='color:orange'>UNCERTAIN</span></div>"
 
 
    # Stream results as each model finishes
@@ -305,7 +281,7 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
            result.get("Label", "Error")
        ])
        # Yield partial results: only update the table, others are None
-        yield None, cleaned_forensics_images, table_rows, None, None # Keep cleaned_forensics_images as is (only augmented image for now)
 
    # Multi-threaded forensic processing
    def _run_forensic_task(task_func, img_input, description, **kwargs):
@@ -343,7 +319,7 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
        forensic_output_descriptions.append(description) # Keep track of descriptions for anomaly agent
 
        # Yield partial results: update gallery
-        yield None, cleaned_forensics_images, table_rows, None, None
 
    # After all models, compute the rest as before
    image_data_for_context = {
@@ -363,7 +339,7 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
        # "Bit Plane extractor: Visualization of individual bit planes from different color channels."
    ]
    detected_context_tags = context_agent.infer_context_tags(image_data_for_context, model_predictions_raw)
-
    adjusted_weights = weight_manager.adjust_weights(model_predictions_raw, confidence_scores, context_tags=detected_context_tags)
    weighted_predictions = {"AI": 0.0, "REAL": 0.0, "UNCERTAIN": 0.0}
    for model_id, prediction in model_predictions_raw.items():
@@ -398,8 +374,9 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
    # "MinMax processing (Radius=6): Deviations in local pixel values.",
    # # "Bit Plane extractor: Visualization of individual bit planes from different color channels."
    # ]
    anomaly_detection_results = anomaly_agent.analyze_forensic_outputs(forensic_output_descriptions)
-
    consensus_html = f"<div style='font-size: 2.2em; font-weight: bold;padding: 10px;'>Consensus: <span style='color:{'red' if final_prediction_label == 'AI' else ('green' if final_prediction_label == 'REAL' else 'orange')}'>{final_prediction_label}</span></div>"
    inference_params = {
        "confidence_threshold": confidence_threshold,
@@ -440,41 +417,82 @@ def full_prediction(img, confidence_threshold, rotate_degrees, noise_level, shar
        human_feedback=None
    )
 
-
    for i, res_dict in enumerate(results):
        for key in ["AI Score", "Real Score"]:
            value = res_dict.get(key)
            if isinstance(value, np.float32):
                res_dict[key] = float(value)
-
    json_results = json.dumps(results, cls=NumpyEncoder)
-
 
-
-
-
-
-    gr.
-
-
-
-
-
-    gr.
-
-        label="
-
-
-
-
-
-
-
-
-
-    )
 # def echo_headers(x, request: gr.Request):
 #     print(dict(request.headers))
 #     return str(dict(request.headers))
@@ -520,6 +538,7 @@ community_forensics_preview = gr.Interface(
 #     title="Leaderboard",
 #     api_name="leaderboard"
 # )
 def simple_prediction(img):
    """
    Quick and simple deepfake or real image prediction by the strongest open-source model on the hub.
@@ -633,32 +652,35 @@ minmax_processing_interface = gr.Interface(
    # log_queue.clear() # Clear the queue after retrieving
    # return "\n".join(logs)
 
 
-
-    [
-        detection_model_eval_playground,
-        community_forensics_preview,
-        noise_estimation_interface,
-        bit_plane_interface,
-        ela_interface,
-        gradient_processing_interface,
-        minmax_processing_interface,
-        # gr.Textbox(label="Agent Logs", interactive=False, lines=5, max_lines=20, autoscroll=True) # New textbox for logs
-    ],
-    [
-        "Run Ensemble Prediction",
-        "Open-Source SOTA Model",
-        "Wavelet Blocking Noise Estimation",
-        "Bit Plane Values",
-        "Error Level Analysis (ELA)",
-        "Gradient Processing",
-        "MinMax Processing",
-        # "Agent Logs" # New tab title
-    ],
-    title="Deepfake Detection & Forensics Tools",
-    theme=None,
 
-
 footerMD = """
 ## ⚠️ ENSEMBLE TEAM IN TRAINING ⚠️ \n\n
 
@@ -673,13 +695,8 @@ footer = gr.Markdown(footerMD, elem_classes="footer")
 
 with gr.Blocks() as app:
    demo.render()
-
-    ltext = gr.Textbox(label="Enter text to write to log file")
-    for l in ["debug", "info", "warning", "error", "critical"]:
-        button = gr.Button(f"log as {l}")
-        button.click(fn=create_log_handler(l), inputs=ltext)
-    Log(log_file, dark=True).render()
    footer.render()
 
 
-app.queue(max_size=10, default_concurrency_limit=2).launch(mcp_server=True)
41 |
|
42 |
|
43 |
# --- Gradio Log Handler ---
|
44 |
+
|
45 |
+
# --- Per-Agent Logging Setup ---
|
46 |
+
from utils.agent_logger import AgentLogger, AGENT_LOG_FILES
|
47 |
+
agent_logger = AgentLogger()
|
48 |
+
# --- End Per-Agent Logging Setup ---
|
49 |
|
50 |
LOCAL_LOG_DIR = "./hf_inference_logs"
|
51 |
HF_DATASET_NAME="aiwithoutborders-xyz/degentic_rd0"
|
|
|
75 |
return formatter.format(record)
|
76 |
|
77 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
78 |
|
79 |
# Custom JSON Encoder to handle numpy types
|
80 |
class NumpyEncoder(json.JSONEncoder):
|
|
|
246 |
logger.warning(f"Could not convert original numpy image to PIL for gallery: {e}")
|
247 |
|
248 |
# Yield initial state with augmented image and empty model predictions
|
249 |
+
yield img_pil, cleaned_forensics_images, table_rows, "[]", "<div style='font-size: 2.2em; font-weight: bold;padding: 10px;'>Consensus: <span style='color:orange'>UNCERTAIN</span></div>", None, None, None, None, None
|
250 |
|
251 |
|
252 |
# Stream results as each model finishes
|
|
|
281 |
result.get("Label", "Error")
|
282 |
])
|
283 |
# Yield partial results: only update the table, others are None
|
284 |
+
yield None, cleaned_forensics_images, table_rows, None, None, None, None, None, None, None # Keep cleaned_forensics_images as is (only augmented image for now)
|
285 |
|
286 |
# Multi-threaded forensic processing
|
287 |
def _run_forensic_task(task_func, img_input, description, **kwargs):
|
|
|
319 |
forensic_output_descriptions.append(description) # Keep track of descriptions for anomaly agent
|
320 |
|
321 |
# Yield partial results: update gallery
|
322 |
+
yield None, cleaned_forensics_images, table_rows, None, None, None, None, None, None, None
|
323 |
|
324 |
# After all models, compute the rest as before
|
325 |
image_data_for_context = {
|
|
|
339 |
# "Bit Plane extractor: Visualization of individual bit planes from different color channels."
|
340 |
]
|
341 |
detected_context_tags = context_agent.infer_context_tags(image_data_for_context, model_predictions_raw)
|
342 |
+
agent_logger.log("context_intelligence", "info", f"Detected context tags: {detected_context_tags}")
|
343 |
adjusted_weights = weight_manager.adjust_weights(model_predictions_raw, confidence_scores, context_tags=detected_context_tags)
|
344 |
weighted_predictions = {"AI": 0.0, "REAL": 0.0, "UNCERTAIN": 0.0}
|
345 |
for model_id, prediction in model_predictions_raw.items():
|
|
|
374 |
# "MinMax processing (Radius=6): Deviations in local pixel values.",
|
375 |
# # "Bit Plane extractor: Visualization of individual bit planes from different color channels."
|
376 |
# ]
|
377 |
+
|
378 |
anomaly_detection_results = anomaly_agent.analyze_forensic_outputs(forensic_output_descriptions)
|
379 |
+
agent_logger.log("forensic_anomaly_detection", "info", f"Forensic anomaly detection: {anomaly_detection_results['summary']}")
|
380 |
consensus_html = f"<div style='font-size: 2.2em; font-weight: bold;padding: 10px;'>Consensus: <span style='color:{'red' if final_prediction_label == 'AI' else ('green' if final_prediction_label == 'REAL' else 'orange')}'>{final_prediction_label}</span></div>"
|
381 |
inference_params = {
|
382 |
"confidence_threshold": confidence_threshold,
|
|
|
417 |
human_feedback=None
|
418 |
)
|
419 |
|
420 |
+
agent_logger.log("ensemble_monitor", "info", f"Cleaned forensic images types: {[type(img) for img in cleaned_forensics_images]}")
|
421 |
for i, res_dict in enumerate(results):
|
422 |
for key in ["AI Score", "Real Score"]:
|
423 |
value = res_dict.get(key)
|
424 |
if isinstance(value, np.float32):
|
425 |
res_dict[key] = float(value)
|
426 |
+
agent_logger.log("ensemble_monitor", "info", f"Converted {key} for result {i} from numpy.float32 to float.")
|
427 |
json_results = json.dumps(results, cls=NumpyEncoder)
|
428 |
+
# Read log file contents for each agent
|
429 |
+
def read_log_file(path):
|
430 |
+
try:
|
431 |
+
with open(path, "r") as f:
|
432 |
+
return f.read()
|
433 |
+
except Exception:
|
434 |
+
return ""
|
435 |
+
|
436 |
+
yield (
|
437 |
+
img_pil,
|
438 |
+
cleaned_forensics_images,
|
439 |
+
table_rows,
|
440 |
+
json_results,
|
441 |
+
consensus_html,
|
442 |
+
read_log_file(AGENT_LOG_FILES["context_intelligence"]),
|
443 |
+
read_log_file(AGENT_LOG_FILES["ensemble_monitor"]),
|
444 |
+
read_log_file(AGENT_LOG_FILES["weight_optimization"]),
|
445 |
+
read_log_file(AGENT_LOG_FILES["system_health"]),
|
446 |
+
read_log_file(AGENT_LOG_FILES["forensic_anomaly_detection"])
|
447 |
+
)
|
448 |
|
449 |
+
with gr.Blocks() as detection_model_eval_playground:
|
450 |
+
gr.Markdown("# Multi-Model Ensemble + Agentic Coordinated Deepfake Detection (Paper in Progress)")
|
451 |
+
gr.Markdown("The detection of AI-generated images has entered a critical inflection point. While existing solutions struggle with outdated datasets and inflated claims, our approach prioritizes agility, community collaboration, and an offensive approach to deepfake detection.")
|
452 |
+
with gr.Row():
|
453 |
+
with gr.Column():
|
454 |
+
img_input = gr.Image(label="Upload Image to Analyze", sources=['upload', 'webcam'], type='filepath')
|
455 |
+
confidence_slider = gr.Slider(0.0, 1.0, value=0.7, step=0.05, label="Confidence Threshold")
|
456 |
+
rotate_slider = gr.Slider(0, 45, value=0, step=1, label="Rotate Degrees", visible=False)
|
457 |
+
noise_slider = gr.Slider(0, 50, value=0, step=1, label="Noise Level", visible=False)
|
458 |
+
sharpen_slider = gr.Slider(0, 50, value=0, step=1, label="Sharpen Strength", visible=False)
|
459 |
+
predict_btn = gr.Button("Run Prediction")
|
460 |
+
with gr.Column():
|
461 |
+
processed_img = gr.Image(label="Processed Image", visible=False)
|
462 |
+
gallery = gr.Gallery(label="Post Processed Images", visible=True, columns=[4], rows=[2], container=False, height="auto", object_fit="contain", elem_id="post-gallery")
|
463 |
+
predictions_df = gr.Dataframe(
|
464 |
+
label="Model Predictions",
|
465 |
+
headers=["Arch / Dataset", "By", "AI", "Real", "Label"],
|
466 |
+
datatype=["str", "str", "number", "number", "str"]
|
467 |
+
)
|
468 |
+
raw_json = gr.JSON(label="Raw Model Results", visible=False)
|
469 |
+
consensus_md = gr.Markdown(label="Consensus", value="")
|
470 |
+
with gr.Accordion("Agent Logs", open=False, elem_id="agent-logs-accordion"):
|
471 |
+
with gr.Row():
|
472 |
+
with gr.Column(scale=1):
|
473 |
+
context_intelligence_log = Log(label="Context Log", dark=True, xterm_font_size=12, log_file=AGENT_LOG_FILES["context_intelligence"])
|
474 |
+
ensemble_monitor_log = Log(label="Ensemble Monitor Log", dark=True, xterm_font_size=12, log_file=AGENT_LOG_FILES["ensemble_monitor"])
|
475 |
+
with gr.Column(scale=1):
|
476 |
+
weight_optimization_log = Log(label="Weight Optimization Log", dark=True, xterm_font_size=12, log_file=AGENT_LOG_FILES["weight_optimization"])
|
477 |
+
forensic_log = Log(label="Forensic Anomaly Log", dark=True, xterm_font_size=12, log_file=AGENT_LOG_FILES["forensic_anomaly_detection"])
|
478 |
+
system_health_log = Log(label="System Health Log", dark=True, xterm_font_size=12, log_file=AGENT_LOG_FILES["system_health"], visible=False)
|
479 |
+
|
480 |
+
predict_btn.click(
|
481 |
+
full_prediction,
|
482 |
+
inputs=[img_input, confidence_slider, rotate_slider, noise_slider, sharpen_slider],
|
483 |
+
outputs=[
|
484 |
+
processed_img,
|
485 |
+
gallery,
|
486 |
+
predictions_df,
|
487 |
+
raw_json,
|
488 |
+
consensus_md,
|
489 |
+
context_intelligence_log,
|
490 |
+
ensemble_monitor_log,
|
491 |
+
weight_optimization_log,
|
492 |
+
system_health_log,
|
493 |
+
forensic_log
|
494 |
+
]
|
495 |
+
)
|
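Because full_prediction is a generator, each yield streams a ten-element tuple that Gradio maps positionally onto the ten components listed in outputs above; partial yields pass None placeholders for components that are not being refreshed at that step, as the comments inside full_prediction note. A stripped-down illustration of the same wiring pattern, with hypothetical component names:

import gradio as gr

def stream_steps(text):
    # Partial update: a value for the first output, a None placeholder for the
    # second (the same placeholder pattern full_prediction uses).
    yield "working...", None
    # Final update: values for both outputs.
    yield "done", text.upper()

with gr.Blocks() as sketch:
    inp = gr.Textbox(label="Input")
    status = gr.Textbox(label="Status")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(stream_steps, inputs=inp, outputs=[status, result])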
496 |
# def echo_headers(x, request: gr.Request):
|
497 |
# print(dict(request.headers))
|
498 |
# return str(dict(request.headers))
|
|
|
538 |
# title="Leaderboard",
|
539 |
# api_name="leaderboard"
|
540 |
# )
|
541 |
+
|
542 |
def simple_prediction(img):
|
543 |
"""
|
544 |
Quick and simple deepfake or real image prediction by the strongest open-source model on the hub.
|
|
|
652 |
# log_queue.clear() # Clear the queue after retrieving
|
653 |
# return "\n".join(logs)
|
654 |
|
655 |
+
demo = detection_model_eval_playground
|
656 |
+
|
657 |
+
# demo = gr.TabbedInterface(
|
658 |
+
# [
|
659 |
+
# detection_model_eval_playground,
|
660 |
+
# community_forensics_preview,
|
661 |
+
# noise_estimation_interface,
|
662 |
+
# bit_plane_interface,
|
663 |
+
# ela_interface,
|
664 |
+
# gradient_processing_interface,
|
665 |
+
# minmax_processing_interface,
|
666 |
+
# # gr.Textbox(label="Agent Logs", interactive=False, lines=5, max_lines=20, autoscroll=True) # New textbox for logs
|
667 |
+
# ],
|
668 |
+
# [
|
669 |
+
# "Run Ensemble Prediction",
|
670 |
+
# "Open-Source SOTA Model",
|
671 |
+
# "Wavelet Blocking Noise Estimation",
|
672 |
+
# "Bit Plane Values",
|
673 |
+
# "Error Level Analysis (ELA)",
|
674 |
+
# "Gradient Processing",
|
675 |
+
# "MinMax Processing",
|
676 |
+
# # "Agent Logs" # New tab title
|
677 |
+
# ],
|
678 |
+
# title="Deepfake Detection & Forensics Tools",
|
679 |
+
# theme=None,
|
680 |
|
681 |
+
# )
|
682 |
|
683 |
+
|
684 |
footerMD = """
|
685 |
## ⚠️ ENSEMBLE TEAM IN TRAINING ⚠️ \n\n
|
686 |
|
|
|
695 |
|
696 |
with gr.Blocks() as app:
|
697 |
demo.render()
|
698 |
+
|
699 |
footer.render()
|
700 |
|
701 |
|
702 |
+
app.queue(max_size=10, default_concurrency_limit=2).launch(mcp_server=True)
|
temp_gradio_input.png
DELETED
Git LFS Details
|
utils/agent_logger.py
ADDED
@@ -0,0 +1,47 @@
|
1 |
+
import logging
|
2 |
+
from pathlib import Path
|
3 |
+
|
4 |
+
AGENT_LOG_FILES = {
|
5 |
+
"ensemble_monitor": "./agent_logs/ensemble_monitor.log",
|
6 |
+
"weight_optimization": "./agent_logs/weight_optimization.log",
|
7 |
+
"system_health": "./agent_logs/system_health.log",
|
8 |
+
"context_intelligence": "./agent_logs/context_intelligence.log",
|
9 |
+
"forensic_anomaly_detection": "./agent_logs/forensic_anomaly_detection.log",
|
10 |
+
}
|
11 |
+
|
12 |
+
class AgentLogger:
|
13 |
+
def __init__(self, agent_log_files=None):
|
14 |
+
self.agent_log_files = agent_log_files or AGENT_LOG_FILES
|
15 |
+
self.loggers = {}
|
16 |
+
self._setup_loggers()
|
17 |
+
|
18 |
+
def _setup_loggers(self):
|
19 |
+
for agent, log_file in self.agent_log_files.items():
|
20 |
+
Path(log_file).parent.mkdir(parents=True, exist_ok=True)
|
21 |
+
Path(log_file).touch(exist_ok=True)
|
22 |
+
logger = logging.getLogger(f"agent_log.{agent}")
|
23 |
+
logger.setLevel(logging.DEBUG)
|
24 |
+
# Remove existing handlers to avoid duplicate logs
|
25 |
+
for handler in logger.handlers:
|
26 |
+
logger.removeHandler(handler)
|
27 |
+
handler = logging.FileHandler(log_file)
|
28 |
+
handler.setLevel(logging.DEBUG)
|
29 |
+
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)")
|
30 |
+
handler.setFormatter(formatter)
|
31 |
+
logger.addHandler(handler)
|
32 |
+
self.loggers[agent] = logger
|
33 |
+
|
34 |
+
def log(self, agent, level, message):
|
35 |
+
logger = self.loggers.get(agent)
|
36 |
+
if logger:
|
37 |
+
getattr(logger, level)(message)
|
38 |
+
else:
|
39 |
+
raise ValueError(f"No logger found for agent: {agent}")
|
40 |
+
|
41 |
+
def get_log_file(self, agent):
|
42 |
+
return self.agent_log_files.get(agent)
|
43 |
+
|
44 |
+
# Usage Example:
|
45 |
+
# agent_logger = AgentLogger()
|
46 |
+
# agent_logger.log("ensemble_monitor", "info", "Ensemble monitoring started.")
|
47 |
+
# log_file_path = agent_logger.get_log_file("ensemble_monitor")
|
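Two observations on the class above, offered as notes rather than required changes: logging.getLogger("agent_log.<name>") still propagates records to the root logger, so any root-level handler (for example a console handler) will also receive agent messages, and removing handlers while iterating logger.handlers directly can skip entries because the list shrinks during iteration. A hypothetical adjustment a caller or future revision could apply:

import logging

logger = logging.getLogger("agent_log.ensemble_monitor")
logger.propagate = False             # keep agent records out of root-level handlers
for handler in logger.handlers[:]:   # iterate over a copy so removal cannot skip entries
    logger.removeHandler(handler)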