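"""Evaluate free-text model analyses of network flows against labelled ground truth.

Reads model output from analysis_results.json, maps each analysis to an attack
class, compares it with the `Attack` column of the test CSV, and writes
per-class metrics plus an error breakdown to evaluation_metrics.json.

Example programmatic use (file names are the defaults assumed in main()):
    evaluator = ModelEvaluator()
    metrics = evaluator.evaluate_results('analysis_results.json', 'data/test.csv')
    evaluator.print_evaluation_summary(metrics)
"""
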
import json
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import numpy as np
import re
from typing import Dict, List, Tuple
import logging
from collections import defaultdict


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


class ModelEvaluator:
    def __init__(self):
        self.logger = setup_logging()
        # Canonical attack classes mapped to keywords expected in the model's free-text analysis
        self.attack_types = {
            'benign': ['benign', 'normal', 'legitimate'],
            'dos': ['dos', 'denial of service'],
            'ddos': ['ddos', 'distributed denial of service'],
            'injection': ['injection', 'sql injection', 'command injection'],
            'scanning': ['scanning', 'port scan', 'network scan'],
            'password': ['password', 'brute force', 'credential'],
            'mitm': ['mitm', 'man in the middle'],
            'backdoor': ['backdoor'],
            'ransomware': ['ransomware'],
            'xss': ['xss', 'cross site scripting']
        }

    def extract_prediction(self, analysis_text: str) -> str:
        """Extract the predicted attack type from model output."""
        analysis_text = analysis_text.lower()
        # First check for an explicit "classified as <label>" statement and map
        # the captured word onto a known attack type
        match = re.search(r"classified as (\w+)", analysis_text)
        if match:
            label = match.group(1)
            for attack_type, keywords in self.attack_types.items():
                if label == attack_type or label in keywords:
                    return attack_type
        # Otherwise scan for attack-type keywords; match on word boundaries so
        # that e.g. 'dos' does not fire inside 'ddos'
        for attack_type, keywords in self.attack_types.items():
            for keyword in keywords:
                if re.search(rf'\b{re.escape(keyword)}\b', analysis_text):
                    return attack_type
        # Default to benign if no attack type is found
        return 'benign'
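
    # Illustrative example (hypothetical analysis text): an input such as
    # "The flow is classified as ddos due to the very high packet rate" maps to
    # 'ddos', while text containing no known attack keyword falls back to 'benign'.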

    def evaluate_results(self, results_file: str, test_data_file: str) -> Dict:
        """Evaluate model predictions against ground truth."""
        # Load model output and the labelled test set
        with open(results_file, 'r') as f:
            results = json.load(f)
        test_df = pd.read_csv(test_data_file)

        # Extract predictions and true labels
        predictions = []
        true_labels = []
        confidence_scores = defaultdict(list)
        for result in results:
            flow_id = result['flow_id']
            true_label = test_df.iloc[flow_id]['Attack'].lower()
            prediction = self.extract_prediction(result['analysis'])
            predictions.append(prediction)
            true_labels.append(true_label)

            # Track a rough prediction confidence based on hedging language
            analysis_lower = result['analysis'].lower()
            confidence = 1.0
            if 'possibly' in analysis_lower or 'potential' in analysis_lower:
                confidence = 0.7
            if 'suspicious' in analysis_lower:
                confidence = 0.8
            confidence_scores[prediction].append(confidence)

        # Calculate metrics
        accuracy = float(accuracy_score(true_labels, predictions))
        labels = list(self.attack_types.keys())
        class_report = classification_report(true_labels, predictions,
                                             labels=labels,
                                             output_dict=True,
                                             zero_division=0)
        conf_matrix = confusion_matrix(true_labels, predictions, labels=labels)

        # Calculate average confidence per class
        avg_confidence = {k: float(np.mean(v)) if v else 0.0
                          for k, v in confidence_scores.items()}

        # Analyze error patterns
        error_analysis = self.analyze_errors(true_labels, predictions, results)

        # Prepare metrics for JSON serialization
        metrics = {
            'accuracy': accuracy,
            'classification_report': {},
            'confusion_matrix': conf_matrix.tolist(),
            'average_confidence': avg_confidence,
            'error_analysis': error_analysis
        }
        # classification_report also returns scalar entries (e.g. 'accuracy');
        # keep only the per-class dicts and cast values to plain floats
        for class_name, metrics_dict in class_report.items():
            if isinstance(metrics_dict, dict):
                metrics['classification_report'][class_name] = {
                    k: float(v) for k, v in metrics_dict.items()
                    if isinstance(v, (int, float, np.number))
                }
        return metrics
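
    # Expected shape of each entry in the results file, inferred from the fields
    # accessed above and in analyze_errors() (illustrative values):
    #   {"flow_id": 0, "source_ip": "192.168.1.10", "destination_ip": "10.0.0.5",
    #    "analysis": "Flow classified as scanning: sequential destination ports ..."}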

    def analyze_errors(self, true_labels: List[str], predictions: List[str],
                       results: List[Dict]) -> Dict:
        """Analyze patterns in model errors"""
        error_patterns = {
            'false_positives': defaultdict(list),
            'false_negatives': defaultdict(list),
        }
        for true, pred, result in zip(true_labels, predictions, results):
            if true != pred:
                # Track the specific flow details for this error
                flow_details = {
                    'flow_id': result['flow_id'],
                    'source_ip': result['source_ip'],
                    'destination_ip': result['destination_ip'],
                    'model_explanation': result['analysis']
                }
                # Categorize error type
                if true == 'benign':
                    error_patterns['false_positives'][pred].append(flow_details)
                else:
                    error_patterns['false_negatives'][true].append(flow_details)
        # Convert defaultdict to regular dict for JSON serialization
        return {
            'false_positives': dict(error_patterns['false_positives']),
            'false_negatives': dict(error_patterns['false_negatives'])
        }
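
    # The returned structure groups misclassified flows by label (illustrative):
    #   {'false_positives': {'dos': [flow_details, ...]},
    #    'false_negatives': {'ransomware': [flow_details, ...]}}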

    def print_evaluation_summary(self, metrics: Dict):
        """Print a human-readable summary of evaluation metrics"""
        self.logger.info("\n=== Model Evaluation Summary ===")

        # Overall accuracy
        self.logger.info(f"\nOverall Accuracy: {metrics['accuracy']:.4f}")

        # Per-class performance
        self.logger.info("\nPer-class Performance:")
        class_metrics = metrics['classification_report']
        for class_name in self.attack_types.keys():
            if class_name in class_metrics:
                metrics_dict = class_metrics[class_name]
                self.logger.info(f"\n{class_name.upper()}:")
                self.logger.info(f"  Precision: {metrics_dict.get('precision', 0):.4f}")
                self.logger.info(f"  Recall: {metrics_dict.get('recall', 0):.4f}")
                self.logger.info(f"  F1-Score: {metrics_dict.get('f1-score', 0):.4f}")
                if class_name in metrics['average_confidence']:
                    self.logger.info(f"  Avg Confidence: {metrics['average_confidence'][class_name]:.4f}")

        # Error analysis summary, ordered by frequency
        error_analysis = metrics['error_analysis']
        self.logger.info("\nError Analysis Summary:")

        self.logger.info("\nMost Common False Positives:")
        for attack_type, errors in sorted(error_analysis['false_positives'].items(),
                                          key=lambda item: len(item[1]), reverse=True):
            self.logger.info(f"  {attack_type}: {len(errors)} instances")

        self.logger.info("\nMost Common False Negatives:")
        for attack_type, errors in sorted(error_analysis['false_negatives'].items(),
                                          key=lambda item: len(item[1]), reverse=True):
            self.logger.info(f"  {attack_type}: {len(errors)} instances")


def main():
    logger = setup_logging()
    # Initialize evaluator
    evaluator = ModelEvaluator()
    try:
        # Evaluate results
        metrics = evaluator.evaluate_results(
            results_file='analysis_results.json',
            test_data_file='data/test.csv'
        )
        # Print evaluation summary
        evaluator.print_evaluation_summary(metrics)
        # Save detailed metrics
        with open('evaluation_metrics.json', 'w') as f:
            json.dump(metrics, f, indent=2)
        logger.info("\nSaved detailed metrics to evaluation_metrics.json")
    except Exception as e:
        logger.error(f"Error during evaluation: {str(e)}")
        raise


if __name__ == "__main__":
    main()