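"""Evaluate model-generated network flow analyses against labeled test data.

Reads per-flow analysis results from a JSON file, extracts a predicted attack
type from each free-text analysis, compares predictions to the ground-truth
`Attack` column of the test CSV, and reports accuracy, per-class metrics,
confidence estimates, and an error breakdown.
"""
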
import json
import logging
import re
from collections import defaultdict
from typing import Dict, List

import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix


def setup_logging():
    """Configure root logging and return a module-level logger."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


class ModelEvaluator:
    def __init__(self):
        self.logger = setup_logging()
        self.attack_types = {
            'benign': ['benign', 'normal', 'legitimate'],
            'dos': ['dos', 'denial of service'],
            'ddos': ['ddos', 'distributed denial of service'],
            'injection': ['injection', 'sql injection', 'command injection'],
            'scanning': ['scanning', 'port scan', 'network scan'],
            'password': ['password', 'brute force', 'credential'],
            'mitm': ['mitm', 'man in the middle'],
            'backdoor': ['backdoor'],
            'ransomware': ['ransomware'],
            'xss': ['xss', 'cross site scripting']
        }

    def extract_prediction(self, analysis_text: str) -> str:
        """Extract the predicted attack type from model output."""
        analysis_text = analysis_text.lower()

        # Prefer an explicit "classified as <label>" statement, but only accept
        # the label when it maps onto a known attack type.
        match = re.search(r"classified as (\w+)", analysis_text)
        if match:
            candidate = match.group(1)
            for attack_type, keywords in self.attack_types.items():
                if candidate == attack_type or candidate in keywords:
                    return attack_type

        # Otherwise fall back to scanning the full text for known keywords.
        for attack_type, keywords in self.attack_types.items():
            for keyword in keywords:
                if keyword in analysis_text:
                    return attack_type

        # Default when no attack indicators are found.
        return 'benign'
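
    # Illustrative example (hypothetical input text): extract_prediction(
    #     "Flow classified as ddos due to high packet rate") returns 'ddos';
    # analyses mentioning no known keyword fall back to 'benign'.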

    def evaluate_results(self, results_file: str, test_data_file: str) -> Dict:
        """Evaluate model predictions against ground truth."""
        with open(results_file, 'r') as f:
            results = json.load(f)
        test_df = pd.read_csv(test_data_file)

        predictions = []
        true_labels = []
        confidence_scores = defaultdict(list)

        for result in results:
            flow_id = result['flow_id']
            true_label = test_df.iloc[flow_id]['Attack'].lower()
            analysis = result['analysis'].lower()
            prediction = self.extract_prediction(result['analysis'])

            predictions.append(prediction)
            true_labels.append(true_label)

            # Crude confidence heuristic based on hedging language in the analysis.
            confidence = 1.0
            if 'possibly' in analysis or 'potential' in analysis:
                confidence = 0.7
            if 'suspicious' in analysis:
                confidence = 0.8
            confidence_scores[prediction].append(confidence)

        accuracy = float(accuracy_score(true_labels, predictions))
        class_report = classification_report(true_labels, predictions,
                                              labels=list(self.attack_types.keys()),
                                              output_dict=True,
                                              zero_division=0)
        conf_matrix = confusion_matrix(true_labels, predictions,
                                       labels=list(self.attack_types.keys()))

        avg_confidence = {k: float(np.mean(v)) if v else 0.0
                          for k, v in confidence_scores.items()}

        error_analysis = self.analyze_errors(true_labels, predictions, results)

        metrics = {
            'accuracy': accuracy,
            'classification_report': {},
            # Rows and columns follow the order of self.attack_types keys.
            'confusion_matrix': conf_matrix.tolist(),
            'average_confidence': avg_confidence,
            'error_analysis': error_analysis
        }

        # Keep only numeric entries so the report stays JSON-serializable.
        for class_name, metrics_dict in class_report.items():
            if isinstance(metrics_dict, dict):
                metrics['classification_report'][class_name] = {
                    k: float(v) for k, v in metrics_dict.items()
                    if isinstance(v, (int, float, np.number))
                }

        return metrics

    def analyze_errors(self, true_labels: List[str], predictions: List[str],
                       results: List[Dict]) -> Dict:
        """Analyze patterns in model errors."""
        error_patterns = {
            'false_positives': defaultdict(list),
            'false_negatives': defaultdict(list),
        }

        for true, pred, result in zip(true_labels, predictions, results):
            if true != pred:
                flow_details = {
                    'flow_id': result['flow_id'],
                    'source_ip': result['source_ip'],
                    'destination_ip': result['destination_ip'],
                    'model_explanation': result['analysis']
                }

                # Benign traffic flagged as an attack counts as a false positive
                # (keyed by the predicted attack type); any misclassified attack
                # counts as a false negative (keyed by the true attack type).
                if true == 'benign':
                    error_patterns['false_positives'][pred].append(flow_details)
                else:
                    error_patterns['false_negatives'][true].append(flow_details)

        return {
            'false_positives': dict(error_patterns['false_positives']),
            'false_negatives': dict(error_patterns['false_negatives'])
        }
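
    # Illustrative return shape (hypothetical values):
    #   {'false_positives': {'ddos': [{'flow_id': 17, 'source_ip': ..., ...}]},
    #    'false_negatives': {'scanning': [...]}}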

    def print_evaluation_summary(self, metrics: Dict):
        """Print a human-readable summary of evaluation metrics."""
        self.logger.info("\n=== Model Evaluation Summary ===")

        self.logger.info(f"\nOverall Accuracy: {metrics['accuracy']:.4f}")

        self.logger.info("\nPer-class Performance:")
        class_metrics = metrics['classification_report']
        for class_name in self.attack_types.keys():
            if class_name in class_metrics:
                metrics_dict = class_metrics[class_name]
                self.logger.info(f"\n{class_name.upper()}:")
                self.logger.info(f"  Precision: {metrics_dict.get('precision', 0):.4f}")
                self.logger.info(f"  Recall: {metrics_dict.get('recall', 0):.4f}")
                self.logger.info(f"  F1-Score: {metrics_dict.get('f1-score', 0):.4f}")
                if class_name in metrics['average_confidence']:
                    self.logger.info(f"  Avg Confidence: {metrics['average_confidence'][class_name]:.4f}")

        self.logger.info("\nError Analysis Summary:")
        error_analysis = metrics['error_analysis']

        self.logger.info("\nMost Common False Positives:")
        for attack_type, errors in error_analysis['false_positives'].items():
            self.logger.info(f"  {attack_type}: {len(errors)} instances")

        self.logger.info("\nMost Common False Negatives:")
        for attack_type, errors in error_analysis['false_negatives'].items():
            self.logger.info(f"  {attack_type}: {len(errors)} instances")


def main():
    logger = setup_logging()
    evaluator = ModelEvaluator()

    try:
        metrics = evaluator.evaluate_results(
            results_file='analysis_results.json',
            test_data_file='data/test.csv'
        )

        evaluator.print_evaluation_summary(metrics)

        with open('evaluation_metrics.json', 'w') as f:
            json.dump(metrics, f, indent=2)
        logger.info("\nSaved detailed metrics to evaluation_metrics.json")

    except Exception as e:
        logger.error(f"Error during evaluation: {str(e)}")
        raise


if __name__ == "__main__":
    main()
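
# Note: `analysis_results.json` is assumed to be a list of records shaped like
#   {"flow_id": <int row index into data/test.csv>, "analysis": "<model output text>",
#    "source_ip": "...", "destination_ip": "..."}
# as implied by the keys accessed in evaluate_results() and analyze_errors().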