import torch
from unsloth import FastLanguageModel
import pandas as pd
from transformers import TextStreamer
import logging
from typing import List, Dict, Optional
import json

def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)

class NetworkFlowAnalyzer:
    def __init__(self, model_path: str):
        self.logger = setup_logging()
        self.logger.info(f"Loading model from {model_path}")

        # Load model and tokenizer
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_path,
            max_seq_length=2048,
            load_in_4bit=True,
        )

        # Enable faster inference
        FastLanguageModel.for_inference(self.model)

        # Set up tokenizer for generation
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.padding_side = "right"
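
        # Note: load_in_4bit=True quantizes weights via bitsandbytes, which
        # assumes a CUDA-capable GPU; Unsloth itself also expects a supported
        # GPU, so this script is not expected to run on CPU-only machines.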

    def format_flow_prompt(self, row: pd.Series) -> str:
        """Format a network flow into a prompt."""
        flow_text = f"""Network Flow Description:
Source: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})

Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}

Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

        # Format in LLaMA-3 chat style (a blank line follows each header marker)
        return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>

Analyze this network flow for potential security threats:
{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

"""
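    # Note: the return statement above hand-rolls the Llama-3 chat markup. If
    # the saved tokenizer carries a chat template, an equivalent and less
    # error-prone sketch (assuming transformers >= 4.34) is:
    #
    #   messages = [{"role": "user",
    #                "content": "Analyze this network flow for potential "
    #                           f"security threats:\n{flow_text}"}]
    #   prompt = self.tokenizer.apply_chat_template(
    #       messages, tokenize=False, add_generation_prompt=True
    #   )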

    def analyze_flow(self, row: pd.Series, max_new_tokens: int = 256, stream: bool = False) -> Optional[str]:
        """Analyze a single network flow. Returns None when streaming."""
        # Prepare input and move it to whatever device the model lives on
        prompt = self.format_flow_prompt(row)
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=2048
        ).to(self.model.device)

        # Set up streamer if requested
        streamer = TextStreamer(self.tokenizer) if stream else None

        # Generate prediction
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            use_cache=True
        )

        if not stream:
            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return None
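
    # Note: decode(outputs[0]) above returns the prompt plus the completion.
    # To keep only the newly generated analysis, slice off the prompt tokens
    # first, e.g.:
    #   new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    #   return self.tokenizer.decode(new_tokens, skip_special_tokens=True)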

    def analyze_batch(self, df: pd.DataFrame, output_file: Optional[str] = None) -> List[Dict]:
        """Analyze a batch of network flows."""
        results = []
        for idx, row in df.iterrows():
            self.logger.info(f"Analyzing flow {idx+1}/{len(df)}")
            try:
                analysis = self.analyze_flow(row)
                result = {
                    "flow_id": idx,
                    "source_ip": row["IPV4_SRC_ADDR"],
                    "destination_ip": row["IPV4_DST_ADDR"],
                    "analysis": analysis,
                    "true_label": row.get("Label", "Unknown"),
                    "attack_type": row.get("Attack", "Unknown")
                }
                results.append(result)
            except Exception as e:
                self.logger.error(f"Error analyzing flow {idx}: {str(e)}")
                continue

        if output_file:
            with open(output_file, 'w') as f:
                # default=str guards against non-JSON-serializable values,
                # e.g. numpy integer labels coming out of pandas
                json.dump(results, f, indent=2, default=str)

        return results
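
    # Note: results are only written once the whole batch finishes, so a
    # crash mid-run loses everything. For long runs, appending one JSON line
    # per flow (JSONL) inside the loop is a more robust sketch:
    #   with open(output_file, "a") as f:
    #       f.write(json.dumps(result, default=str) + "\n")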

def main():
    logger = setup_logging()

    # Initialize analyzer
    analyzer = NetworkFlowAnalyzer("cybersec_model_output/checkpoint-4329")

    # Load test data
    test_df = pd.read_csv("data/test.csv")
    logger.info(f"Loaded {len(test_df)} test samples")

    # Option 1: Stream analysis of a single flow
    logger.info("\nAnalyzing single flow with streaming:")
    analyzer.analyze_flow(test_df.iloc[0], stream=True)

    # Option 2: Batch analysis
    logger.info("\nAnalyzing batch of flows:")
    sample_size = min(100, len(test_df))  # Analyze at most the first 100 flows
    results = analyzer.analyze_batch(
        test_df.head(sample_size),
        output_file="analysis_results.json"
    )

    # Print some statistics: count flows whose true label is malicious (1)
    # and whose generated analysis also mentions "malicious"
    logger.info(f"\nAnalyzed {len(results)} flows")
    if "Label" in test_df.columns:
        true_positives = sum(1 for r in results
                             if "malicious" in r["analysis"].lower()
                             and r["true_label"] == 1)
        logger.info(f"Correctly flagged {true_positives} known-malicious flows")

if __name__ == "__main__":
    main()
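
# Usage: run the script directly once the fine-tuned checkpoint
# (cybersec_model_output/checkpoint-4329) and data/test.csv exist:
#   python <this_script>.py
# Batch results are written to analysis_results.json.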