import torch
from unsloth import FastLanguageModel
import pandas as pd
from transformers import TextStreamer
import logging
from typing import Dict, List, Optional
import json


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)
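
# Note: logging.basicConfig is a no-op once the root logger has handlers, so
# calling setup_logging() more than once (as main and the analyzer both do)
# configures logging only the first time.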


class NetworkFlowAnalyzer:
    def __init__(self, model_path: str):
        self.logger = setup_logging()
        self.logger.info(f"Loading model from {model_path}")

        # Load model and tokenizer
        self.model, self.tokenizer = FastLanguageModel.from_pretrained(
            model_path,
            max_seq_length=2048,
            load_in_4bit=True,
        )

        # Enable faster inference
        FastLanguageModel.for_inference(self.model)

        # Set up tokenizer for generation
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.tokenizer.padding_side = "right"
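        # Right padding is fine here because generation below always runs on a
        # single sequence; batched generation would typically require
        # tokenizer.padding_side = "left" so new tokens follow each prompt.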

    def format_flow_prompt(self, row: pd.Series) -> str:
        """Format a network flow into a prompt"""
        flow_text = f"""Network Flow Description:
Source: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

        # Format in LLaMA-3 style
        return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
Analyze this network flow for potential security threats:
{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
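
    # An alternative, assuming the tokenizer ships with the LLaMA-3 chat
    # template (which also puts a blank line after each header token pair,
    # something the hand-built string above omits):
    #
    #   messages = [{"role": "user", "content":
    #                f"Analyze this network flow for potential security threats:\n{flow_text}"}]
    #   return self.tokenizer.apply_chat_template(
    #       messages, tokenize=False, add_generation_prompt=True)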

    def analyze_flow(self, row: pd.Series, max_new_tokens: int = 256, stream: bool = False) -> Optional[str]:
        """Analyze a single network flow"""
        # Prepare input (assumes a CUDA-capable GPU is available)
        prompt = self.format_flow_prompt(row)
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=2048
        ).to("cuda")

        # Set up streamer if requested
        streamer = TextStreamer(self.tokenizer) if stream else None

        # Generate prediction
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
            use_cache=True
        )

        if not stream:
            # Decode only the newly generated tokens, not the echoed prompt
            prompt_length = inputs["input_ids"].shape[1]
            return self.tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
        return None
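
    # With stream=True, TextStreamer prints tokens to stdout as they are
    # generated, so nothing is captured for the caller (hence the None return).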

    def analyze_batch(self, df: pd.DataFrame, output_file: Optional[str] = None) -> List[Dict]:
        """Analyze a batch of network flows"""
        results = []

        for idx, row in df.iterrows():
            self.logger.info(f"Analyzing flow {idx+1}/{len(df)}")
            try:
                analysis = self.analyze_flow(row)
                result = {
                    "flow_id": idx,
                    "source_ip": row["IPV4_SRC_ADDR"],
                    "destination_ip": row["IPV4_DST_ADDR"],
                    "analysis": analysis,
                    "true_label": row.get("Label", "Unknown"),
                    "attack_type": row.get("Attack", "Unknown")
                }
                results.append(result)
            except Exception as e:
                self.logger.error(f"Error analyzing flow {idx}: {str(e)}")
                continue

        if output_file:
            with open(output_file, 'w') as f:
                json.dump(results, f, indent=2)

        return results
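
    # Each record in the returned list (and in the JSON file, when requested)
    # has the shape:
    #   {"flow_id": 0, "source_ip": "...", "destination_ip": "...",
    #    "analysis": "...", "true_label": ..., "attack_type": ...}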


def main():
    logger = setup_logging()

    # Initialize analyzer
    analyzer = NetworkFlowAnalyzer("cybersec_model_output/checkpoint-4329")

    # Load test data
    test_df = pd.read_csv("data/test.csv")
    logger.info(f"Loaded {len(test_df)} test samples")

    # Option 1: Stream analysis of a single flow
    logger.info("\nAnalyzing single flow with streaming:")
    analyzer.analyze_flow(test_df.iloc[0], stream=True)

    # Option 2: Batch analysis
    logger.info("\nAnalyzing batch of flows:")
    sample_size = min(100, len(test_df))  # Analyze up to the first 100 flows
    results = analyzer.analyze_batch(
        test_df.head(sample_size),
        output_file="analysis_results.json"
    )

    # Print some statistics
    logger.info(f"\nAnalyzed {len(results)} flows")
    if "Label" in test_df.columns:
        # Count flows the model called malicious that are also labeled malicious (1)
        true_positives = sum(1 for r in results
                             if "malicious" in r["analysis"].lower()
                             and r["true_label"] == 1)
        logger.info(f"Correctly flagged {true_positives} labeled-malicious flows")


if __name__ == "__main__":
    main()
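
# The input CSV is expected to provide NetFlow-style feature columns as used
# above: IPV4_SRC_ADDR, L4_SRC_PORT, IPV4_DST_ADDR, L4_DST_PORT, PROTOCOL,
# L7_PROTO, TCP_FLAGS, IN_BYTES, OUT_BYTES, IN_PKTS, OUT_PKTS and
# FLOW_DURATION_MILLISECONDS, plus optional Label/Attack ground-truth columns.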