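"""Zero-shot evaluation of Meta-Llama-3.1-8B-Instruct as a binary network-flow classifier.

Each flow record from the test CSV is rendered into a chat prompt and the model is asked
to answer 'malicious' or 'benign'; answers are scored against the dataset's Label column
(accuracy, ROC AUC, classification report).

Example invocation (script name and paths are illustrative, not part of the original):

    python zero_shot_test.py --test_path data/test_flows.csv \
        --output_dir results/zero_shot --max_samples 1000
"""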
import pandas as pd
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from tqdm import tqdm
import logging
import argparse
from pathlib import Path
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score
import json


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def format_flow_prompt(row):
    """Format a single network flow into a chat prompt for the model."""
    messages = [
        {"role": "system", "content": "You are a cybersecurity expert analyzing network flows for potential security threats. You must classify each flow as either malicious or benign based on its characteristics. Respond with only the word 'malicious' or 'benign'."},
        {"role": "user", "content": f"""Analyze this network flow:
Source IP: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination IP: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})
Protocol: {row['PROTOCOL']}
Layer 7 Protocol: {row['L7_PROTO']}
Traffic Volume: {row['IN_BYTES']} bytes in, {row['OUT_BYTES']} bytes out
Packets: {row['IN_PKTS']} packets in, {row['OUT_PKTS']} packets out
TCP Flags: {row['TCP_FLAGS']}
Duration: {row['FLOW_DURATION_MILLISECONDS']} ms
Is this network flow malicious or benign? Answer with only one word."""}
    ]
    return messages
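
# Illustrative example (field values are made up, not taken from any real capture): a row such as
#   {"IPV4_SRC_ADDR": "10.0.0.5", "L4_SRC_PORT": 51432, "IPV4_DST_ADDR": "172.16.0.9",
#    "L4_DST_PORT": 80, "PROTOCOL": 6, "L7_PROTO": 7.0, "IN_BYTES": 1820, "OUT_BYTES": 9640,
#    "IN_PKTS": 12, "OUT_PKTS": 14, "TCP_FLAGS": 27, "FLOW_DURATION_MILLISECONDS": 430}
# yields a two-message chat (system instruction + user turn) that
# tokenizer.apply_chat_template() can render directly.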


class ZeroShotTester:
    def __init__(self, device="cuda"):
        """Initialize the model and tokenizer."""
        self.logger = setup_logging()
        self.device = device
        model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
        self.logger.info(f"Loading model {model_name}...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            use_fast=True
        )
        # LLaMA ships without a dedicated pad token, so reuse the EOS token
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            pad_token_id=self.tokenizer.eos_token_id  # set the pad token ID in the model config
        )
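        # Rough sizing note (approximate, not from the original script): 8B parameters in
        # float16 is about 16 GB of weights alone, so device_map="auto" may shard or
        # offload layers on smaller GPUs, which slows per-sample generation.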

    def predict_single(self, messages):
        """Generate a prediction for a single prompt; returns 1 for malicious, 0 for benign."""
        # Render the chat template and tokenize
        encodings = self.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        )
        # Single unpadded sequence, so the attention mask is all ones
        attention_mask = torch.ones_like(encodings)
        # Move to device
        input_ids = encodings.to(self.device)
        attention_mask = attention_mask.to(self.device)
        # Generate with greedy decoding (temperature has no effect when do_sample=False)
        with torch.no_grad():
            outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=5,   # we only need one word
                do_sample=False,    # deterministic output
                pad_token_id=self.tokenizer.pad_token_id
            )
        # Decode only the newly generated tokens
        response = outputs[0][input_ids.shape[-1]:]
        response_text = self.tokenizer.decode(response, skip_special_tokens=True).strip().lower()
        # Map the free-text answer to a binary label
        return 1 if 'malicious' in response_text else 0
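    # Illustrative usage (names reuse this file's own helpers; the row values would come
    # from the test CSV):
    #   tester = ZeroShotTester()
    #   label = tester.predict_single(format_flow_prompt(row))  # 1 = malicious, 0 = benign
    # Any response that does not contain the word 'malicious' is counted as benign.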

    def evaluate_dataset(self, test_path, batch_size=32, max_samples=None):
        """Evaluate the model on a labelled test CSV and return metrics plus raw outputs."""
        self.logger.info(f"Starting evaluation on {test_path}")
        # Read and process the test set in chunks
        chunk_iterator = pd.read_csv(test_path, chunksize=batch_size)
        all_predictions = []
        all_labels = []
        total_processed = 0
        total_failed = 0
        try:
            for chunk in tqdm(chunk_iterator, desc="Processing batches"):
                if max_samples and total_processed >= max_samples:
                    break
                # Generate prompts for the chunk
                prompts = [format_flow_prompt(row) for _, row in chunk.iterrows()]
                # Get predictions, falling back to benign when a prompt fails
                predictions = []
                failed = 0
                for prompt in prompts:
                    try:
                        pred = self.predict_single(prompt)
                    except Exception as e:
                        self.logger.error(f"Error processing prompt: {e}")
                        pred = 0  # default to benign on error
                        failed += 1
                    predictions.append(pred)
                all_predictions.extend(predictions)
                all_labels.extend(chunk['Label'].tolist())
                total_processed += len(chunk)
                total_failed += failed
                if total_processed % 100 == 0:
                    self.logger.info(f"Processed {total_processed} samples...")
                    # Calculate and log intermediate metrics; AUC is only defined once both classes have appeared
                    curr_accuracy = accuracy_score(all_labels, all_predictions)
                    if len(set(all_labels)) > 1:
                        curr_auc = roc_auc_score(all_labels, all_predictions)
                        self.logger.info(f"Current Accuracy: {curr_accuracy:.4f}, AUC: {curr_auc:.4f}")
                    else:
                        self.logger.info(f"Current Accuracy: {curr_accuracy:.4f}, AUC: undefined (only one class seen so far)")
                    self.logger.info(f"Successfully processed samples in this batch: {len(predictions) - failed}/{len(predictions)}")
        except Exception as e:
            self.logger.error(f"Error during evaluation: {e}")
            if len(all_predictions) == 0:
                raise
        # Calculate final metrics; slice to total_processed so a partially processed final chunk is excluded
        accuracy = accuracy_score(all_labels[:total_processed], all_predictions[:total_processed])
        auc = roc_auc_score(all_labels[:total_processed], all_predictions[:total_processed])
        report = classification_report(all_labels[:total_processed], all_predictions[:total_processed])
        return {
            'accuracy': accuracy,
            'auc': auc,
            'classification_report': report,
            'total_samples': total_processed,
            'failed_samples': total_failed,
            'predictions': all_predictions[:total_processed],
            'true_labels': all_labels[:total_processed]
        }
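    # The returned dict becomes JSON-friendly once numpy scalars are cast to native types
    # (see main() below); 'predictions' and 'true_labels' are parallel lists of 0/1 values.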


def main():
    parser = argparse.ArgumentParser(description="Zero-shot testing of LLaMA 3.1 on network flows")
    parser.add_argument("--test_path", required=True, help="Path to test CSV file")
    parser.add_argument("--output_dir", required=True, help="Directory to save results")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size for processing")
    parser.add_argument("--max_samples", type=int, default=None,
                        help="Maximum number of samples to test (None for all)")
    args = parser.parse_args()
    logger = setup_logging()
    # Create output directory
    output_path = Path(args.output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    # Initialize tester
    tester = ZeroShotTester()
    try:
        # Evaluate model
        results = tester.evaluate_dataset(
            args.test_path,
            batch_size=args.batch_size,
            max_samples=args.max_samples
        )
        # Log summary
        logger.info("\nFinal Results:")
        logger.info(f"Total samples processed: {results['total_samples']}")
        logger.info(f"Successfully processed samples: {results['total_samples'] - results['failed_samples']}")
        logger.info(f"Accuracy: {results['accuracy']:.4f}")
        logger.info(f"AUC: {results['auc']:.4f}")
        logger.info("\nClassification Report:")
        logger.info(results['classification_report'])
        # Save detailed results
        with open(output_path / 'zero_shot_results.json', 'w') as f:
            # Cast numpy types to native Python types for JSON serialization
            results['accuracy'] = float(results['accuracy'])
            results['auc'] = float(results['auc'])
            results['predictions'] = [int(p) for p in results['predictions']]
            results['true_labels'] = [int(l) for l in results['true_labels']]
            json.dump(results, f, indent=4)
    except Exception as e:
        logger.error(f"Error in main execution: {e}")
        raise


if __name__ == "__main__":
    main()