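"""Zero-shot evaluation of Meta-Llama-3.1-8B-Instruct as a network-flow classifier:
each flow from a labeled NetFlow CSV is turned into a chat prompt, classified as
malicious or benign, and the resulting accuracy/AUC metrics are written to JSON."""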
import pandas as pd
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
from tqdm import tqdm
import logging
import argparse
from pathlib import Path
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score
import json


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def format_flow_prompt(row):
    """Format a single network flow into a prompt for the model"""
    messages = [
        {"role": "system", "content": "You are a cybersecurity expert analyzing network flows for potential security threats. You must classify each flow as either malicious or benign based on its characteristics. Respond with only the word 'malicious' or 'benign'."},
        {"role": "user", "content": f"""Analyze this network flow:
Source IP: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination IP: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})
Protocol: {row['PROTOCOL']}
Layer 7 Protocol: {row['L7_PROTO']}
Traffic Volume: {row['IN_BYTES']} bytes in, {row['OUT_BYTES']} bytes out
Packets: {row['IN_PKTS']} packets in, {row['OUT_PKTS']} packets out
TCP Flags: {row['TCP_FLAGS']}
Duration: {row['FLOW_DURATION_MILLISECONDS']} ms

Is this network flow malicious or benign? Answer with only one word."""}
    ]
    return messages


class ZeroShotTester:
    def __init__(self, device="cuda"):
        """Initialize the model and tokenizer"""
        self.logger = setup_logging()
        self.device = device
        model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"

        self.logger.info(f"Loading model {model_name}...")
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            use_fast=True
        )
        # Llama 3.1 has no dedicated pad token, so reuse the EOS token for padding.
        self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            device_map="auto",
            pad_token_id=self.tokenizer.eos_token_id
        )

    def predict_single(self, messages):
        """Generate a prediction for a single prompt: 1 = malicious, 0 = benign."""
        encodings = self.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        )

        # A single unpadded prompt, so the attention mask is all ones.
        attention_mask = torch.ones_like(encodings)

        input_ids = encodings.to(self.device)
        attention_mask = attention_mask.to(self.device)

        with torch.no_grad():
            outputs = self.model.generate(
                input_ids=input_ids,
                attention_mask=attention_mask,
                max_new_tokens=5,
                do_sample=False,  # greedy decoding; temperature is not used when sampling is disabled
                pad_token_id=self.tokenizer.pad_token_id
            )

        # Decode only the newly generated tokens, excluding the prompt.
        response = outputs[0][input_ids.shape[-1]:]
        response_text = self.tokenizer.decode(response, skip_special_tokens=True).strip().lower()

        return 1 if 'malicious' in response_text else 0

    def evaluate_dataset(self, test_path, batch_size=32, max_samples=None):
        """Evaluate the model on a test dataset"""
        self.logger.info(f"Starting evaluation on {test_path}")

        chunk_iterator = pd.read_csv(test_path, chunksize=batch_size)
        all_predictions = []
        all_labels = []
        total_processed = 0
        total_failed = 0

        try:
            for chunk in tqdm(chunk_iterator, desc="Processing batches"):
                if max_samples and total_processed >= max_samples:
                    break

                prompts = [format_flow_prompt(row) for _, row in chunk.iterrows()]

                predictions = []
                batch_failed = 0
                for prompt in prompts:
                    try:
                        pred = self.predict_single(prompt)
                        predictions.append(pred)
                    except Exception as e:
                        self.logger.error(f"Error processing prompt: {e}")
                        predictions.append(0)  # fall back to 'benign' when generation fails
                        batch_failed += 1
                total_failed += batch_failed

                all_predictions.extend(predictions)
                all_labels.extend(chunk['Label'].tolist())
                total_processed += len(chunk)
                self.logger.info(f"Processed {total_processed} samples...")

                # Running metrics; AUC is only defined once both classes have been seen.
                curr_accuracy = accuracy_score(all_labels, all_predictions)
                if len(set(all_labels)) > 1:
                    curr_auc = roc_auc_score(all_labels, all_predictions)
                    self.logger.info(f"Current Accuracy: {curr_accuracy:.4f}, AUC: {curr_auc:.4f}")
                else:
                    self.logger.info(f"Current Accuracy: {curr_accuracy:.4f}")
                self.logger.info(
                    f"Successfully processed samples in this batch: "
                    f"{len(predictions) - batch_failed}/{len(predictions)}"
                )

        except Exception as e:
            self.logger.error(f"Error during evaluation: {e}")
            if len(all_predictions) == 0:
                raise

        accuracy = accuracy_score(all_labels, all_predictions)
        auc = roc_auc_score(all_labels, all_predictions)
        report = classification_report(all_labels, all_predictions)

        return {
            'accuracy': accuracy,
            'auc': auc,
            'classification_report': report,
            'total_samples': total_processed,
            'failed_samples': total_failed,
            'predictions': all_predictions,
            'true_labels': all_labels
        }


def main():
    parser = argparse.ArgumentParser(description="Zero-shot testing of LLaMA 3.1 on network flows")
    parser.add_argument("--test_path", required=True, help="Path to test CSV file")
    parser.add_argument("--output_dir", required=True, help="Directory to save results")
    parser.add_argument("--batch_size", type=int, default=32, help="Batch size for processing")
    parser.add_argument("--max_samples", type=int, default=None,
                        help="Maximum number of samples to test (None for all)")

    args = parser.parse_args()
    logger = setup_logging()

    output_path = Path(args.output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    tester = ZeroShotTester()

    try:
        results = tester.evaluate_dataset(
            args.test_path,
            batch_size=args.batch_size,
            max_samples=args.max_samples
        )

        logger.info("\nFinal Results:")
        logger.info(f"Total samples processed: {results['total_samples']}")
        logger.info(f"Successfully processed samples: {results['total_samples'] - results['failed_samples']}")
        logger.info(f"Accuracy: {results['accuracy']:.4f}")
        logger.info(f"AUC: {results['auc']:.4f}")
        logger.info("\nClassification Report:")
        logger.info(results['classification_report'])

        # Convert predictions and labels to plain Python ints so json.dump can serialize them.
        results['predictions'] = [int(p) for p in results['predictions']]
        results['true_labels'] = [int(l) for l in results['true_labels']]
        with open(output_path / 'zero_shot_results.json', 'w') as f:
            json.dump(results, f, indent=4)

    except Exception as e:
        logger.error(f"Error in main execution: {e}")
        raise


if __name__ == "__main__":
    main()
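

# Example invocation (a sketch: the script filename, CSV path, and output directory below are
# placeholders; the CSV is expected to contain the NetFlow fields used in format_flow_prompt
# plus a binary 'Label' column):
#
#   python zero_shot_llama_flows.py --test_path test_flows.csv --output_dir results/ --max_samples 1000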