import pandas as pd
from unsloth import FastLanguageModel
from transformers import TextStreamer
import torch
import random
import logging

def setup_logging():
    """Configure basic logging and return a module-level logger."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)

def print_separator(title="", char="=", length=80):
    """Print a separator with optional title"""
    if title:
        side_length = (length - len(title) - 2) // 2
        print(char * side_length + f" {title} " + char * side_length)
    else:
        print(char * length)

def format_flow_prompt(row):
    """Format a single network flow into a prompt"""
    flow_text = f"""Network Flow Description:
Source: {row['IPV4_SRC_ADDR']} (Port: {row['L4_SRC_PORT']})
Destination: {row['IPV4_DST_ADDR']} (Port: {row['L4_DST_PORT']})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

    # Format in LLaMA-3 style
    return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>
Analyze this network flow for potential security threats:
{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""
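
# NOTE: the prompt above is hand-rolled in LLaMA-3 style. If the fine-tuned
# checkpoint's tokenizer ships with a chat template, a sketch of a more
# robust alternative (assuming such a template is present) would be:
#   prompt = tokenizer.apply_chat_template(
#       [{"role": "user", "content": flow_text}],
#       tokenize=False, add_generation_prompt=True,
#   )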

def analyze_single_flow(model_path="cybersec_model_output/checkpoint-4329",
                        test_file="data/test.csv",
                        index=None,
                        attack_type=None):
    """Analyze a single network flow and show the model's complete response"""
    logger = setup_logging()
    print_separator("LOADING DATA AND MODEL", "=")

    # Load test data
    logger.info(f"Loading test data from {test_file}")
    test_df = pd.read_csv(test_file)

    # Select sample based on criteria
    if attack_type:
        attack_samples = test_df[test_df['Attack'].str.lower() == attack_type.lower()]
        if len(attack_samples) == 0:
            raise ValueError(f"No samples found for attack type: {attack_type}")
        sample = attack_samples.iloc[random.randint(0, len(attack_samples) - 1)]
    elif index is not None:
        sample = test_df.iloc[index]
    else:
        sample = test_df.iloc[random.randint(0, len(test_df) - 1)]

    # Load model
    logger.info(f"Loading model from {model_path}")
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_path,
        max_seq_length=2048,
        load_in_4bit=True,
    )
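    # NOTE: load_in_4bit=True assumes bitsandbytes is installed and a CUDA
    # GPU is available; 4-bit quantized loading keeps the checkpoint's memory
    # footprint small at some cost in numerical precision.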

    # Set up tokenizer
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"
    FastLanguageModel.for_inference(model)

    print_separator("SAMPLE INFORMATION", "=")
    logger.info(f"Selected flow index: {sample.name}")
    logger.info(f"True label: {sample['Attack']}")

    # Prepare input
    prompt = format_flow_prompt(sample)
    inputs = tokenizer(
        prompt,
        return_tensors="pt",
        truncation=True,
        max_length=2048
    ).to("cuda")
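    # NOTE: .to("cuda") hard-codes GPU inference. A more portable sketch,
    # keeping the same tokenizer call, would pick the device dynamically:
    #   device = "cuda" if torch.cuda.is_available() else "cpu"
    #   inputs = tokenizer(...).to(device)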

    # Set up streamer for real-time output
    streamer = TextStreamer(tokenizer)

    with open("model_output.txt", "w") as f:
        # Write separators and metadata
        f.write("=" * 80 + "\n")
        f.write("NETWORK FLOW ANALYSIS\n")
        f.write("=" * 80 + "\n\n")

        f.write("-" * 80 + "\n")
        f.write("METADATA\n")
        f.write("-" * 80 + "\n")
        f.write(f"Flow Index: {sample.name}\n")
        f.write(f"True Label: {sample['Attack']}\n\n")

        f.write("-" * 80 + "\n")
        f.write("INPUT PROMPT\n")
        f.write("-" * 80 + "\n")
        f.write(f"{prompt}\n\n")

        print_separator("MODEL OUTPUT", "=")
        logger.info("Generating analysis...")

        # Generate and capture output
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            streamer=streamer,
            use_cache=True
        )
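        # NOTE: generation uses the model's default decoding settings (greedy
        # unless the checkpoint's generation config says otherwise), and
        # max_new_tokens=256 caps the analysis length. Passing, for example,
        # do_sample=True and temperature=0.7 would be one (assumed) way to
        # elicit more varied analyses of the same flow.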

        # Write the complete output
        f.write("-" * 80 + "\n")
        f.write("COMPLETE OUTPUT (including special tokens)\n")
        f.write("-" * 80 + "\n")
        full_output = tokenizer.decode(outputs[0], skip_special_tokens=False)
        f.write(f"{full_output}\n\n")

        # Write cleaned output
        f.write("-" * 80 + "\n")
        f.write("CLEANED OUTPUT (without special tokens)\n")
        f.write("-" * 80 + "\n")
        cleaned_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
        f.write(cleaned_output)
        f.write("\n" + "=" * 80 + "\n")

    print_separator()
    logger.info("Output saved to model_output.txt")
    print_separator()

def main():
    # Example usage:
    print_separator("STARTING ANALYSIS", "=")

    # Choose one of these options:
    # 1. Random sample
    analyze_single_flow()

    # 2. Specific attack type
    # analyze_single_flow(attack_type="ddos")

    # 3. Specific index
    # analyze_single_flow(index=42)

    print_separator("ANALYSIS COMPLETE", "=")


if __name__ == "__main__":
    main()
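
# Example shell invocation (the script filename here is an assumption):
#   python analyze_single_flow.py
# Streams the model's analysis to the console and saves the full transcript
# to model_output.txt.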