import os

os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin the run to one GPU; must be set before torch initializes CUDA

import torch
from datasets import Dataset
from unsloth import FastLanguageModel
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from transformers import TrainingArguments
from trl import SFTTrainer
import logging
from typing import List, Dict
import json


def setup_logging():
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    return logging.getLogger(__name__)


def sample_balanced_dataset(df, max_samples_per_class=1000):
    """Sample a balanced subset of the data: cap every class at max_samples_per_class rows."""
    sampled_dfs = []

    benign_df = df[df['Attack'].str.lower() == 'benign']
    attack_df = df[df['Attack'].str.lower() != 'benign']

    # Cap the benign class.
    if len(benign_df) > max_samples_per_class:
        benign_sampled = benign_df.sample(n=max_samples_per_class, random_state=42)
        sampled_dfs.append(benign_sampled)
    else:
        sampled_dfs.append(benign_df)

    # Cap each attack class independently.
    for attack_type in attack_df['Attack'].unique():
        attack_type_df = attack_df[attack_df['Attack'] == attack_type]
        if len(attack_type_df) > max_samples_per_class:
            sampled = attack_type_df.sample(n=max_samples_per_class, random_state=42)
            sampled_dfs.append(sampled)
        else:
            sampled_dfs.append(attack_type_df)

    return pd.concat(sampled_dfs, ignore_index=True)


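# Illustrative only: with max_samples_per_class=2, a frame holding three "Benign" rows and
# five "ddos" rows would be reduced to 2 + 2 = 4 rows; the fixed random_state=42 makes the
# draw reproducible across runs.

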
class NetworkFlowDataProcessor:
    def __init__(self):
        self.logger = setup_logging()
        # Scaler and feature lists are kept for optional numeric preprocessing;
        # they are not used elsewhere in this script.
        self.scaler = MinMaxScaler()
        self.numerical_features = [
            'L4_SRC_PORT', 'L4_DST_PORT', 'PROTOCOL', 'L7_PROTO',
            'IN_BYTES', 'OUT_BYTES', 'IN_PKTS', 'OUT_PKTS',
            'TCP_FLAGS', 'FLOW_DURATION_MILLISECONDS'
        ]
        self.categorical_features = ['IPV4_SRC_ADDR', 'IPV4_DST_ADDR']

    def process_ip_address(self, ip: str) -> str:
        """Convert an IP address into a more descriptive label."""
        parts = ip.split('.')
        # Guard against malformed addresses before indexing into the octets.
        if len(parts) == 4 and parts[0] == '192' and parts[1] == '168':
            return f"internal_network_{parts[2]}_{parts[3]}"
        return f"external_network_{ip}"

    def format_flow_data(self, row: pd.Series) -> str:
        """Format network flow data into a descriptive text"""
        return f"""Network Flow Description:
Source: {self.process_ip_address(row['IPV4_SRC_ADDR'])} (Port: {row['L4_SRC_PORT']})
Destination: {self.process_ip_address(row['IPV4_DST_ADDR'])} (Port: {row['L4_DST_PORT']})
Protocol Information:
- Protocol ID: {row['PROTOCOL']}
- Layer 7 Protocol: {row['L7_PROTO']}
- TCP Flags: {row['TCP_FLAGS']}
Traffic Metrics:
- Bytes: {row['IN_BYTES']} inbound, {row['OUT_BYTES']} outbound
- Packets: {row['IN_PKTS']} inbound, {row['OUT_PKTS']} outbound
- Duration: {row['FLOW_DURATION_MILLISECONDS']} milliseconds"""

    def get_attack_description(self, attack_type: str) -> str:
        """Get detailed description of attack type"""
        descriptions = {
            "benign": "This is normal network traffic with no malicious intent.",
            "ddos": "A Distributed Denial of Service attack attempting to overwhelm network resources.",
            "dos": "A Denial of Service attack targeting system availability.",
            "injection": "An attack attempting to inject malicious code or commands.",
            "scanning": "Network scanning activity to discover vulnerabilities.",
            "backdoor": "Malicious activity indicating backdoor access attempts.",
            "mitm": "Man-in-the-Middle attack intercepting network communications.",
            "password": "Password-based attack attempting unauthorized access.",
            "ransomware": "Ransomware-related network activity.",
            "xss": "Cross-Site Scripting attack targeting web applications."
        }
        return descriptions.get(attack_type.lower(), "Unknown attack type")

    def prepare_training_text(self, row: pd.Series) -> str:
        """Prepare a single training example in the LLaMA-3 chat format"""
        flow_text = self.format_flow_data(row)
        attack_type = row['Attack'].lower() if 'Attack' in row else 'benign'
        attack_desc = self.get_attack_description(attack_type)

        # LLaMA-3 instruct template: each "<|end_header_id|>" is followed by a blank line
        # before the message body, and every turn ends with "<|eot_id|>".
        return f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>

Analyze this network flow for potential security threats:

{flow_text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

This network flow is classified as {attack_type}. {attack_desc}

Key indicators from the flow data:
- Traffic volume: {row['IN_BYTES'] + row['OUT_BYTES']} total bytes
- Flow duration: {row['FLOW_DURATION_MILLISECONDS']} ms
- Protocol behavior: {row['TCP_FLAGS']} TCP flags<|eot_id|>"""


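# Note: prepare_training_text() writes the ground-truth label and its description into the
# assistant turn, so supervised fine-tuning teaches the model to answer with that
# classification-plus-explanation format on unseen flows.

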
def load_and_process_data(train_path: str, processor: NetworkFlowDataProcessor, max_samples_per_class=50000):
    """Load the training CSV, balance it per class, and format each flow as chat-style text."""
    logger = setup_logging()
    logger.info(f"Loading data from {train_path}")

    df = pd.read_csv(train_path)
    df = sample_balanced_dataset(df, max_samples_per_class)
    logger.info(f"Sampled dataset size: {len(df)}")

    texts = [processor.prepare_training_text(row) for _, row in df.iterrows()]
    dataset = Dataset.from_pandas(pd.DataFrame({'text': texts}))

    return dataset


def main():
    logger = setup_logging()

    processor = NetworkFlowDataProcessor()

    train_dataset = load_and_process_data("data/train.csv", processor, max_samples_per_class=50000)

    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name="unsloth/llama-3-8b-Instruct-bnb-4bit",
        max_seq_length=2048,
        load_in_4bit=True,
    )

    # Use the EOS token for padding and pad on the right so batched examples align.
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.padding_side = "right"

    model = FastLanguageModel.get_peft_model(
        model,
        r=16,
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
        lora_alpha=16,
        lora_dropout=0,
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=3407,
    )

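    # With lora_alpha=16 and r=16 the LoRA scaling factor is alpha/r = 1.0; only the adapter
    # weights on the listed projection modules are trained, the 4-bit base weights stay frozen.
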
    training_args = TrainingArguments(
        output_dir="cybersec_model_output",
        num_train_epochs=3,
        per_device_train_batch_size=64,
        gradient_accumulation_steps=4,
        learning_rate=2e-4,
        bf16=True,
        logging_steps=10,
        save_strategy="epoch",
        optim="adamw_8bit",
        lr_scheduler_type="cosine",
    )

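    # Effective batch size per optimizer step: 64 (per device) x 4 (accumulation) = 256
    # sequences; lower per_device_train_batch_size if this exceeds GPU memory.
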
    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=train_dataset,
        dataset_text_field="text",
        max_seq_length=2048,
        args=training_args,
    )

    logger.info("Starting training...")
    trainer.train()

    logger.info("Saving model...")
    model.save_pretrained("cybersec_model")
    tokenizer.save_pretrained("cybersec_model")


if __name__ == "__main__":
    main()
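

# --- Optional inference sketch (illustrative only; not executed by this script) ----------
# Assumes the LoRA adapter saved above in "cybersec_model" and a pandas Series `sample_row`
# with the same columns as the training CSV (hypothetical name).
#
#   model, tokenizer = FastLanguageModel.from_pretrained(
#       model_name="cybersec_model", max_seq_length=2048, load_in_4bit=True,
#   )
#   FastLanguageModel.for_inference(model)  # enable unsloth's faster generation path
#   flow_text = NetworkFlowDataProcessor().format_flow_data(sample_row)
#   prompt = (
#       "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
#       f"Analyze this network flow for potential security threats:\n\n{flow_text}<|eot_id|>"
#       "<|start_header_id|>assistant<|end_header_id|>\n\n"
#   )
#   inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
#   output = model.generate(**inputs, max_new_tokens=128)
#   print(tokenizer.decode(output[0], skip_special_tokens=True))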