# Import necessary libraries
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)
from datasets import load_dataset

# Step 1: Load the pre-trained model and tokenizer
# Note: DeepSeek-V3 is a very large MoE model, so full fine-tuning needs
# substantial multi-GPU infrastructure. DeepSeek repos ship custom modeling
# code on the Hub, hence trust_remote_code=True.
MODEL_NAME = "deepseek-ai/DeepSeek-V3-0324"  # Pre-trained model from Hugging Face
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, trust_remote_code=True)

# Some causal LM tokenizers ship without a pad token, which would break the
# padding="max_length" call below; fall back to the EOS token if so.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Step 2: Load your custom dataset from Hugging Face
dataset = load_dataset("epicDev123/deepseek")  # Replace with your dataset name if different

# Step 3: Tokenization function (tokenize the text data for model input)
def tokenize_function(examples):
    # Cap the sequence length explicitly: without max_length, padding to
    # "max_length" pads every example to the model's full context window,
    # which wastes memory. 512 is an arbitrary, adjustable choice.
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)

# Tokenize the dataset
tokenized_datasets = dataset.map(tokenize_function, batched=True)
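
# The Trainer below expects a "validation" split. If the dataset ships only a
# "train" split, carve one out here; the 10% test_size is an arbitrary choice.
if "validation" not in tokenized_datasets:
    split = tokenized_datasets["train"].train_test_split(test_size=0.1)
    tokenized_datasets["train"] = split["train"]
    tokenized_datasets["validation"] = split["test"]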

# Step 4: Set up training arguments
training_args = TrainingArguments(
    output_dir="./results",              # Output directory to save the fine-tuned model
    num_train_epochs=3,                  # Number of training epochs
    per_device_train_batch_size=8,       # Batch size for training
    per_device_eval_batch_size=8,        # Batch size for evaluation
    warmup_steps=500,                    # Number of warmup steps
    weight_decay=0.01,                   # Weight decay for regularization
    logging_dir="./logs",                # Directory for logs
    logging_steps=10,                    # Log training every 10 steps
    save_steps=500,                      # Save model checkpoints every 500 steps
    evaluation_strategy="epoch",         # Evaluate after each epoch (recent transformers versions rename this to eval_strategy)
    save_total_limit=2,                  # Limit the number of saved checkpoints
)

# Step 5: Initialize the Trainer
# The data collator copies input_ids into labels (mlm=False selects plain
# causal language modeling); without it the Trainer has no labels and cannot
# compute a loss.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

trainer = Trainer(
    model=model,                                    # Model to train
    args=training_args,                             # Training arguments
    train_dataset=tokenized_datasets["train"],      # Training dataset
    eval_dataset=tokenized_datasets["validation"],  # Validation dataset
    data_collator=data_collator,                    # Builds batches and label tensors
)

# Step 6: Fine-tune the model
trainer.train()
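
# Optional sanity check: trainer.evaluate() returns the evaluation loss, and
# exp(loss) gives perplexity for a causal LM.
import math
eval_metrics = trainer.evaluate()
print(f"Eval loss: {eval_metrics['eval_loss']:.4f}, "
      f"perplexity: {math.exp(eval_metrics['eval_loss']):.2f}")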

# Step 7: Save the fine-tuned model
model.save_pretrained("./fine_tuned_deepseek")   # Save the model to a directory
tokenizer.save_pretrained("./fine_tuned_deepseek")  # Save the tokenizer

# Step 8: Optionally, push the model to the Hugging Face Model Hub.
# Uncomment after authenticating (e.g. via `huggingface-cli login`) and
# replace the repo id with your username and desired model name.
# model.push_to_hub("your-username/fine-tuned-deepseek")
# tokenizer.push_to_hub("your-username/fine-tuned-deepseek")

print("Fine-tuning complete! Your model has been saved.")