Update app.py
app.py
CHANGED
@@ -4,11 +4,15 @@ import torch
 import sqlite3
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from peft import PeftModel, PeftConfig
+import torch
+import os
+os.makedirs("offload", exist_ok=True)
 
 # ✅ Load fine-tuned models from Hugging Face Model Hub instead of Kaggle paths
 codellama_model_path = "srishtirai/codellama-sql-finetuned"  # Upload to HF Model Hub
 mistral_model_path = "srishtirai/mistral-sql-finetuned"  # Upload to HF Model Hub
 
+
 def load_model(model_path):
     tokenizer = AutoTokenizer.from_pretrained(model_path)
     tokenizer.pad_token = tokenizer.eos_token
@@ -16,15 +20,19 @@ def load_model(model_path):
 
     peft_config = PeftConfig.from_pretrained(model_path)
     base_model_name = peft_config.base_model_name_or_path
+
     base_model = AutoModelForCausalLM.from_pretrained(
         base_model_name,
-        torch_dtype=torch.float16,
-        device_map="auto"
+        torch_dtype=torch.float16,  # Use FP16 to save memory
+        device_map="auto",          # Automatically allocate layers to CPU/GPU
+        offload_folder="offload"    # ✅ Offload large layers to disk
     )
+
     model = PeftModel.from_pretrained(base_model, model_path)
     model.eval()
     return model, tokenizer
 
+
 # ✅ Load both models from Hugging Face
 codellama_model, codellama_tokenizer = load_model(codellama_model_path)
 mistral_model, mistral_tokenizer = load_model(mistral_model_path)
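The commit passes offload_folder to from_pretrained so that accelerate can spill layers that do not fit in memory to disk, which is a common fix for out-of-memory runtime errors on memory-constrained Spaces hardware; os.makedirs ensures the offload directory exists before loading. For context, a minimal sketch of how the reloaded models might then be invoked for SQL generation; generate_sql, the prompt template, and the generation parameters below are assumptions for illustration, not taken from the Space's actual inference code:

import torch

def generate_sql(model, tokenizer, prompt, max_new_tokens=128):
    # Tokenize the prompt and move tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens, skipping the prompt
    return tokenizer.decode(
        output[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )

# Hypothetical prompt format -- the fine-tuned models' expected template may differ
print(generate_sql(codellama_model, codellama_tokenizer,
                   "Write an SQL query to list all users older than 30:"))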
|