Upload app.py
app.py CHANGED
@@ -15,24 +15,32 @@ tokenizer = None
 label_mapping = {0: "✅ Correct", 1: "🤔 Conceptually Flawed", 2: "🔢 Computationally Flawed"}
 
 def load_model():
-    """Load your trained model"""
+    """Load your trained LoRA adapter with base model"""
     global model, tokenizer
 
     try:
-
-        # Option 1: Load from local files
-        # model = AutoModelForSequenceClassification.from_pretrained("./your_model_directory")
-        # tokenizer = AutoTokenizer.from_pretrained("./your_model_directory")
+        from peft import AutoPeftModelForSequenceClassification
 
-        #
-        #
-
+        # Load the LoRA adapter model
+        # The adapter files should be in a folder (e.g., "./lora_adapter")
+        model = AutoPeftModelForSequenceClassification.from_pretrained(
+            "./lora_adapter",  # Path to your adapter files
+            torch_dtype=torch.float16,
+            device_map="auto"
+        )
+
+        # Load tokenizer from the same directory
+        tokenizer = AutoTokenizer.from_pretrained("./lora_adapter")
+
+        logger.info("LoRA model loaded successfully")
+        return "LoRA model loaded successfully!"
 
-
+    except Exception as e:
+        logger.error(f"Error loading LoRA model: {e}")
+        # Fallback to placeholder for testing
         logger.warning("Using placeholder model loading - replace with your actual model!")
 
-        #
-        model_name = "distilbert-base-uncased"  # Replace with your model
+        model_name = "microsoft/DialoGPT-medium"  # Closer to Phi-4 architecture
         tokenizer = AutoTokenizer.from_pretrained(model_name)
         model = AutoModelForSequenceClassification.from_pretrained(
             model_name,
@@ -40,12 +48,7 @@ def load_model():
             ignore_mismatched_sizes=True
         )
 
-
-        return "Model loaded successfully!"
-
-    except Exception as e:
-        logger.error(f"Error loading model: {e}")
-        return f"Error loading model: {e}"
+        return f"Fallback model loaded. LoRA error: {e}"
 
 def classify_solution(question: str, solution: str):
     """