# app.py - Gradio version (much simpler for HF Spaces)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import logging
import spaces
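# `spaces` is Hugging Face's Spaces helper package (pip install spaces); it
# provides the @spaces.GPU decorator used below for ZeroGPU hardware.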

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global variables for model and tokenizer
model = None
tokenizer = None
label_mapping = {0: "✅ Correct", 1: "🤔 Conceptually Flawed", 2: "🔢 Computationally Flawed"}
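# NOTE: these integer ids are assumed to match the label order used when the
# classifier was fine-tuned; adjust the mapping if your training labels differ.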

def load_model():
    """Load your trained model here."""
    global model, tokenizer
    try:
        # Replace these with your actual model path/name.
        # Option 1: load from local files:
        #   model = AutoModelForSequenceClassification.from_pretrained("./your_model_directory")
        #   tokenizer = AutoTokenizer.from_pretrained("./your_model_directory")
        # Option 2: load from the Hugging Face Hub (if you upload your model there):
        #   model = AutoModelForSequenceClassification.from_pretrained("your-username/your-model-name")
        #   tokenizer = AutoTokenizer.from_pretrained("your-username/your-model-name")

        # For now, this is a placeholder - replace it with your actual model loading.
        logger.warning("Using placeholder model loading - replace with your actual model!")
        model_name = "distilbert-base-uncased"  # Replace with your model
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(
            model_name,
            num_labels=3,
            ignore_mismatched_sizes=True,  # lets the new 3-label head replace the checkpoint's head
        )
        model.eval()  # disable dropout for deterministic inference
        logger.info("Model loaded successfully")
        return "Model loaded successfully!"
    except Exception as e:
        logger.error(f"Error loading model: {e}")
        return f"Error loading model: {e}"
@spaces.GPU
def classify_solution(question: str, solution: str):
    """
    Classify the math solution.

    Returns: (classification_label, confidence_string, explanation)
    """
    if not question.strip() or not solution.strip():
        return "Please fill in both fields", "0%", ""
    if model is None or tokenizer is None:
        return "Model not loaded", "0%", ""
    try:
        # Combine question and solution into a single input
        text_input = f"Question: {question}\nSolution: {solution}"

        # Tokenize input
        inputs = tokenizer(
            text_input,
            return_tensors="pt",
            truncation=True,
            padding=True,
            max_length=512,
        )
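        # Hypothetical device hand-off (not in the original code): under @spaces.GPU,
        # CUDA is available here, but nothing below moves the model or tensors off the
        # CPU. A minimal sketch, if GPU inference is desired:
        #   device = "cuda" if torch.cuda.is_available() else "cpu"
        #   model.to(device)
        #   inputs = {k: v.to(device) for k, v in inputs.items()}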

        # Get model prediction
        with torch.no_grad():
            outputs = model(**inputs)
            predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
            predicted_class = torch.argmax(predictions, dim=-1).item()
            confidence = predictions[0][predicted_class].item()

        classification = label_mapping[predicted_class]

        # Create explanation based on classification
        explanations = {
            0: "The mathematical approach and calculations are both sound.",
            1: "The approach or understanding has fundamental issues.",
            2: "The approach is correct, but there are calculation errors.",
        }
        explanation = explanations[predicted_class]

        return classification, f"{confidence:.2%}", explanation
    except Exception as e:
        logger.error(f"Error during classification: {e}")
        return f"Classification error: {str(e)}", "0%", ""
# Load model on startup
load_model()
# Create Gradio interface
with gr.Blocks(title="Math Solution Classifier", theme=gr.themes.Soft()) as app:
gr.Markdown("# 🧮 Math Solution Classifier")
gr.Markdown("Classify math solutions as correct, conceptually flawed, or computationally flawed.")
with gr.Row():
with gr.Column():
question_input = gr.Textbox(
label="Math Question",
placeholder="e.g., Solve for x: 2x + 5 = 13",
lines=3
)
solution_input = gr.Textbox(
label="Proposed Solution",
placeholder="e.g., 2x + 5 = 13\n2x = 13 - 5\n2x = 8\nx = 4",
lines=5
)
classify_btn = gr.Button("Classify Solution", variant="primary")
with gr.Column():
classification_output = gr.Textbox(label="Classification", interactive=False)
confidence_output = gr.Textbox(label="Confidence", interactive=False)
explanation_output = gr.Textbox(label="Explanation", interactive=False, lines=3)
    # Examples
    gr.Examples(
        examples=[
            [
                "Solve for x: 2x + 5 = 13",
                "2x + 5 = 13\n2x = 13 - 5\n2x = 8\nx = 4",
            ],
            [
                "John has three apples and Mary has seven. How many apples do they have together?",
                "They have 7 + 3 = 11 apples.",  # Intentionally computationally flawed (7 + 3 = 10)
            ],
            [
                "What is 15% of 200?",
                "15% = 15/100 = 0.15\n0.15 × 200 = 30",
            ],
        ],
        inputs=[question_input, solution_input],
    )
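    # Note: without fn/outputs, these examples only prefill the input boxes. The
    # standard gr.Examples options fn=, outputs=, and cache_examples=True would
    # precompute results, but on ZeroGPU that would request a GPU at build time,
    # so they are left off here.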
    classify_btn.click(
        fn=classify_solution,
        inputs=[question_input, solution_input],
        outputs=[classification_output, confidence_output, explanation_output],
    )

if __name__ == "__main__":
    app.launch()
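    # On Spaces the default launch() is sufficient; for a local or Docker run you
    # might pass e.g. app.launch(server_name="0.0.0.0", server_port=7860)
    # (standard Gradio launch arguments).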