Upload app.py
app.py
CHANGED
@@ -121,17 +121,19 @@ def _classify_generate(question: str, solution: str) -> Tuple[str, str, str]:
     # Prompt must match the format you used in tuning
     prompt = (
         "You are a mathematics tutor.\n"
-        "You are given a math word problem and a student's solution.
-        "
-        "-
-        "-
+        "You are given a math word problem and a student's solution. "
+        "Decide whether the solution is correct.\n\n"
+        "- Correct = all reasoning and calculations are correct.\n"
+        "- Conceptual Error = reasoning is wrong.\n"
+        "- Computational Error= reasoning okay but arithmetic off.\n\n"
         "Reply with ONLY one of these JSON lines:\n"
         '{"verdict": "correct"}\n'
         '{"verdict": "conceptual"}\n'
-        '{"verdict": "computational"}\n\n
+        '{"verdict": "computational"}\n\n'
         f"Question: {question}\n\nSolution:\n{solution}\n\nAnswer:"
     )
 
+
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
     with torch.no_grad():
         out_ids = model.generate(
@@ -139,8 +141,11 @@ def _classify_generate(question: str, solution: str) -> Tuple[str, str, str]:
             max_new_tokens=32,
             pad_token_id=tokenizer.eos_token_id,
         )
-        generated = tokenizer.decode(
-
+        generated = tokenizer.decode(
+            out_ids[0][inputs["input_ids"].shape[1]:],
+            skip_special_tokens=True,
+        ).strip()
+
 
     # Try to parse last JSON line
     verdict = "Unparsed"
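The trailing context lines of the second hunk ("# Try to parse last JSON line", verdict = "Unparsed") refer to parsing code that sits below the hunk and is not shown in this diff. The sketch below is only an illustration of how that step could work, assuming the contract stated in the prompt (the model replies with a single JSON line such as {"verdict": "correct"}); the helper name parse_verdict is hypothetical and not taken from app.py.

import json

def parse_verdict(generated: str) -> str:
    # Hypothetical helper: scan the model reply from the last line upward and
    # return the first well-formed {"verdict": ...} object, falling back to
    # the "Unparsed" default used in app.py.
    verdict = "Unparsed"
    for line in reversed(generated.splitlines()):
        line = line.strip()
        if not line:
            continue
        try:
            data = json.loads(line)
        except json.JSONDecodeError:
            continue
        if isinstance(data, dict) and "verdict" in data:
            verdict = str(data["verdict"])
            break
    return verdict

With the decoded text from the added lines of the diff, this would be invoked as verdict = parse_verdict(generated), keeping "Unparsed" when the reply contains no valid JSON line.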