mcamargo00 committed
Commit 1334832 (verified) · Parent(s): 47127c7

Upload 5 files
Files changed (5)
  1. README.md +32 -32
  2. app.py +11 -5
  3. deployment_files.py +48 -48
  4. requirements.txt +1 -5
  5. webapp_TODO.md +4 -0
README.md CHANGED
@@ -1,33 +1,33 @@
----
-title: Math Solution Classifier
-emoji: 🧮
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 4.44.0
-app_file: app.py
-pinned: false
----
-
-# Math Solution Classifier
-
-This application classifies math solutions into three categories:
-- **Correct**: Solution is mathematically sound
-- **Conceptually Flawed**: Wrong approach or understanding
-- **Computationally Flawed**: Right approach, calculation errors
-
-## Usage
-
-1. Enter a math question
-2. Enter the proposed solution
-3. Click "Classify Solution"
-4. Get instant feedback on the solution quality
-
-Built with Gradio and your custom trained model.
-
-# requirements.txt
-gradio==4.44.0
-torch==2.1.0
-transformers==4.35.0
-peft==0.7.1
+---
+title: Math Solution Classifier
+emoji: 🧮
+colorFrom: blue
+colorTo: purple
+sdk: gradio
+sdk_version: 4.44.0
+app_file: app.py
+pinned: false
+---
+
+# Math Solution Classifier
+
+This application classifies math solutions into three categories:
+- **Correct**: Solution is mathematically sound
+- **Conceptually Flawed**: Wrong approach or understanding
+- **Computationally Flawed**: Right approach, calculation errors
+
+## Usage
+
+1. Enter a math question
+2. Enter the proposed solution
+3. Click "Classify Solution"
+4. Get instant feedback on the solution quality
+
+Built with Gradio and your custom trained model.
+
+# requirements.txt
+gradio==4.44.0
+torch==2.1.0
+transformers==4.35.0
+peft==0.7.1
 accelerate==0.25.0
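The usage flow described in the README (enter a question and a proposed solution, click "Classify Solution", read the verdict) maps onto a small Gradio Blocks layout. A minimal sketch, assuming it sits in app.py next to the `classify_solution` handler shown in the diff below; the widget labels and the stub handler are illustrative, not the Space's actual layout:

```python
import gradio as gr

# Stub only so the sketch runs standalone; in the Space this would be the
# real classify_solution from app.py, returning (classification, confidence, explanation).
def classify_solution(question: str, solution: str):
    return "Correct", "0.99", "stub explanation"


with gr.Blocks(title="Math Solution Classifier") as demo:
    question = gr.Textbox(label="Math question", lines=4)
    solution = gr.Textbox(label="Proposed solution", lines=8)
    classify_btn = gr.Button("Classify Solution")

    label = gr.Textbox(label="Classification")
    confidence = gr.Textbox(label="Confidence")
    explanation = gr.Textbox(label="Explanation", lines=4)

    # Wire the button to the handler: two text inputs in, three text outputs back.
    classify_btn.click(
        fn=classify_solution,
        inputs=[question, solution],
        outputs=[label, confidence, explanation],
    )

if __name__ == "__main__":
    demo.launch()
```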
app.py CHANGED
@@ -18,6 +18,8 @@ import re
 import math
 import time
 
+DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
 # Set up logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -322,10 +324,10 @@ classifier_tokenizer = None
 
 def load_model():
     """Load your trained model here"""
-    global gemma_model, gemma_tokenizer, classifier_model, classifier_tokenizer
+    global gemma_model, gemma_tokenizer, classifier_model, classifier_tokenizer, DEVICE
 
     try:
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        device = DEVICE
 
         # --- Model 1: Equation Extractor (Gemma-3 with Unsloth) ---
         extractor_adapter_repo = "arvindsuresh-math/gemma-3-1b-equation-extractor-lora"
@@ -392,6 +394,8 @@ def load_model():
     except Exception as e:
         logger.error(f"Error loading model: {e}")
         return f"Error loading model: {e}"
+def models_ready():
+    return all([gemma_model, gemma_tokenizer, classifier_model, classifier_tokenizer])
 
 @spaces.GPU
 def analyze_single(math_question: str, proposed_solution: str, debug: bool = False):
@@ -401,6 +405,8 @@ def analyze_single(math_question: str, proposed_solution: str, debug: bool = Fal
     Stage 2: conceptual/correct check via Phi-4 classifier.
     Returns: {"classification": "...", "confidence": "...", "explanation": "..."}
     """
+    global DEVICE
+    device = DEVICE
     # -----------------------------
     # STAGE 1: COMPUTATIONAL CHECK
     # -----------------------------
@@ -506,13 +512,13 @@ def classify_solution(question: str, solution: str):
     if not question.strip() or not solution.strip():
        return "Please fill in both fields", 0.0, ""
 
-    if not model or not tokenizer:
-        return "Model not loaded", 0.0, ""
+    if not models_ready():
+        return "Models not loaded", 0.0, ""
 
     try:
         res = analyze_single(question, solution)
 
-        return list(res.values())
+        return res["classification"], res["confidence"], res["explanation"]
     except Exception:
         logger.exception("inference failed")
 
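Net effect of the app.py changes: device selection is pinned to a single `DEVICE` constant, the readiness check now covers the four globals that actually exist (the old guard referenced undefined `model`/`tokenizer` names), and `classify_solution` returns the three fields explicitly instead of relying on the key order behind `list(res.values())`. A condensed sketch of the resulting flow; the model-loading code and the two-stage `analyze_single` pipeline are stubbed out here and are not the Space's actual implementation:

```python
import logging

import torch

logger = logging.getLogger(__name__)

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Populated by load_model(); None until loading succeeds.
gemma_model = gemma_tokenizer = classifier_model = classifier_tokenizer = None


def models_ready() -> bool:
    """True only when both pipeline stages (extractor + classifier) are loaded."""
    return all([gemma_model, gemma_tokenizer, classifier_model, classifier_tokenizer])


def analyze_single(question: str, solution: str, debug: bool = False) -> dict:
    """Stub for the real two-stage check (equation extractor, then Phi-4 classifier)."""
    return {"classification": "Correct", "confidence": "0.99", "explanation": "stub"}


def classify_solution(question: str, solution: str):
    if not question.strip() or not solution.strip():
        return "Please fill in both fields", 0.0, ""

    if not models_ready():
        return "Models not loaded", 0.0, ""

    try:
        res = analyze_single(question, solution)
        # Explicit field access instead of list(res.values()).
        return res["classification"], res["confidence"], res["explanation"]
    except Exception:
        logger.exception("inference failed")
        # Assumed fallback; the diff cuts off before the except block's return.
        return "Error during inference", 0.0, ""
```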
deployment_files.py CHANGED
@@ -1,49 +1,49 @@
-# requirements.txt
-fastapi==0.104.1
-uvicorn==0.24.0
-torch==2.1.0
-transformers==4.35.0
-python-multipart==0.0.6
-pydantic==2.5.0
-
-# README.md for your Hugging Face Space
----
-title: Math Solution Classifier
-emoji: 🧮
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 4.7.1
-app_file: app.py
-pinned: false
----
-
-# Math Solution Classifier
-
-This application classifies math solutions into three categories:
-- **Correct**: Solution is mathematically sound
-- **Conceptually Flawed**: Wrong approach or understanding
-- **Computationally Flawed**: Right approach, calculation errors
-
-## Usage
-
-1. Enter a math question
-2. Enter the proposed solution
-3. Click "Classify Solution"
-4. Get instant feedback on the solution quality
-
-Built with FastAPI and your custom trained model.
-
-# Dockerfile (optional, for other hosting platforms)
-FROM python:3.9-slim
-
-WORKDIR /app
-
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-COPY . .
-
-EXPOSE 7860
-
+# requirements.txt
+fastapi==0.104.1
+uvicorn==0.24.0
+torch==2.1.0
+transformers==4.35.0
+python-multipart==0.0.6
+pydantic==2.5.0
+
+# README.md for your Hugging Face Space
+---
+title: Math Solution Classifier
+emoji: 🧮
+colorFrom: blue
+colorTo: purple
+sdk: gradio
+sdk_version: 4.7.1
+app_file: app.py
+pinned: false
+---
+
+# Math Solution Classifier
+
+This application classifies math solutions into three categories:
+- **Correct**: Solution is mathematically sound
+- **Conceptually Flawed**: Wrong approach or understanding
+- **Computationally Flawed**: Right approach, calculation errors
+
+## Usage
+
+1. Enter a math question
+2. Enter the proposed solution
+3. Click "Classify Solution"
+4. Get instant feedback on the solution quality
+
+Built with FastAPI and your custom trained model.
+
+# Dockerfile (optional, for other hosting platforms)
+FROM python:3.9-slim
+
+WORKDIR /app
+
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+EXPOSE 7860
+
 CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
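The Dockerfile's CMD expects an ASGI application exposed as `app` inside app.py, served by uvicorn on port 7860. A minimal sketch of such a FastAPI entry point; the `/classify` route, request model, and stub handler are assumptions for illustration, not the Space's actual code:

```python
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI(title="Math Solution Classifier")


class ClassifyRequest(BaseModel):
    question: str
    solution: str


def classify_solution(question: str, solution: str):
    """Stub for the real two-stage pipeline in app.py."""
    return "Correct", "0.99", "stub explanation"


@app.post("/classify")
def classify(req: ClassifyRequest):
    # The handler returns (classification, confidence, explanation); expose it as JSON.
    classification, confidence, explanation = classify_solution(req.question, req.solution)
    return {
        "classification": classification,
        "confidence": confidence,
        "explanation": explanation,
    }
```

Locally this would be started with `uvicorn app:app --host 0.0.0.0 --port 7860`, matching the EXPOSE and CMD lines above.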
requirements.txt CHANGED
@@ -4,8 +4,4 @@ transformers
 peft
 accelerate
 spaces
-unsloth
-json
-re
-math
-time
+unsloth
webapp_TODO.md ADDED
@@ -0,0 +1,4 @@
+- [ ] update prompt given to the model (before the actual question/answer) to match the prompt used in training
+- [ ] figure out how to merge the LoRA adapter and classifier head with the base model
+- [ ] add new examples
+- [ ] update the interface so that one can select types of problems from the examples
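For the second TODO item, a common route is peft's `merge_and_unload()`, which folds the LoRA deltas back into the base weights. A sketch, assuming the extractor adapter repo named in app.py and a hypothetical base checkpoint (the commit does not state which base the adapter was trained on); the Phi-4 classifier's head would still need to be exported alongside its merged weights, for example by loading that base as a sequence-classification model before attaching its adapter:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed base checkpoint; the commit only names the adapter repo.
BASE_MODEL = "google/gemma-3-1b-it"
ADAPTER_REPO = "arvindsuresh-math/gemma-3-1b-equation-extractor-lora"

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

# Attach the LoRA adapter, then fold its weights into the base model so the
# merged checkpoint can be loaded without peft (or Unsloth) at inference time.
model = PeftModel.from_pretrained(base, ADAPTER_REPO)
merged = model.merge_and_unload()

merged.save_pretrained("gemma-3-1b-equation-extractor-merged")
tokenizer.save_pretrained("gemma-3-1b-equation-extractor-merged")
```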