magdap116 committed
Commit 8324716 · verified · 1 Parent(s): 3a057c5

Update app.py

Files changed (1)
  1. app.py +0 -33
app.py CHANGED
@@ -9,39 +9,6 @@ import json
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-class ModelMathTool(Tool):
-    name = "math_model"
-    description = "Answers advanced math questions using a pretrained math model."
-    inputs = {
-        "problem": {
-            "type": "string",
-            "description": "Math problem to solve.",
-        }
-    }
-
-    output_type = "string"
-
-    def __init__(self, model_id="Qwen/Qwen2.5-Math-7B"):
-        print(f"Loading math model: {model_id}")
-        self.tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-        self.model = AutoModelForCausalLM.from_pretrained(
-            model_id,
-            torch_dtype=torch.float16,
-            device_map="auto",
-            trust_remote_code=True
-        )
-
-
-
-
-    def forward(self, problem: str) -> str:
-        print(f"[MathModelTool] Question: {question}")
-        inputs = self.tokenizer(question, return_tensors="pt").to(self.model.device)
-        with torch.no_grad():
-            output = self.model.generate(**inputs, max_new_tokens=256)
-        response = self.tokenizer.decode(output[0], skip_special_tokens=True)
-        return response
-
 
 # (Keep Constants as is)
 # --- Constants ---
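
Note that the removed block declared a `problem` parameter on `forward` but then referenced an undefined `question` variable, which would have raised a NameError at call time. For reference only, a minimal corrected sketch of such a tool might look like the following; it assumes the `Tool` base class comes from smolagents (the import is not visible in this hunk) and keeps the same `Qwen/Qwen2.5-Math-7B` checkpoint used in the deleted code.

import torch
from smolagents import Tool  # assumption: Tool was imported from smolagents in app.py
from transformers import AutoTokenizer, AutoModelForCausalLM


class ModelMathTool(Tool):
    name = "math_model"
    description = "Answers advanced math questions using a pretrained math model."
    inputs = {
        "problem": {
            "type": "string",
            "description": "Math problem to solve.",
        }
    }
    output_type = "string"

    def __init__(self, model_id="Qwen/Qwen2.5-Math-7B"):
        super().__init__()
        print(f"Loading math model: {model_id}")
        self.tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )

    def forward(self, problem: str) -> str:
        # Use the declared `problem` argument (the removed code referenced
        # an undefined `question` variable here).
        inputs = self.tokenizer(problem, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            output = self.model.generate(**inputs, max_new_tokens=256)
        return self.tokenizer.decode(output[0], skip_special_tokens=True)

Instantiating the tool downloads and loads the 7B checkpoint in float16, so it is only practical on GPU-backed hardware, which may be why the commit drops it from the Space.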
 