Oysiyl committed
Commit 4e38b02 · verified · 1 Parent(s): 29907b1

Update app.py

Files changed (1): app.py (+6 -41)
app.py CHANGED
```diff
@@ -3,9 +3,11 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 import threading
 import torch
 
-# Load model directly
-model = AutoModelForCausalLM.from_pretrained("Oysiyl/gemma-3-1B-GRPO")
-tokenizer = AutoTokenizer.from_pretrained("Oysiyl/gemma-3-1B-GRPO")
+# Load base model directly and then add the adapter
+model = AutoModelForCausalLM.from_pretrained("unsloth/gemma-3-1b-it")
+# Apply adapter from the fine-tuned version
+model.load_adapter("Oysiyl/gemma-3-1B-GRPO")
+tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")
 
 
 def process_history(history):
```
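The new loading path relies on the PEFT integration in `transformers`: `load_adapter` downloads the LoRA weights from the Hub and injects them into the base model, so the Space no longer needs a full merged copy of the checkpoint. Below is a minimal sketch of the equivalent flow using `peft` directly, assuming `Oysiyl/gemma-3-1B-GRPO` hosts a standard PEFT adapter (the `merge_and_unload` step is an optional extra, not part of this commit):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Full base weights come from the instruction-tuned Gemma checkpoint.
base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-3-1b-it")

# Equivalent to model.load_adapter(...): wraps the base model so forward
# passes are routed through the LoRA layers stored in the adapter repo.
model = PeftModel.from_pretrained(base, "Oysiyl/gemma-3-1B-GRPO")

# Optional: fold the adapter deltas into the base weights so inference
# runs without the PEFT wrapper overhead.
model = model.merge_and_unload()

# The tokenizer comes from the base repo, since adapter repos
# usually do not ship their own tokenizer files.
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")
```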
```diff
@@ -74,9 +76,8 @@ def respond(
         output = "Thinking: \n"
         for token in streamer:
             output += token
-            # Check for various solution patterns
+            # Check if "<SOLUTION>" token is in the output and format everything after it as bold
             if "<SOLUTION>" in output:
-                # Original solution pattern
                 solution_start = output.find("<SOLUTION>") + len("<SOLUTION>")
                 solution_end = output.find("</SOLUTION>")
                 if solution_end > solution_start:
```
```diff
@@ -87,42 +88,6 @@ def respond(
                     )
                     yield formatted_output
                 else:
-                    # Handle case where closing tag is missing
-                    formatted_output = (
-                        output[:solution_start] +
-                        "Final answer: **" + output[solution_start:] + "**"
-                    )
-                    yield formatted_output
-            # Check if end_working_out tag is present
-            elif "</end_working_out>" in output:
-                solution_start = output.find("</end_working_out>") + len("</end_working_out>")
-                formatted_output = (
-                    output[:solution_start] +
-                    "\nFinal answer: **" + output[solution_start:] + "**"
-                )
-                yield formatted_output
-            # Check if start_working_out is present but end_working_out is missing
-            elif "<start_working_out>" in output:
-                # Check if there's a SOLUTION tag after start_working_out
-                working_start = output.find("<start_working_out>")
-                if "<SOLUTION>" in output[working_start:]:
-                    solution_start = output.find("<SOLUTION>", working_start) + len("<SOLUTION>")
-                    solution_end = output.find("</SOLUTION>", solution_start)
-                    if solution_end > solution_start:
-                        formatted_output = (
-                            output[:solution_start] +
-                            "Final answer: **" + output[solution_start:solution_end] + "**" +
-                            output[solution_end:]
-                        )
-                        yield formatted_output
-                    else:
-                        formatted_output = (
-                            output[:solution_start] +
-                            "Final answer: **" + output[solution_start:] + "**"
-                        )
-                        yield formatted_output
-                else:
-                    # No clear solution identified
                     yield output
             else:
                 yield output
```
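After this commit, the tag handling collapses to one transformation: bold the text between `<SOLUTION>` and `</SOLUTION>` while streaming. A self-contained sketch of that surviving branch (the helper name `format_solution` is mine, not in app.py):

```python
def format_solution(output: str) -> str:
    """Mirror the branch kept by this commit: wrap the text between
    <SOLUTION> tags in Markdown bold, or return the text unchanged if
    the tags are absent or the closing tag has not streamed yet."""
    if "<SOLUTION>" not in output:
        return output
    solution_start = output.find("<SOLUTION>") + len("<SOLUTION>")
    solution_end = output.find("</SOLUTION>")
    if solution_end <= solution_start:
        return output
    return (
        output[:solution_start]
        + "Final answer: **" + output[solution_start:solution_end] + "**"
        + output[solution_end:]
    )


# Example: the answer span is highlighted, the tags stay in place.
print(format_solution("reasoning...<SOLUTION>144</SOLUTION>"))
# reasoning...<SOLUTION>Final answer: **144**</SOLUTION>
```

Note that the removed `</end_working_out>` and `<start_working_out>` fallbacks are gone entirely: output without a well-formed `<SOLUTION>` pair is now yielded as-is.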
 
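For context when reviewing the streaming loop: `TextIteratorStreamer` only yields tokens while `model.generate` runs in a background thread, which is why app.py imports `threading`. The wiring below is a generic sketch of that pattern (the prompt and generation parameters are illustrative assumptions; only the imports and model IDs are confirmed by the diff):

```python
import threading

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model = AutoModelForCausalLM.from_pretrained("unsloth/gemma-3-1b-it")
model.load_adapter("Oysiyl/gemma-3-1B-GRPO")  # requires `peft` to be installed
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-1b-it")

inputs = tokenizer("What is 12 * 12?", return_tensors="pt")

# skip_prompt drops the echoed input; generate() pushes decoded text
# chunks into the streamer, which the main thread iterates over.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until finished, so it runs in a worker thread while
# the main thread drains the streamer and updates the UI.
thread = threading.Thread(
    target=model.generate,
    kwargs=dict(**inputs, streamer=streamer, max_new_tokens=256),
)
thread.start()

output = "Thinking: \n"
for token in streamer:
    output += token  # same accumulation as in respond()
thread.join()
print(output)
```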