Spaces: Running on Zero
Commit: Update app.py
Browse files — Fix the label issues so that it now actually shows the "thinking" that the model is doing instead of just printing the answers.
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import os
|
|
|
2 |
os.environ["GRADIO_ENABLE_SSR"] = "0"
|
3 |
|
4 |
import gradio as gr
|
@@ -28,6 +29,19 @@ def format_rules(rules):
|
|
28 |
formatted_rules += "</rules>\n"
|
29 |
return formatted_rules
|
30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
def format_transcript(transcript):
    """Return *transcript* wrapped in <transcript> markers for prompt building."""
    wrapped = f"<transcript>\n{transcript}\n</transcript>\n"
    return wrapped
|
@@ -134,10 +148,10 @@ class ModelWrapper:
|
|
134 |
try:
|
135 |
remainder = output_text.split("Brief explanation\n</explanation>")[-1]
|
136 |
thinking_answer_text = remainder.split("</transcript>")[-1]
|
137 |
-
return thinking_answer_text
|
138 |
except:
|
139 |
input_length = len(message)
|
140 |
-
return output_text[input_length:] if len(output_text) > input_length else "No response generated."
|
141 |
|
142 |
# --- Model Cache ---
|
143 |
LOADED_MODELS = {}
|
|
|
1 |
import os
|
2 |
+
import re
|
3 |
os.environ["GRADIO_ENABLE_SSR"] = "0"
|
4 |
|
5 |
import gradio as gr
|
|
|
29 |
formatted_rules += "</rules>\n"
|
30 |
return formatted_rules
|
31 |
|
32 |
+
def format_output(text):
    """Render model output for display.

    Pulls the first <think>...</think> span out as a "Reasoning:" section and
    every <answer>...</answer> span out as an "Answers:" section. When neither
    tag is present, falls back to the stripped raw text.
    """
    sections = []

    reasoning = re.search(r"<think>(.*?)</think>", text, flags=re.DOTALL)
    if reasoning is not None:
        sections.append("Reasoning:\n" + reasoning.group(1).strip() + "\n\n")

    answer_parts = re.findall(r"<answer>(.*?)</answer>", text, flags=re.DOTALL)
    if answer_parts:
        sections.append("Answers:\n" + "\n".join(part.strip() for part in answer_parts))

    combined = "".join(sections)
    return combined.strip() if combined else text.strip()
|
42 |
+
|
43 |
+
|
44 |
+
|
45 |
def format_transcript(transcript):
    """Wrap the given transcript text in <transcript> tags."""
    return f"<transcript>\n{transcript}\n</transcript>\n"
|
|
|
148 |
try:
|
149 |
remainder = output_text.split("Brief explanation\n</explanation>")[-1]
|
150 |
thinking_answer_text = remainder.split("</transcript>")[-1]
|
151 |
+
return format_output(thinking_answer_text)
|
152 |
except:
|
153 |
input_length = len(message)
|
154 |
+
return format_output(output_text[input_length:]) if len(output_text) > input_length else "No response generated."
|
155 |
|
156 |
# --- Model Cache ---
|
157 |
LOADED_MODELS = {}
|