Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import yaml
 import re
-from smolagents import HfApiModel
-from tools.final_answer import FinalAnswerTool
+from smolagents import HfApiModel
+from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI
 
 # Updated system prompt: Only output the final, direct advice in plain text.
@@ -28,11 +28,14 @@ def remove_code_snippets(text):
     text = re.sub(r"\[Step \d+: Duration .*", "", text)
     # Remove lines that mention code snippet instructions.
     text = re.sub(r"Make sure to include code with the correct pattern.*", "", text)
-    #
+    # Finally, remove any remaining lines that seem to be debug logs.
     lines = text.splitlines()
     cleaned_lines = [line for line in lines if not re.search(r"Step \d+|Duration", line)]
     return "\n".join(cleaned_lines).strip()
 
+# Use only the final_answer tool.
+final_answer = FinalAnswerTool()
+
 # Set up the model with a reduced token limit.
 model = HfApiModel(
     max_tokens=1024,
@@ -41,41 +44,34 @@ model = HfApiModel(
     custom_role_conversions=None,
 )
 
-# Load prompt templates
+# Load prompt templates from YAML.
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
-
+# Ensure the final_answer key exists in prompt_templates to prevent KeyError.
+if "final_answer" not in prompt_templates:
+    prompt_templates["final_answer"] = {"pre_messages": "", "post_messages": ""}
+
+# Initialize CodeAgent with a low verbosity level to reduce extra debug output.
+
+def run_agent(user_input):
     """
-
-
-    After generation, it removes any chain-of-thought, debug logs, or code snippets.
-    Accepts extra keyword arguments to be compatible with Gradio's expectations.
+    Runs the agent, then removes any internal chain-of-thought, step logs, and code snippets
+    before returning the final plain-text answer.
     """
-
-
-        raw_response = model.run(prompt)
-    except AttributeError:
-        raw_response = model(prompt)
-
-    print("Raw Model Response:", raw_response)  # Debug output (optional)
+    raw_response = agent.run(user_input)
+    print("Raw Agent Response:", raw_response)  # Debugging output (optional)
 
     if not raw_response.strip():
         return "I'm sorry, but I couldn't generate a response. Please try again."
+    if "final_answer" not in raw_response.lower():
+        return "Error: The response did not use the `final_answer` tool. Please try again."
 
     clean_response = remove_code_snippets(raw_response)
     words = clean_response.split()
-    if len(set(words)) < 5:
+    if len(set(words)) < 5:
         return "I'm unable to generate a meaningful response. Please refine your query."
     return clean_response
 
-
-    def __init__(self, run_function):
-        self.run_function = run_function
-
-    def run(self, *args, **kwargs):
-        return self.run_function(*args, **kwargs)
-
-agent = SimpleAgent(run_agent)
-
+# Launch the Gradio UI.
 GradioUI(agent).launch()
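Note: the updated code calls agent.run(user_input) and GradioUI(agent).launch(), and the added comment mentions initializing a CodeAgent with a low verbosity level, but the construction of `agent` itself sits outside the changed hunks. Below is a minimal sketch of how the visible pieces would plausibly be wired together; the CodeAgent arguments (tools, prompt_templates, verbosity_level=0) are assumptions based on the comment and the smolagents API, not lines from this commit.

# Assumed wiring only: the agent construction line is not visible in this diff.
import yaml
from smolagents import CodeAgent, HfApiModel
from tools.final_answer import FinalAnswerTool

final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=1024,
    custom_role_conversions=None,
)

with open("prompts.yaml", "r") as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    tools=[final_answer],            # only the final_answer tool, per the added comment
    model=model,
    prompt_templates=prompt_templates,
    verbosity_level=0,               # "low verbosity level"; the exact value is assumed
)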
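For a quick sanity check of remove_code_snippets after this change, here is a hypothetical input and its expected output; the sample text is invented for illustration, and it assumes the function lines not shown in the hunk leave this sample untouched.

# Hypothetical sample targeting the two regexes and the line filter shown above.
raw = (
    "[Step 1: Duration 2.31 seconds]\n"
    "Make sure to include code with the correct pattern in your answer.\n"
    "Drink water and take short breaks while revising."
)
print(remove_code_snippets(raw))
# Expected output: "Drink water and take short breaks while revising."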