Update app.py
app.py CHANGED
@@ -1,7 +1,7 @@
 import yaml
 import re
-from smolagents import CodeAgent
-from tools.final_answer import FinalAnswerTool
+from smolagents import HfApiModel  # Removed CodeAgent import.
+from tools.final_answer import FinalAnswerTool  # Retained in case you need it.
 from Gradio_UI import GradioUI
 
 # Updated system prompt: Only output the final, direct advice in plain text.
@@ -28,14 +28,11 @@ def remove_code_snippets(text):
     text = re.sub(r"\[Step \d+: Duration .*", "", text)
     # Remove lines that mention code snippet instructions.
     text = re.sub(r"Make sure to include code with the correct pattern.*", "", text)
-    #
+    # Remove any remaining lines that seem to be debug logs.
     lines = text.splitlines()
     cleaned_lines = [line for line in lines if not re.search(r"Step \d+|Duration", line)]
     return "\n".join(cleaned_lines).strip()
 
-# Use only the final_answer tool.
-final_answer = FinalAnswerTool()
-
 # Set up the model with a reduced token limit.
 model = HfApiModel(
     max_tokens=1024,
@@ -44,45 +41,24 @@ model = HfApiModel(
     custom_role_conversions=None,
 )
 
-# Load prompt templates
+# Load prompt templates if needed (unused here but kept for compatibility).
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
-# Ensure the final_answer key exists in prompt_templates to prevent KeyError.
-if "final_answer" not in prompt_templates:
-    prompt_templates["final_answer"] = {"pre_messages": "", "post_messages": ""}
-
-# Initialize CodeAgent with a low verbosity level to reduce extra debug output.
-agent = CodeAgent(
-    model=model,
-    tools=[final_answer],
-    max_steps=1,
-    verbosity_level=0,
-    grammar=None,
-    planning_interval=None,
-    name="Hypertension Prevention Advisor",
-    description=system_prompt,
-    prompt_templates=prompt_templates
-)
-
 def run_agent(user_input):
     """
-
-
+    Constructs a prompt from the system prompt and the user input,
+    then directly calls the model to generate an answer.
+    After generation, it removes any chain-of-thought, debug logs, or code snippets.
     """
-
-
+    prompt = f"{system_prompt}\nUser Input: {user_input}\nFinal Answer:"
+    try:
+        # Try using the model's run method if available.
+        raw_response = model.run(prompt)
+    except AttributeError:
+        # Otherwise, assume the model is callable.
+        raw_response = model(prompt)
 
-
-        return "I'm sorry, but I couldn't generate a response. Please try again."
-    if "final_answer" not in raw_response.lower():
-        return "Error: The response did not use the `final_answer` tool. Please try again."
 
-
-    words = clean_response.split()
-    if len(set(words)) < 5:
-        return "I'm unable to generate a meaningful response. Please refine your query."
-    return clean_response
-
-# Launch the Gradio UI.
-GradioUI(agent).launch()
+    print("Raw Model Response:", raw_response)  # Debug output (optional)
+
+    if not raw_response:
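The new pane of the diff is cut off at this line, so the rest of the new run_agent body is not visible. A minimal sketch of how it could plausibly continue, assuming the new code keeps the old empty-response and low-word-diversity guards plus the remove_code_snippets cleanup while dropping the final_answer tool check; everything below is inferred, not part of the visible diff:

        # Hypothetical continuation of the truncated `if not raw_response:` branch.
        return "I'm sorry, but I couldn't generate a response. Please try again."

    # Strip step logs, snippet instructions, and other debug noise (helper defined above).
    clean_response = remove_code_snippets(str(raw_response))

    # Reject degenerate output that repeats only a handful of distinct words.
    words = clean_response.split()
    if len(set(words)) < 5:
        return "I'm unable to generate a meaningful response. Please refine your query."
    return clean_response

The closing GradioUI(...).launch() call presumably changes as well, since the agent object no longer exists, but its replacement is not visible in this view.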
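A caveat on the new fallback logic: HfApiModel may not expose a run method at all (the author's try/except guards for exactly that case), and smolagents models are conventionally invoked with a list of chat messages rather than a bare string, so the model(prompt) fallback may also need adapting. A sketch of that call shape, assuming the standard smolagents message convention (an assumption, not something shown in this commit):

    # Assumed smolagents convention: pass role/content messages in,
    # read the generated text back from the reply's .content attribute.
    messages = [{"role": "user", "content": prompt}]
    reply = model(messages)
    raw_response = reply.content if hasattr(reply, "content") else str(reply)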