# Hugging Face Space application: hypertension-prevention advisor agent.
# (Web-scrape residue — Space status banner, file size, commit-hash and
# line-number gutters — removed; it was not part of the source file.)
import yaml
import re
from smolagents import CodeAgent, HfApiModel
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# System prompt: the model must emit only the final plain-text advice —
# no chain-of-thought, no intermediate steps, no code.
system_prompt = " ".join([
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension.",
    "Provide only the final, direct, and concise lifestyle advice based solely on the user's details.",
    "Do NOT include any internal reasoning, chain-of-thought, intermediate steps, or code snippets.",
    "Output exactly one final answer as plain text with no extra commentary.",
])
def remove_code_snippets(text):
    """
    Strip code blocks, inline code, chain-of-thought, and step/debug logs
    from *text*, returning the cleaned plain-text answer.
    """
    # Spans to delete outright, applied in order: (pattern, regex flags).
    removals = (
        (r"```[\s\S]+?```", re.DOTALL),            # triple-backtick code blocks
        (r"`[^`]+`", 0),                           # inline code in single backticks
        (r"<think>[\s\S]*?</think>", re.DOTALL),   # chain-of-thought tags
        (r"━+.*Step \d+.*━+", 0),                  # "━━ Step X ━━" banners
        (r"\[Step \d+: Duration .*", 0),           # per-step duration/token lines
        (r"Make sure to include code with the correct pattern.*", 0),  # tool-use nags
    )
    for pattern, flags in removals:
        text = re.sub(pattern, "", text, flags=flags)
    # Finally, drop any leftover lines that still look like step/debug logs.
    kept = [ln for ln in text.splitlines()
            if re.search(r"Step \d+|Duration", ln) is None]
    return "\n".join(kept).strip()
# The agent is given exactly one tool: emitting the final answer.
final_answer = FinalAnswerTool()

# Inference model; the modest max_tokens budget keeps replies short.
model = HfApiModel(
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
    max_tokens=1024,
    temperature=0.5,
    custom_role_conversions=None,
)
# Load prompt templates from YAML. Open with an explicit encoding so the
# read does not depend on the platform's locale default. An empty/blank
# file makes safe_load return None, so fall back to an empty dict to keep
# the lookups below safe.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream) or {}
# Guarantee the final_answer template exists so later lookups can't KeyError.
prompt_templates.setdefault("final_answer", {"pre_messages": "", "post_messages": ""})
# Build the agent. verbosity_level=0 suppresses the step-by-step debug
# banners that would otherwise leak into the chat output.
agent = CodeAgent(
    name="Hypertension Prevention Advisor",
    description=system_prompt,
    model=model,
    tools=[final_answer],
    prompt_templates=prompt_templates,
    verbosity_level=0,
    grammar=None,
    planning_interval=None,
)
def run_agent(user_input):
    """
    Run the agent on *user_input* and return a sanitized plain-text answer.

    The raw agent output is stripped of chain-of-thought, step logs, and
    code snippets; fallback messages are returned when the response is
    empty, skipped the final_answer tool, or looks degenerate.
    """
    raw = agent.run(user_input)
    print("Raw Agent Response:", raw)  # optional debugging aid
    # Guard: nothing came back at all.
    if not raw.strip():
        return "I'm sorry, but I couldn't generate a response. Please try again."
    # Guard: the answer must have been routed through the final_answer tool.
    if "final_answer" not in raw.lower():
        return "Error: The response did not use the `final_answer` tool. Please try again."
    cleaned = remove_code_snippets(raw)
    # Heuristic: fewer than five distinct words usually means degenerate output.
    if len(set(cleaned.split())) < 5:
        return "I'm unable to generate a meaningful response. Please refine your query."
    return cleaned
# Launch the Gradio UI wrapping the agent (entry point of this Space app).
GradioUI(agent).launch()