# NOTE(review): removed non-code residue (a scraped Hugging Face Spaces page
# header: status lines, file size, commit hashes, line-number gutter) that was
# not valid Python and prevented this file from running.
from smolagents import CodeAgent, HfApiModel
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# System prompt: advisor persona plus the blood-pressure classification
# thresholds the model should apply when giving prevention advice.
system_prompt = (
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
    # Fixed boundary: the original said "> 140/90", leaving exactly 140/90
    # unclassified between the Borderline band (120-139/80-89) and
    # Hypertension; standard criteria define hypertension as >= 140/90.
    "The diagnostic criteria are as follows: Normal BP is < 120/80 mmHg, Borderline BP is 120-139/80-89 mmHg, "
    "and Hypertension is >= 140/90 mmHg. Provide direct, concise, and actionable advice based solely on the user's details. "
    "Do NOT include any internal reasoning, chain-of-thought, or meta commentary in your final response. "
    "Only output the final advice. For example, if the user mentions alcohol consumption, simply say: "
    "'Reduce alcohol intake, as it can raise blood pressure.'"
)
# The agent is restricted to a single tool: emitting the final answer.
final_answer = FinalAnswerTool()

# Hosted inference model configuration (Hugging Face Inference API).
model = HfApiModel(
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',  # can be overloaded; swap for an alternative if needed
    max_tokens=2096,
    temperature=0.5,
    custom_role_conversions=None,
)
# Load prompt templates from the YAML file shipped alongside this script.
# Explicit encoding avoids locale-dependent decoding of the file (PEP 597);
# safe_load is used so no arbitrary Python objects can be constructed.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
# Assemble the agent: only the final-answer tool is available, and the
# hypertension-advisor system prompt doubles as its description.
agent = CodeAgent(
    model=model,
    tools=[final_answer],  # single-tool setup: answer directly, no other actions
    name="Hypertension Prevention Advisor",
    description=system_prompt,
    prompt_templates=prompt_templates,
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
)

# Expose the agent through the Gradio web interface.
GradioUI(agent).launch()