import os

import yaml
from smolagents import CodeAgent, HfApiModel
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI

# Simplified system prompt without any code-related instructions
system_prompt = (
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
    "Provide direct and concise lifestyle advice based on the user's details. "
    "Output a clear answer as plain text with no extra commentary."
)

def clean_response(text):
    """
    Normalizes whitespace within each paragraph while preserving paragraph breaks.
    """
    # Split into paragraphs first, then collapse extra whitespace inside each one;
    # collapsing whitespace on the whole string first would destroy the paragraph breaks.
    paragraphs = text.split('\n\n')
    cleaned_paragraphs = [' '.join(p.split()) for p in paragraphs if p.strip()]
    return '\n\n'.join(cleaned_paragraphs)
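# Illustrative example (input made up for documentation, not from real agent output):
#   clean_response("Reduce   salt intake.\n\n  Exercise   regularly.")
#   -> "Reduce salt intake.\n\nExercise regularly."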

# Load prompt templates from YAML
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Initialize the model with simplified settings.
# Assumes the Hugging Face API token is provided via the HF_TOKEN environment
# variable (e.g. a Space secret) rather than hard-coded in the source.
model = HfApiModel(
    max_tokens=1024,
    temperature=0.5,
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
    token=os.getenv('HF_TOKEN')
)

# Initialize CodeAgent with minimal configuration
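# max_steps=1 and a single FinalAnswerTool keep the agent to one direct answer
# per query, matching the plain-text advisory behavior described in system_prompt.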
agent = CodeAgent(
    model=model,
    tools=[FinalAnswerTool()],
    max_steps=1,
    verbosity_level=0,
    name="Health Advisor",
    description=system_prompt,
    prompt_templates=prompt_templates
)

def run_agent(user_input):
    """
    Runs the agent and returns a clean, formatted response.
    """
    try:
        response = agent.run(user_input)
        # agent.run may return a non-string final answer, so coerce it before cleaning.
        return clean_response(str(response))
    except Exception:
        return "I apologize, but I couldn't process your request. Please try again."

# Launch the Gradio UI
GradioUI(agent).launch()