ianeksdi committed
Commit 9fb943a · verified · 1 Parent(s): 8c283a7

Update app.py

Files changed (1)
  1. app.py +29 -78
app.py CHANGED
@@ -1,106 +1,57 @@
  import yaml
- import re
  from smolagents import CodeAgent, HfApiModel
  from tools.final_answer import FinalAnswerTool
  from Gradio_UI import GradioUI

- # Updated system prompt: Only output the final, direct advice in plain text.
+ # Simplified system prompt without any code-related instructions
  system_prompt = (
      "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
-     "Provide only the final, direct, and concise lifestyle advice based solely on the user's details. "
-     "Do NOT include any internal reasoning, chain-of-thought, intermediate steps, or code snippets. "
-     "Output exactly one final answer as plain text with no extra commentary."
+     "Provide direct and concise lifestyle advice based on the user's details. "
+     "Output a clear answer as plain text with no extra commentary."
  )

- def remove_code_snippets(text):
+ def clean_response(text):
      """
-     Removes code blocks, inline code, chain-of-thought, error messages, and debugging/step logs from the output.
+     Simple cleanup function that removes extra whitespace and ensures proper formatting.
      """
-     # Remove triple-backtick code blocks.
-     text = re.sub(r"```[\s\S]+?```", "", text, flags=re.DOTALL)
-     # Remove inline code enclosed in single backticks.
-     text = re.sub(r"`[^`]+`", "", text)
-     # Remove any text between <think> and </think> tags.
-     text = re.sub(r"<think>[\s\S]*?</think>", "", text, flags=re.DOTALL)
-     # Remove block from "Make sure to include code with the correct pattern" until "<end_code>"
-     text = re.sub(r"Make sure to include code with the correct pattern[\s\S]*?<end_code>", "", text, flags=re.DOTALL)
-
-     # Split the text into lines and filter out debug/error/step log lines.
-     lines = text.splitlines()
-     filtered_lines = []
-     for line in lines:
-         # Skip lines that are clearly debug or error messages.
-         if re.search(r"Step \d+", line):
-             continue
-         if "Duration:" in line:
-             continue
-         if "Error in code parsing:" in line:
-             continue
-         if "💥 Error" in line:
-             continue
-         if "Reached max steps" in line:
-             continue
-         if "Make sure to provide correct code blobs" in line:
-             continue
-         if "Here is your code snippet:" in line:
-             continue
-         if "Code:" in line:
-             continue
-         filtered_lines.append(line)
-
-     cleaned_text = "\n".join(filtered_lines)
-     return cleaned_text.strip()
-
- # Use only the final_answer tool.
- final_answer = FinalAnswerTool()
+     # Remove extra whitespace
+     text = ' '.join(text.split())
+     # Split into paragraphs for readability
+     paragraphs = text.split('\n\n')
+     cleaned_paragraphs = [p.strip() for p in paragraphs if p.strip()]
+     return '\n\n'.join(cleaned_paragraphs)
+
+ # Load prompt templates from YAML
+ with open("prompts.yaml", 'r') as stream:
+     prompt_templates = yaml.safe_load(stream)

- # Set up the model with a reduced token limit.
+ # Initialize the model with simplified settings
  model = HfApiModel(
      max_tokens=1024,
      temperature=0.5,
-     model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
-     custom_role_conversions=None,
+     model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B'
  )

- # Load prompt templates from YAML.
- with open("prompts.yaml", 'r') as stream:
-     prompt_templates = yaml.safe_load(stream)
-
- # Ensure the final_answer key exists in prompt_templates to prevent KeyError.
- if "final_answer" not in prompt_templates:
-     prompt_templates["final_answer"] = {"pre_messages": "", "post_messages": ""}
-
- # Initialize CodeAgent with a low verbosity level to reduce extra debug output.
+ # Initialize CodeAgent with minimal configuration
  agent = CodeAgent(
      model=model,
-     tools=[final_answer],
+     tools=[FinalAnswerTool()],
      max_steps=1,
      verbosity_level=0,
-     grammar=None,
-     planning_interval=None,
-     name="Hypertension Prevention Advisor",
+     name="Health Advisor",
      description=system_prompt,
      prompt_templates=prompt_templates
  )

  def run_agent(user_input):
      """
-     Runs the agent and then removes any internal chain-of-thought, error messages,
-     debug logs, and code snippet sections before returning the final plain-text answer.
+     Runs the agent and returns a clean, formatted response.
      """
-     raw_response = agent.run(user_input)
-     print("Raw Agent Response:", raw_response)  # Debugging output (optional)
-
-     if not raw_response.strip():
-         return "I'm sorry, but I couldn't generate a response. Please try again."
-     if "final_answer" not in raw_response.lower():
-         return "Error: The response did not use the `final_answer` tool. Please try again."
-
-     clean_response = remove_code_snippets(raw_response)
-     words = clean_response.split()
-     if len(set(words)) < 5:
-         return "I'm unable to generate a meaningful response. Please refine your query."
-     return clean_response
-
- # Launch the Gradio UI.
- GradioUI(agent).launch()
+     try:
+         response = agent.run(user_input)
+         return clean_response(response)
+     except Exception as e:
+         return f"I apologize, but I couldn't process your request. Please try again."
+
+ # Launch the Gradio UI
+ GradioUI(agent).launch()
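
For a quick sanity check of the new helper, the snippet below copies clean_response exactly as added in this commit and runs it on an illustrative string; the sample input and printed output are examples only, not part of the repository.

# Copy of the clean_response helper introduced in this commit,
# exercised on a made-up string for illustration.

def clean_response(text):
    # Collapse all runs of whitespace (including newlines) into single spaces.
    text = ' '.join(text.split())
    # Split on blank lines; after the collapse above this yields a single paragraph.
    paragraphs = text.split('\n\n')
    cleaned_paragraphs = [p.strip() for p in paragraphs if p.strip()]
    return '\n\n'.join(cleaned_paragraphs)

sample = "Cut back on salt.\n\n  Aim for 30 minutes of walking most days.  "
print(clean_response(sample))
# Prints: Cut back on salt. Aim for 30 minutes of walking most days.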