ianeksdi committed on
Commit
ea41545
·
verified ·
1 Parent(s): a8ff7de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -33
app.py CHANGED
@@ -4,43 +4,60 @@ from smolagents import CodeAgent, HfApiModel
4
  from tools.final_answer import FinalAnswerTool
5
  from Gradio_UI import GradioUI
6
 
7
- # 🚀 Updated system prompt: No code recognition or extraction allowed.
8
  system_prompt = (
9
  "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
10
  "Provide only the final, direct, and concise lifestyle advice based solely on the user's details. "
11
- "Do NOT recognize, extract, or output any code snippets, programming-related information, or technical explanations. "
12
- "Only output the final advice as plain text. "
13
- "For example, if the user mentions alcohol consumption, simply say: 'Reduce alcohol intake, as it can raise blood pressure.'"
14
  )
15
 
16
- # 🛑 Function to remove any detected code snippets.
17
  def remove_code_snippets(text):
18
- """Removes potential code snippets from the output."""
19
- pattern = r"```[\s\S]+?```|`[^`]+`|(?:\bimport\b|\bdef\b|\bclass\b|\breturn\b|\bprint\b|\bif\b|\belse\b|\btry\b|\bexcept\b|\bwhile\b|\bfor\b)"
20
- clean_text = re.sub(pattern, "", text, flags=re.DOTALL | re.IGNORECASE).strip()
21
- return clean_text if clean_text else "Invalid response generated. Please try again."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
- # Use only the final_answer tool.
24
  final_answer = FinalAnswerTool()
25
 
26
- # 🎯 Model setup with reduced token limit to prevent long outputs
27
  model = HfApiModel(
28
- max_tokens=1024, # Reduced from 2096 to prevent long responses
29
  temperature=0.5,
30
  model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
31
  custom_role_conversions=None,
32
  )
33
 
34
- # 📖 Load prompt templates from YAML file.
35
  with open("prompts.yaml", 'r') as stream:
36
  prompt_templates = yaml.safe_load(stream)
37
 
38
- # 🔄 Initialize CodeAgent (blocking code extraction)
 
 
 
 
39
  agent = CodeAgent(
40
  model=model,
41
- tools=[final_answer], # Restricted to final advice only.
42
- max_steps=4, # Reduced from 6 to minimize infinite loops
43
- verbosity_level=1,
44
  grammar=None,
45
  planning_interval=None,
46
  name="Hypertension Prevention Advisor",
@@ -48,31 +65,24 @@ agent = CodeAgent(
48
  prompt_templates=prompt_templates
49
  )
50
 
51
- # 🛑 Run agent with filtering.
52
  def run_agent(user_input):
53
- """Runs the agent, ensuring valid output and blocking infinite loops."""
 
 
 
54
  raw_response = agent.run(user_input)
55
-
56
- # Debugging: Print raw response
57
- print("Raw Agent Response:", raw_response)
58
-
59
- # Prevent infinite loops and blank responses
60
  if not raw_response.strip():
61
  return "I'm sorry, but I couldn't generate a response. Please try again."
62
-
63
- # Ensure only final_answer is used
64
  if "final_answer" not in raw_response.lower():
65
  return "Error: The response did not use the `final_answer` tool. Please try again."
66
-
67
- # Remove code snippets
68
  clean_response = remove_code_snippets(raw_response)
69
-
70
- # Detect and prevent repeated phrases (possible infinite loop indicator)
71
  words = clean_response.split()
72
- if len(set(words)) < 5: # Arbitrary threshold for detecting repetition
73
  return "I'm unable to generate a meaningful response. Please refine your query."
74
-
75
  return clean_response
76
 
77
- # 🚀 Launch the Gradio UI.
78
  GradioUI(agent).launch()
 
4
  from tools.final_answer import FinalAnswerTool
5
  from Gradio_UI import GradioUI
6
 
7
# System prompt: restrict the advisor to a single, final, plain-text answer
# (no reasoning traces, no code).
system_prompt = "".join([
    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. ",
    "Provide only the final, direct, and concise lifestyle advice based solely on the user's details. ",
    "Do NOT include any internal reasoning, chain-of-thought, intermediate steps, or code snippets. ",
    "Output exactly one final answer as plain text with no extra commentary.",
])
14
 
 
15
def remove_code_snippets(text):
    """
    Strip code blocks, inline code, chain-of-thought tags, and step/debug
    log artifacts from *text* and return the cleaned plain-text answer.
    """
    # Ordered regex substitutions, applied one after another.
    scrub_patterns = (
        r"```[\s\S]+?```",                                     # fenced code blocks
        r"`[^`]+`",                                            # inline back-ticked code
        r"<think>[\s\S]*?</think>",                            # chain-of-thought sections
        r"━+.*Step \d+.*━+",                                   # step banner lines
        r"\[Step \d+: Duration .*",                            # step duration/token logs
        r"Make sure to include code with the correct pattern.*",  # tooling instructions
    )
    for pattern in scrub_patterns:
        text = re.sub(pattern, "", text)

    # Drop any leftover lines that still look like step/debug logs.
    kept_lines = [
        line for line in text.splitlines()
        if not re.search(r"Step \d+|Duration", line)
    ]
    return "\n".join(kept_lines).strip()
35
 
36
# Restrict the agent's toolset to the final-answer tool only.
final_answer = FinalAnswerTool()

# Model setup; max_tokens is capped at 1024 to discourage long outputs.
model = HfApiModel(
    max_tokens=1024,
    temperature=0.5,
    model_id='deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
    custom_role_conversions=None,
)

# Load prompt templates from the YAML file shipped alongside the app.
# NOTE(review): assumes prompts.yaml parses to a dict — verify the file.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Ensure the "final_answer" key exists in prompt_templates to prevent a
# KeyError when the agent looks up the final-answer template.
if "final_answer" not in prompt_templates:
    prompt_templates["final_answer"] = {"pre_messages": "", "post_messages": ""}
54
+
55
+ # Initialize CodeAgent with a low verbosity level to reduce extra debug output.
56
  agent = CodeAgent(
57
  model=model,
58
+ tools=[final_answer],
59
+ max_steps=4,
60
+ verbosity_level=0,
61
  grammar=None,
62
  planning_interval=None,
63
  name="Hypertension Prevention Advisor",
 
65
  prompt_templates=prompt_templates
66
  )
67
 
 
68
def run_agent(user_input):
    """
    Run the agent on *user_input* and return a cleaned, plain-text answer.

    The raw agent output is sanitized: chain-of-thought, step logs, and code
    snippets are stripped, and degenerate (empty / repetitive) outputs are
    replaced with user-facing error messages.

    Parameters:
        user_input (str): The user's question or details.

    Returns:
        str: The final plain-text advice, or an error message.
    """
    raw_response = agent.run(user_input)
    print("Raw Agent Response:", raw_response)  # Debugging output (optional)

    # Fix: agent.run may return None or a non-str wrapper object; calling
    # .strip()/.lower() on it directly would raise AttributeError. Normalize
    # to str first so degenerate outputs produce the graceful error message.
    raw_text = "" if raw_response is None else str(raw_response)

    # Guard against empty/blank responses.
    if not raw_text.strip():
        return "I'm sorry, but I couldn't generate a response. Please try again."

    # Require that the response went through the final_answer tool.
    if "final_answer" not in raw_text.lower():
        return "Error: The response did not use the `final_answer` tool. Please try again."

    clean_response = remove_code_snippets(raw_text)

    # Heuristic repetition guard: very low vocabulary diversity usually means
    # the model degenerated into a loop.
    words = clean_response.split()
    if len(set(words)) < 5:
        return "I'm unable to generate a meaningful response. Please refine your query."

    return clean_response
86
 
87
# Launch the Gradio UI for the agent (entry point of the app script).
GradioUI(agent).launch()