ianeksdi committed
Commit 89fabcd · verified · 1 Parent(s): a1d78f5

Update app.py

Files changed (1)
  1. app.py +12 -28
app.py CHANGED
@@ -1,6 +1,6 @@
 import yaml
 import re
-from smolagents import CodeAgent, HfApiModel
+from smolagents import HfApiModel  # Removed CodeAgent import.
 from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI
 
@@ -28,12 +28,12 @@ def remove_code_snippets(text):
     text = re.sub(r"\[Step \d+: Duration .*", "", text)
     # Remove lines that mention code snippet instructions.
     text = re.sub(r"Make sure to include code with the correct pattern.*", "", text)
-    # Finally, remove any remaining lines that seem to be debug logs.
+    # Remove any remaining lines that seem to be debug logs.
     lines = text.splitlines()
     cleaned_lines = [line for line in lines if not re.search(r"Step \d+|Duration", line)]
     return "\n".join(cleaned_lines).strip()
 
-# Use only the final_answer tool.
+# (Optional) Retain final_answer tool instance if needed elsewhere.
 final_answer = FinalAnswerTool()
 
 # Set up the model with a reduced token limit.
@@ -44,38 +44,22 @@ model = HfApiModel(
     custom_role_conversions=None,
 )
 
-# Load prompt templates from YAML.
+# Load prompt templates from YAML (if required elsewhere)
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
-# Ensure the final_answer key exists in prompt_templates to prevent KeyError.
-if "final_answer" not in prompt_templates:
-    prompt_templates["final_answer"] = {"pre_messages": "", "post_messages": ""}
-
-# Initialize CodeAgent with a low verbosity level to reduce extra debug output.
-agent = CodeAgent(
-    model=model,
-    tools=[final_answer],
-    verbosity_level=0,
-    grammar=None,
-    planning_interval=None,
-    name="Hypertension Prevention Advisor",
-    description=system_prompt,
-    prompt_templates=prompt_templates
-)
-
 def run_agent(user_input):
     """
-    Runs the agent, then removes any internal chain-of-thought, step logs, and code snippets
-    before returning the final plain-text answer.
+    Constructs a prompt using the system prompt and the user input,
+    then directly calls the model to generate an answer.
+    After generation, it removes any chain-of-thought, debug logs, or code snippets.
     """
-    raw_response = agent.run(user_input)
-    print("Raw Agent Response:", raw_response)  # Debugging output (optional)
+    prompt = f"{system_prompt}\nUser Input: {user_input}\nFinal Answer:"
+    raw_response = model.run(prompt)  # Directly using the model.
+    print("Raw Model Response:", raw_response)  # Debugging output (optional)
 
     if not raw_response.strip():
         return "I'm sorry, but I couldn't generate a response. Please try again."
-    if "final_answer" not in raw_response.lower():
-        return "Error: The response did not use the `final_answer` tool. Please try again."
 
     clean_response = remove_code_snippets(raw_response)
     words = clean_response.split()
@@ -83,5 +67,5 @@ def run_agent(user_input):
         return "I'm unable to generate a meaningful response. Please refine your query."
     return clean_response
 
-# Launch the Gradio UI.
-GradioUI(agent).launch()
+# Launch the Gradio UI using the run_agent function.
+GradioUI(run_agent).launch()
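
For context, a minimal standalone sketch of the flow this commit leaves in place. The model call is stubbed with a local function, since the diff elides the HfApiModel setup and the exact interface behind `model.run`; `fake_model_run`, the stand-in `system_prompt`, and the 5-word length threshold are illustrative assumptions, not code from this Space.

import re

# Stand-in for the Space's system prompt (defined outside the shown hunks).
system_prompt = "You are a hypertension prevention advisor."

def remove_code_snippets(text):
    # Only the cleaning rules visible in this diff are reproduced here.
    text = re.sub(r"\[Step \d+: Duration .*", "", text)
    text = re.sub(r"Make sure to include code with the correct pattern.*", "", text)
    lines = text.splitlines()
    cleaned_lines = [line for line in lines if not re.search(r"Step \d+|Duration", line)]
    return "\n".join(cleaned_lines).strip()

def fake_model_run(prompt):
    # Hypothetical stand-in for `model.run(prompt)`; a real HfApiModel call goes here.
    return "[Step 1: Duration 0.82s]\nReduce sodium intake.\nStep 2 debug line\nExercise most days of the week."

def run_agent(user_input):
    prompt = f"{system_prompt}\nUser Input: {user_input}\nFinal Answer:"
    raw_response = fake_model_run(prompt)
    if not raw_response.strip():
        return "I'm sorry, but I couldn't generate a response. Please try again."
    clean_response = remove_code_snippets(raw_response)
    if len(clean_response.split()) < 5:  # threshold assumed; the actual check sits in an elided hunk
        return "I'm unable to generate a meaningful response. Please refine your query."
    return clean_response

print(run_agent("How can I prevent hypertension?"))
# -> "Reduce sodium intake.\nExercise most days of the week."

One thing worth verifying against this Space's Gradio_UI.py: the stock agents-course GradioUI drives an agent object through its .run method, so handing it a bare function like run_agent may need a small adapter.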