ianeksdi committed
Commit 89dbda5 · verified · 1 Parent(s): ae7a494

Update app.py

Files changed (1)
  app.py: +24 -48
app.py CHANGED
@@ -1,69 +1,45 @@
-from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
-import datetime
-import requests
-import pytz
+from smolagents import CodeAgent, HfApiModel
 import yaml
 from tools.final_answer import FinalAnswerTool
-
 from Gradio_UI import GradioUI
 
-# Below is an example of a tool that does nothing. Amaze us with your creativity !
-@tool
-def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
-    #Keep this format for the description / args / args description but feel free to modify the tool
-    """A tool that does nothing yet
-    Args:
-        arg1: the first argument
-        arg2: the second argument
-    """
-    return "What magic will you build ?"
-
-@tool
-def get_current_time_in_timezone(timezone: str) -> str:
-    """A tool that fetches the current local time in a specified timezone.
-    Args:
-        timezone: A string representing a valid timezone (e.g., 'America/New_York').
-    """
-    try:
-        # Create timezone object
-        tz = pytz.timezone(timezone)
-        # Get current time in that timezone
-        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-        return f"The current local time in {timezone} is: {local_time}"
-    except Exception as e:
-        return f"Error fetching time for timezone '{timezone}': {str(e)}"
-
+# Define the system prompt for hypertension prevention advice.
+system_prompt = (
+    "You are a health and lifestyle advisor specializing in the early detection and prevention of hypertension. "
+    "The diagnostic criteria are as follows: Normal BP is < 120/80 mmHg, Borderline BP is 120-139/80-89 mmHg, "
+    "and Hypertension is > 140/90 mmHg. Provide direct, concise, and actionable advice based solely on the user's details. "
+    "Do NOT include any internal reasoning, chain-of-thought, or meta commentary in your final response. "
+    "Only output the final advice. For example, if the user mentions alcohol consumption, simply say: "
+    "'Reduce alcohol intake, as it can raise blood pressure.'"
+)
 
+# Use only the final_answer tool.
 final_answer = FinalAnswerTool()
 
-# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
-# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
-
+# Set up your model.
 model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-    custom_role_conversions=None,
+    max_tokens=2096,
+    temperature=0.5,
+    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # This model might be overloaded; consider alternatives if needed.
+    custom_role_conversions=None,
 )
 
-
-# Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
-
+# Load prompt templates from a YAML file.
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
-
+
+# Initialize the CodeAgent with the final_answer tool and your system prompt as its description.
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer],  # Only keep the final answer tool.
     max_steps=6,
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
-    name=None,
-    description=None,
+    name="Hypertension Prevention Advisor",
+    description=system_prompt,
     prompt_templates=prompt_templates
 )
 
-
-GradioUI(agent).launch()
+# Launch the Gradio UI.
+GradioUI(agent).launch()
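
For reference, the blood-pressure cut-offs quoted in the new system_prompt amount to a small piece of branching logic. The sketch below is illustrative only and is not part of the committed app.py; the helper name classify_bp is made up here, and treating a reading of exactly 140/90 mmHg as hypertensive is an assumption, since the prompt's ranges (Normal < 120/80, Borderline 120-139/80-89, Hypertension > 140/90) leave that boundary unstated.

# Illustrative sketch, not part of this commit: encode the prompt's BP categories.
def classify_bp(systolic: int, diastolic: int) -> str:
    """Classify a blood-pressure reading (mmHg) using the cut-offs quoted in system_prompt."""
    # Assumption: a reading at exactly 140/90 mmHg is grouped with hypertension.
    if systolic >= 140 or diastolic >= 90:
        return "Hypertension (> 140/90 mmHg): recommend medical follow-up and lifestyle changes."
    if systolic >= 120 or diastolic >= 80:
        return "Borderline (120-139/80-89 mmHg): recommend monitoring and lifestyle changes."
    return "Normal (< 120/80 mmHg): maintain current habits."

print(classify_bp(118, 76))  # Normal
print(classify_bp(132, 84))  # Borderline
print(classify_bp(150, 95))  # Hypertension

Once the Space is running, GradioUI(agent).launch() serves the chat interface; the same agent should also be usable programmatically, e.g. agent.run("My blood pressure is 132/84 and I drink daily."), assuming the standard smolagents CodeAgent.run() interface.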