victoria-latynina committed
Commit a11f044 · verified · 1 Parent(s): 67fb37e

Upload 3 files

Files changed (3)
  1. README.md +29 -9
  2. app.py +88 -50
  3. wellbe_agent.py +84 -0
README.md CHANGED
@@ -1,14 +1,34 @@
  ---
- title: WellBePlusAssistant
- emoji: 💬
- colorFrom: yellow
- colorTo: purple
  sdk: gradio
- sdk_version: 5.0.1
  app_file: app.py
- pinned: false
- license: mit
- short_description: AI assistant who answers health and drug related questions
  ---

- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).

+ <img src="docs/images/logo.png" alt="Healthcare AI Agent logo" width="160">
+
+ # WellBe+ Assistant
+
+ WellBe+ AI Agent is a multi-context assistant built with the Agents SDK framework from OpenAI. It orchestrates three secure MCP servers: a public drug-knowledge FDA service, a WHOOP biometric feed, and a MySQL clinical data store. Every user question is answered with up-to-date medication data. For example, the agent can answer generic questions such as “What are the adverse effects of Prozac?” as well as more specific ones, including complex queries that combine FDA and Whoop data: “I started taking Prozac 3 months ago. Do you see any trends in my sleep quality, and what are its most frequent side effects?” The same architecture scales to workout recovery tips, drug interaction cross-checks, and longitudinal trend insights.
+
+ ## MCP Servers in use
+
+ [Whoop](https://smithery.ai/server/@ctvidic/whoop-mcp-server)
+
+ [Healthcare MCP with PubMed, FDA and other APIs](https://smithery.ai/server/@Cicatriiz/healthcare-mcp-public)
+
+ [MySQL + custom drug interaction](https://smithery.ai/server/mysql-mcp-server)
+
+ ## How to start the application
+
+ If you plan to use our custom database:
+
+ - **Install Docker**: Follow the official guide → [Docker Engine installation](https://docs.docker.com/engine/install/)
+ - **Install Docker Compose**: Follow the plugin guide → [Docker Compose installation](https://docs.docker.com/compose/install/)
+ - Run `docker-compose up` in the project directory (make sure the data/ folder is not empty)
+ - Add credentials (see the config sketch just after this diff)
+
  ---
+ title: WellBe+ Assistant
  sdk: gradio
+ sdk_version: 5.33.0
  app_file: app.py
+ pinned: true
+ tags:
+ - agent-demo-track
  ---

+ Try it on [HuggingFace Spaces](https://huggingface.co/spaces/natasha1704/WellBePlusAssistant)
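The setup step “Add credentials” above, together with the `from config import PROMPT_TEMPLATE, MCP_CONFIGS` import in `wellbe_agent.py` further down, implies a local `config.py` that is not included in this commit. Below is a minimal sketch of what such a module might look like; every key, URL, prompt string, and launch command here is a placeholder, not the project's actual configuration.

```python
# Hypothetical config.py sketch (not part of this commit). The structure only
# mirrors how wellbe_agent.py consumes it: MCPServerStreamableHttp receives the
# "healthcare-mcp-public" entry, MCPServerStdio receives the "whoop" entry, and
# the Whoop credentials are merged in at runtime by build_mcp_servers().
import os

PROMPT_TEMPLATE = (
    "You are WellBe+, a health assistant. Use the healthcare MCP tools for "
    "drug facts and the Whoop MCP tools for personal biometrics."
)

MCP_CONFIGS = {
    # Streamable-HTTP server: a URL is the essential parameter (placeholder).
    "healthcare-mcp-public": {
        "url": "https://example-healthcare-mcp.invalid/mcp",
    },
    # Stdio server: launched as a subprocess (placeholder command and args).
    "whoop": {
        "command": "npx",
        "args": ["-y", "@smithery/cli", "run", "@ctvidic/whoop-mcp-server"],
        "env": {"PATH": os.environ.get("PATH", "")},
    },
}
```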
app.py CHANGED
@@ -1,64 +1,102 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
  """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
  """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response


- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )


  if __name__ == "__main__":
-     demo.launch()

  """
+ Gradio UI entry‑point for the WellBe+ Assistant.
+
+ This file focuses *exclusively* on building and launching the UI. Business
+ logic remains in ``wellbe_agent.py`` so that the web interface stays lean and
+ maintainable.
  """
+ from __future__ import annotations
+ import gradio as gr

+ from wellbe_agent import answer_sync

+ DESCRIPTION_MD = (
+     "# **WellBe+ Assistant**\n\n"
+     "WellBe+ AI Agent is a multi‑context assistant built with the Agents SDK framework "
+     "from OpenAI. It orchestrates **three secure MCP servers**: a public drug‑knowledge FDA "
+     "service, a WHOOP biometric feed, and a MySQL clinical data store.\n\n"
+     "Ask how medications, habits or workouts influence your sleep and recovery – the agent "
+     "combines **medical knowledge** with your **personal Whoop data** to craft evidence‑backed "
+     "answers.\n\n"
+     "### Example questions\n"
+     "- *What are the adverse effects of Tamoxifen?*\n"
+     "- *I started taking Prozac three months ago. Have you noticed any trends in my sleep quality? Also, what are its most common side effects?*\n\n"
+     "### MCP Servers in use\n"
+     "- [Whoop](https://smithery.ai/server/@ctvidic/whoop-mcp-server)\n"
+     "- [Healthcare MCP with PubMed, FDA and other APIs](https://smithery.ai/server/@Cicatriiz/healthcare-mcp-public)\n"
+ )

+ video_embed = """
+ <div style="text-align: center;">
+     <h3>How to Use WellBe+ Assistant</h3>
+     <iframe width="560" height="315"
+             src="https://www.youtube.com/embed/iP4L3cLgD84"
+             title="Our Demo on YouTube"
+             frameborder="0"
+             allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
+             allowfullscreen>
+     </iframe>
+ </div>
+ """

+ logo_html = """
+ <img src="https://raw.githubusercontent.com/Alea4jacta6est/drug-interaction-checker-agent/dev/docs/images/logo.png"
+      alt="WellBe+" width="80">
+ """

+ def build_interface() -> gr.Blocks:
+     """Construct and return the Gradio interface for the WellBe+ Assistant."""
+     with gr.Blocks(title="WellBe+ Assistant") as app:
+         gr.HTML(logo_html)
+         with gr.Row():
+             with gr.Column(scale=2):
+                 gr.Markdown(DESCRIPTION_MD)
+             with gr.Column(scale=1):
+                 gr.HTML(video_embed)
+         with gr.Row():
+             # -----------------------------------------------------------------
+             # Left column – credential inputs
+             # -----------------------------------------------------------------
+             with gr.Column(scale=1):
+                 gr.Markdown("### Credentials")
+                 openai_key_box = gr.Textbox(
+                     label="OpenAI API Key",
+                     type="password",
+                     placeholder="sk‑…",
+                 )
+                 whoop_email_box = gr.Textbox(label="Whoop e‑mail")
+                 whoop_pass_box = gr.Textbox(label="Whoop password", type="password")

+             # -----------------------------------------------------------------
+             # Right column – chat interface
+             # -----------------------------------------------------------------
+             with gr.Column(scale=2):
+                 gr.Markdown("### Chat")
+                 question_box = gr.Textbox(
+                     label="Question",
+                     lines=3,
+                     placeholder="e.g. How has my sleep changed since starting Prozac last month?",
+                 )
+                 answer_box = gr.Textbox(label="Assistant", lines=8, interactive=False)
+                 ask_btn = gr.Button("Ask Assistant ▶️", variant="primary")
+                 ask_btn.click(
+                     fn=answer_sync,
+                     inputs=[
+                         question_box,
+                         openai_key_box,
+                         whoop_email_box,
+                         whoop_pass_box,
+                     ],
+                     outputs=answer_box,
+                 )

+         gr.Markdown(
+             "---\nBuilt by [Natalia B.](https://www.linkedin.com/in/natalia-bobkova/), [Victoria L.](https://www.linkedin.com/in/victoria-latynina/)"
+         )

+     return app


  if __name__ == "__main__":
+     build_interface().launch()
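Since `build_interface()` returns a regular `gr.Blocks`, the UI can also be launched outside the `__main__` guard, for instance from a separate deployment script. A minimal sketch; the host, port, and share settings are assumptions rather than anything configured in this commit.

```python
# Minimal sketch of reusing build_interface() from another script.
# The server_name/server_port/share values are assumptions, not Space settings.
from app import build_interface

demo = build_interface()
demo.launch(server_name="0.0.0.0", server_port=7860, share=False)
```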
wellbe_agent.py ADDED
@@ -0,0 +1,84 @@
+ """
+ Core logic for the WellBe+ multi‑context AI assistant.
+
+ This module handles MCP server instantiation, model selection, and single‑shot
+ question answering. It is intentionally agnostic of any UI layer so that it can
+ be re‑used from a command‑line script, a notebook, or the Gradio front‑end.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from copy import deepcopy
+ from typing import Tuple, Union
+
+ from agents import Agent, Runner
+ from agents.extensions.models.litellm_model import LitellmModel
+ from agents.mcp.server import MCPServerStdio, MCPServerStreamableHttp
+
+ from config import PROMPT_TEMPLATE, MCP_CONFIGS
+
+ __all__ = [
+     "build_mcp_servers",
+     "select_model",
+     "answer_question",
+ ]
+
+
+ def build_mcp_servers(
+     whoop_email: str, whoop_password: str
+ ) -> Tuple[MCPServerStreamableHttp, MCPServerStdio]:
+     """Return configured Healthcare and Whoop MCP servers.
+
+     Parameters
+     ----------
+     whoop_email, whoop_password
+         User credentials for the private Whoop feed. The credentials are merged
+         into a *deep‑copied* variant of ``MCP_CONFIGS`` so that the global
+         settings remain untouched.
+     """
+     cfg = deepcopy(MCP_CONFIGS)
+     cfg["whoop"].update({"username": whoop_email, "password": whoop_password})
+
+     healthcare_server = MCPServerStreamableHttp(
+         params=cfg["healthcare-mcp-public"], name="Healthcare MCP Server"
+     )
+     whoop_server = MCPServerStdio(params=cfg["whoop"], name="Whoop MCP Server")
+     return healthcare_server, whoop_server
+
+ def select_model() -> Union[str, LitellmModel]:
+     return "o3-mini"
+
+
+ async def answer_question(
+     question: str,
+     openai_key: str,
+     whoop_email: str,
+     whoop_password: str,
+ ) -> str:
+     """Run the WellBe+ agent on a single question and return the assistant reply."""
+     healthcare_srv, whoop_srv = build_mcp_servers(whoop_email, whoop_password)
+
+     async with healthcare_srv as hserver, whoop_srv as wserver:
+         agent = Agent(
+             name="WellBe+ Assistant",
+             instructions=PROMPT_TEMPLATE,
+             model=select_model(),
+             mcp_servers=[hserver, wserver],
+         )
+         result = await Runner.run(agent, question)
+         return result.final_output
+
+
+ def answer_sync(question: str, openai_key: str, email: str, password: str) -> str:
+     """Blocking wrapper around :func:`answer_question`."""
+     if not question.strip():
+         return "Please enter a question."
+     try:
+         return asyncio.run(
+             answer_question(
+                 question, openai_key.strip(), email.strip(), password.strip()
+             )
+         )
+     except Exception as exc:  # noqa: BLE001
+         return f"**Error:** {exc}"
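As the module docstring notes, this logic is UI-agnostic and can also be driven from a plain script or notebook. A minimal command-line sketch, assuming the OpenAI key is exported as `OPENAI_API_KEY` (which the Agents SDK reads by default) and using hypothetical `WHOOP_EMAIL`/`WHOOP_PASSWORD` environment variables for the Whoop credentials:

```python
# Hypothetical CLI driver for wellbe_agent.py; the environment-variable names
# are illustrative and not part of this commit.
import asyncio
import os

from wellbe_agent import answer_question

if __name__ == "__main__":
    reply = asyncio.run(
        answer_question(
            question="What are the adverse effects of Prozac?",
            openai_key=os.environ.get("OPENAI_API_KEY", ""),
            whoop_email=os.environ.get("WHOOP_EMAIL", ""),
            whoop_password=os.environ.get("WHOOP_PASSWORD", ""),
        )
    )
    print(reply)
```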