Commit 8ceffe3 · Parent: 64b7053
Update prompt
Signed-off-by: Aivin V. Solatorio <avsolatorio@gmail.com>

Files changed:
- mcp_client.py (+41 -3)
- wdi_mcp_server.py (+2 -2)
mcp_client.py
CHANGED
@@ -16,6 +16,18 @@ load_dotenv()
 loop = asyncio.new_event_loop()
 asyncio.set_event_loop(loop)
 
+SYSTEM_PROMPT = """"You are a helpful assistant. You do not have any knowledge of the World Development Indicators (WDI) data. However, you can use the tools provided to answer questions.
+
+You must not provide answers beyond what the tools provide.
+
+Do not make up data or information and never simulate the `get_wdi_data` tool. Instead, you must always call the `get_wdi_data` tool when the user asks for data.
+
+You can use multiple tools if needed.
+
+If the user asks for any information beyond what the tools available to you provide, you must say that you do not have that information."""
+
+LLM_MODEL = "claude-3-5-haiku-20241022"
+
 
 class MCPClientWrapper:
     def __init__(self):
@@ -98,7 +110,9 @@ class MCPClientWrapper:
         claude_messages.append({"role": "user", "content": message})
 
         response = self.anthropic.messages.create(
-            model="claude-3-5-sonnet-20241022",
+            # model="claude-3-5-sonnet-20241022",
+            model=LLM_MODEL,
+            system=SYSTEM_PROMPT,
             max_tokens=1000,
             messages=claude_messages,
             tools=self.tools,
@@ -106,11 +120,14 @@ class MCPClientWrapper:
 
         result_messages = []
 
+        print(response.content)
+
         for content in response.content:
             if content.type == "text":
                 result_messages.append({"role": "assistant", "content": content.text})
 
             elif content.type == "tool_use":
+                tool_id = content.id
                 tool_name = content.name
                 tool_args = content.input
 
@@ -141,6 +158,7 @@ class MCPClientWrapper:
                     }
                 )
 
+                print(f"Calling tool: {tool_name} with args: {tool_args}")
                 result = await self.session.call_tool(tool_name, tool_args)
 
                 if result_messages and "metadata" in result_messages[-2]:
@@ -159,8 +177,10 @@ class MCPClientWrapper:
                 )
 
                 result_content = result.content
+                print(result_content)
                 if isinstance(result_content, list):
                     result_content = "\n".join(str(item) for item in result_content)
+                print("result_content", result_content)
 
                 try:
                     result_json = json.loads(result_content)
@@ -207,18 +227,36 @@ class MCPClientWrapper:
                     }
                 )
 
+                # claude_messages.append(
+                #     {
+                #         "role": "user",
+                #         "content": f"Tool result for {tool_name}: {result_content}",
+                #     }
+                # )
+                claude_messages.append(
+                    {"role": "assistant", "content": [content.model_dump()]}
+                )
                 claude_messages.append(
                     {
                         "role": "user",
-                        "content": f"Tool result for {tool_name}: {result_content}",
+                        "content": [
+                            {
+                                "type": "tool_result",
+                                "tool_use_id": tool_id,
+                                "content": result_content,
+                            }
+                        ],
                     }
                 )
                 next_response = self.anthropic.messages.create(
-                    model=
+                    model=LLM_MODEL,
+                    system=SYSTEM_PROMPT,
                     max_tokens=1000,
                     messages=claude_messages,
                 )
 
+                print("next_response", next_response.content)
+
                 if next_response.content and next_response.content[0].type == "text":
                     result_messages.append(
                         {"role": "assistant", "content": next_response.content[0].text}
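
Note: the substantive change above is the move from stuffing tool output into a plain-text user message to Anthropic's structured tool_use / tool_result pairing, plus the switch to claude-3-5-haiku-20241022 with a restrictive system prompt. Below is a minimal sketch of that round trip; the tool schema, the run_wdi_tool stub, the abbreviated system prompt, and the example question are illustrative stand-ins, since in the app the schema comes from the MCP server's tool listing and the call goes through `await self.session.call_tool(...)`.

import anthropic

LLM_MODEL = "claude-3-5-haiku-20241022"  # model pinned by this commit
SYSTEM_PROMPT = "You are a helpful assistant. Use only the provided tools to answer WDI questions."  # abbreviated

# Hypothetical stand-in for the MCP call; the app awaits self.session.call_tool(name, args).
def run_wdi_tool(name: str, args: dict) -> str:
    return '{"data": [], "note": "stub result"}'

client = anthropic.Anthropic()

# Illustrative schema; the real one is built from the MCP server's tool listing.
tools = [{
    "name": "get_wdi_data",
    "description": "Fetch WDI indicator data for a country.",
    "input_schema": {
        "type": "object",
        "properties": {
            "indicator_id": {"type": "string"},
            "country_codes": {"type": "string"},
            "date": {"type": "string"},
        },
        "required": ["indicator_id", "country_codes"],
    },
}]

messages = [{"role": "user", "content": "What was Kenya's GDP in 2020?"}]
response = client.messages.create(
    model=LLM_MODEL, system=SYSTEM_PROMPT, max_tokens=1000,
    messages=messages, tools=tools,
)

for block in response.content:
    if block.type == "tool_use":
        result_content = run_wdi_tool(block.name, block.input)

        # Echo the assistant's tool_use block, then answer it with a tool_result
        # carrying the same id: the pairing this commit introduces.
        messages.append({"role": "assistant", "content": [block.model_dump()]})
        messages.append({
            "role": "user",
            "content": [{
                "type": "tool_result",
                "tool_use_id": block.id,
                "content": result_content,
            }],
        })

        final = client.messages.create(
            model=LLM_MODEL, system=SYSTEM_PROMPT, max_tokens=1000, messages=messages,
        )
        print(final.content[0].text)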
wdi_mcp_server.py
CHANGED
@@ -157,7 +157,7 @@ async def get_wdi_data(
     indicator_id: str,
     country_codes: str | list[str],
     date: Optional[str] = None,
-    per_page: Optional[int] =
+    per_page: Optional[int] = 5,
 ) -> dict[str, list[dict[str, Any]] | str]:
     """Fetches indicator data for a given indicator id (idno) from the World Bank's World Development Indicators (WDI) API. The LLM must exclusively use this tool when the user asks for data. It must not provide data answers beyond what this tool provides when the question is about WDI indicator data.
 
@@ -171,7 +171,7 @@ async def get_wdi_data(
         A dictionary with keys `data` and `note`. The `data` key contains a list of indicator data entries requested. The `note` key contains a note about the data returned.
     """
     print("Hello...")
-    MAX_INFO =
+    MAX_INFO = 20
     note = ""
 
     if isinstance(country_codes, str):
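
Note on the new per_page default: the tool's docstring says get_wdi_data queries the World Bank WDI API, where per_page caps the number of rows returned per request. The helper below is not the repo's code, just a hedged sketch of the endpoint shape such a tool would wrap, assuming the standard WDI v2 REST API.

import requests

def fetch_wdi(indicator_id: str, country_code: str,
              date: str | None = None, per_page: int = 5) -> list[dict]:
    """Hypothetical sketch of the WDI v2 endpoint a tool like get_wdi_data wraps."""
    url = f"https://api.worldbank.org/v2/country/{country_code}/indicator/{indicator_id}"
    params = {"format": "json", "per_page": per_page}
    if date:
        params["date"] = date  # a year ("2020") or a range ("2015:2020")
    payload = requests.get(url, params=params, timeout=30).json()
    # The API returns [pagination_metadata, rows]; rows is None when nothing matches.
    return payload[1] or []

# Example: up to five rows for Kenya's GDP (current US$)
# fetch_wdi("NY.GDP.MKTP.CD", "KEN")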