Commit f49ac6f
1 Parent(s): d444594

Handle overload error.

Signed-off-by: Aivin V. Solatorio <avsolatorio@gmail.com>

mcp_client.py +32 -14
mcp_client.py CHANGED
@@ -9,6 +9,7 @@ from gradio.components.chatbot import ChatMessage
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
 from anthropic import Anthropic
+from anthropic._exceptions import OverloadedError
 from dotenv import load_dotenv
 
 load_dotenv()
@@ -150,14 +151,22 @@ class MCPClientWrapper:
 
         claude_messages.append({"role": "user", "content": message})
 
-        response = self.anthropic.messages.create(
-            # model="claude-3-5-sonnet-20241022",
-            model=LLM_MODEL,
-            system=SYSTEM_PROMPT,
-            max_tokens=1000,
-            messages=claude_messages,
-            tools=self.tools,
-        )
+        try:
+            response = self.anthropic.messages.create(
+                # model="claude-3-5-sonnet-20241022",
+                model=LLM_MODEL,
+                system=SYSTEM_PROMPT,
+                max_tokens=1000,
+                messages=claude_messages,
+                tools=self.tools,
+            )
+        except OverloadedError:
+            return [
+                {
+                    "role": "assistant",
+                    "content": "The LLM API is overloaded now, try again later...",
+                }
+            ]
 
         result_messages = []
 
@@ -272,12 +281,21 @@ class MCPClientWrapper:
                         ],
                     }
                 )
-                next_response = self.anthropic.messages.create(
-                    model=LLM_MODEL,
-                    system=SYSTEM_PROMPT,
-                    max_tokens=1000,
-                    messages=claude_messages,
-                )
+
+                try:
+                    next_response = self.anthropic.messages.create(
+                        model=LLM_MODEL,
+                        system=SYSTEM_PROMPT,
+                        max_tokens=1000,
+                        messages=claude_messages,
+                    )
+                except OverloadedError:
+                    return [
+                        {
+                            "role": "assistant",
+                            "content": "The LLM API is overloaded now, try again later...",
+                        }
+                    ]
 
                 print("next_response", next_response.content)
 
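Both hunks make the same change: each messages.create call is wrapped in try/except, and when the API reports overload the handler returns a canned assistant message instead of raising into the Gradio callback. OverloadedError is the anthropic SDK's exception for the API's overloaded responses; note the commit imports it from the private anthropic._exceptions module. A minimal sketch of one possible refinement is retrying with exponential backoff before surfacing the fallback. The create_with_retry helper and the constant values below are hypothetical illustrations, not code from this repo; LLM_MODEL and SYSTEM_PROMPT stand in for the module-level names the diff references.

import time

from anthropic import Anthropic
from anthropic._exceptions import OverloadedError

LLM_MODEL = "claude-3-5-sonnet-20241022"        # placeholder; the repo defines its own
SYSTEM_PROMPT = "You are a helpful assistant."  # placeholder; the repo defines its own


def create_with_retry(client: Anthropic, messages, tools=None, retries: int = 3):
    """Call messages.create, backing off on OverloadedError before giving up."""
    delay = 1.0
    for attempt in range(retries):
        kwargs = dict(
            model=LLM_MODEL,
            system=SYSTEM_PROMPT,
            max_tokens=1000,
            messages=messages,
        )
        if tools is not None:
            kwargs["tools"] = tools  # first call site passes tools; the follow-up call does not
        try:
            return client.messages.create(**kwargs)
        except OverloadedError:
            if attempt == retries - 1:
                raise  # caller can still return the "try again later" chat message
            time.sleep(delay)
            delay *= 2  # back off: 1s, 2s, 4s, ...

Both call sites could then share the helper, and the fallback {"role": "assistant", ...} message would reach the chat only after the retries are exhausted.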