Spaces:
Sleeping
Sleeping
Commit
·
93bf59b
1
Parent(s):
f94c558
fix(chatCompletion): handle errors in stream processing gracefully
Browse files
Ensure that the [DONE] message is always sent even if an error occurs during stream processing. This prevents the client from waiting indefinitely and improves the robustness of the chat completion endpoint.
- routes/chatCompletion.py +13 -3
routes/chatCompletion.py
CHANGED
@@ -7,9 +7,19 @@ import json
|
|
7 |
router = APIRouter()
|
8 |
|
9 |
def generate_stream(response):
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
|
14 |
@router.post("/v1/chat/completions", tags=["Chat Completion"])
|
15 |
async def chat_completion(body: ChatRequest):
|
|
|
7 |
router = APIRouter()
|
8 |
|
9 |
def generate_stream(response):
    """Yield Server-Sent-Events lines for each chunk, always ending with [DONE].

    Each item in *response* has its ``__dict__`` serialized to compact JSON and
    emitted as a ``data:`` line. If serializing or yielding a chunk raises, the
    error is printed and streaming stops early; the ``finally`` clause
    guarantees the ``[DONE]`` sentinel is emitted regardless, so the client is
    never left waiting.
    """
    try:
        for item in response:
            try:
                # Serialize the chunk's attributes and emit it as an SSE event.
                yield f"data: {json.dumps(item.__dict__, separators=(',', ':'))}\n\n"
            except Exception as e:
                # Surface the failure, then stop streaming further chunks.
                print(f"Error during stream processing: {e}")
                break
    finally:
        # Terminator is sent even when the loop above aborted on an error.
        yield "data: [DONE]\n\n"
23 |
|
24 |
@router.post("/v1/chat/completions", tags=["Chat Completion"])
|
25 |
async def chat_completion(body: ChatRequest):
|