Commit
·
9f566ef
1
Parent(s):
b5abced
bug fixes
Browse files
- app/api_helpers.py +1 -1
- app/openai_handler.py +3 -2
app/api_helpers.py
CHANGED
@@ -365,7 +365,7 @@ async def openai_fake_stream_generator( # Reverted signature: removed thought_ta
|
|
365 |
|
366 |
# Use the already configured extra_body which includes the thought_tag_marker
|
367 |
_api_call_task = asyncio.create_task(
|
368 |
-
openai_client.chat.completions.create(**params_for_non_stream_call, extra_body=openai_extra_body
|
369 |
)
|
370 |
raw_response = await _api_call_task
|
371 |
full_content_from_api = ""
|
|
|
365 |
|
366 |
# Use the already configured extra_body which includes the thought_tag_marker
|
367 |
_api_call_task = asyncio.create_task(
|
368 |
+
openai_client.chat.completions.create(**params_for_non_stream_call, extra_body=openai_extra_body)
|
369 |
)
|
370 |
raw_response = await _api_call_task
|
371 |
full_content_from_api = ""
|
app/openai_handler.py
CHANGED
@@ -115,7 +115,7 @@ class OpenAIDirectHandler:
|
|
115 |
openai_params_for_stream = {**openai_params, "stream": True}
|
116 |
stream_response = await openai_client.chat.completions.create(
|
117 |
**openai_params_for_stream,
|
118 |
-
extra_body=openai_extra_body
|
119 |
)
|
120 |
|
121 |
# Create processor for tag-based extraction across chunks
|
@@ -134,6 +134,7 @@ class OpenAIDirectHandler:
|
|
134 |
del delta['extra_content']
|
135 |
|
136 |
content = delta.get('content', '')
|
|
|
137 |
if content:
|
138 |
# Use the processor to extract reasoning
|
139 |
processed_content, current_reasoning = reasoning_processor.process_chunk(content)
|
@@ -198,7 +199,7 @@ class OpenAIDirectHandler:
|
|
198 |
openai_params_non_stream = {**openai_params, "stream": False}
|
199 |
response = await openai_client.chat.completions.create(
|
200 |
**openai_params_non_stream,
|
201 |
-
extra_body=openai_extra_body
|
202 |
)
|
203 |
response_dict = response.model_dump(exclude_unset=True, exclude_none=True)
|
204 |
|
|
|
115 |
openai_params_for_stream = {**openai_params, "stream": True}
|
116 |
stream_response = await openai_client.chat.completions.create(
|
117 |
**openai_params_for_stream,
|
118 |
+
extra_body=openai_extra_body
|
119 |
)
|
120 |
|
121 |
# Create processor for tag-based extraction across chunks
|
|
|
134 |
del delta['extra_content']
|
135 |
|
136 |
content = delta.get('content', '')
|
137 |
+
print(content)
|
138 |
if content:
|
139 |
# Use the processor to extract reasoning
|
140 |
processed_content, current_reasoning = reasoning_processor.process_chunk(content)
|
|
|
199 |
openai_params_non_stream = {**openai_params, "stream": False}
|
200 |
response = await openai_client.chat.completions.create(
|
201 |
**openai_params_non_stream,
|
202 |
+
extra_body=openai_extra_body
|
203 |
)
|
204 |
response_dict = response.model_dump(exclude_unset=True, exclude_none=True)
|
205 |
|