Update main.py
main.py CHANGED
@@ -372,6 +372,7 @@ async def search_gpt(q: str, stream: Optional[bool] = False, systemprompt: Optio
     return JSONResponse(content={"response": collected_text})
 
 # Enhanced streaming with direct SSE pass-through for real-time responses
+header_url = os.getenv('HEADER_URL')
 @app.post("/chat/completions")
 @app.post("/api/v1/chat/completions")
 async def get_completion(payload: Payload, request: Request, authenticated: bool = Depends(verify_api_key)):
@@ -420,9 +421,9 @@ async def get_completion(payload: Payload, request: Request, authenticated: bool
     else:
         endpoint = env_vars['secret_api_endpoint']
     custom_headers = {
-        "Origin":
+        "Origin": header_url,
         "Priority": "u=1, i",
-        "Referer":
+        "Referer": header_url
     }
 
     print(f"Using endpoint: {endpoint} for model: {model_to_use}")
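
For context, the change moves the Origin/Referer values out of the source and into a HEADER_URL environment variable read once at import time. A minimal sketch of the resulting header construction is below; the helper name build_custom_headers and the example value are illustrative and not taken from main.py:

import os

# Read once at import time, as the diff does. The example value in the comment
# is an assumption; HEADER_URL must be supplied via the environment.
header_url = os.getenv("HEADER_URL")  # e.g. "https://example.com"

def build_custom_headers() -> dict:
    # Mirrors the custom_headers block in get_completion: the same URL is
    # reused for both the Origin and Referer headers sent to the upstream endpoint.
    return {
        "Origin": header_url,
        "Priority": "u=1, i",
        "Referer": header_url,
    }

if __name__ == "__main__":
    print(build_custom_headers())

Note that if HEADER_URL is unset, os.getenv returns None, so the Origin and Referer headers would be built from a null value; the variable should be configured (for example as a Space secret or environment variable) before deploying.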