bibibi12345 committed on
Commit
24110a7
·
verified ·
1 Parent(s): a03de74

All 2.5 Flash models now have nothinking and max mode

Browse files
Files changed (1) hide show
  1. app/routes/chat_api.py +4 -4
app/routes/chat_api.py CHANGED
@@ -95,10 +95,10 @@ async def chat_completions(fastapi_request: Request, request: OpenAIRequest, api
95
  elif is_max_thinking_model: base_model_name = base_model_name[:-len("-max")]
96
 
97
  # Specific model variant checks (if any remain exclusive and not covered dynamically)
98
- if is_nothinking_model and base_model_name != "gemini-2.5-flash-preview-04-17":
99
- return JSONResponse(status_code=400, content=create_openai_error_response(400, f"Model '{request.model}' (-nothinking) is only supported for 'gemini-2.5-flash-preview-04-17'.", "invalid_request_error"))
100
- if is_max_thinking_model and base_model_name != "gemini-2.5-flash-preview-04-17":
101
- return JSONResponse(status_code=400, content=create_openai_error_response(400, f"Model '{request.model}' (-max) is only supported for 'gemini-2.5-flash-preview-04-17'.", "invalid_request_error"))
102
 
103
  generation_config = create_generation_config(request)
104
 
 
95
  elif is_max_thinking_model: base_model_name = base_model_name[:-len("-max")]
96
 
97
  # Specific model variant checks (if any remain exclusive and not covered dynamically)
98
+ if is_nothinking_model and not base_model_name.startswith("gemini-2.5-flash"):
99
+ return JSONResponse(status_code=400, content=create_openai_error_response(400, f"Model '{request.model}' (-nothinking) is only supported for models starting with 'gemini-2.5-flash'.", "invalid_request_error"))
100
+ if is_max_thinking_model and not base_model_name.startswith("gemini-2.5-flash"):
101
+ return JSONResponse(status_code=400, content=create_openai_error_response(400, f"Model '{request.model}' (-max) is only supported for models starting with 'gemini-2.5-flash'.", "invalid_request_error"))
102
 
103
  generation_config = create_generation_config(request)
104