Update main.py

main.py (CHANGED)
Old version (removed lines are marked "-"; unchanged context is unmarked; a few removed lines were truncated by the diff viewer and are kept as fragments, except where usage elsewhere in the file pins the content, e.g. the uvloop, ThreadPoolExecutor and cloudscraper imports):

@@ -1,158 +1,75 @@
 import os
-import re
-from dotenv import load_dotenv
-from fastapi import FastAPI, HTTPException, Request, Depends, Security, Query
-from fastapi.responses import StreamingResponse, HTMLResponse, JSONResponse, FileResponse, PlainTextResponse
-from fastapi.security import APIKeyHeader
-from pydantic import BaseModel
-import httpx
-from functools import lru_cache
-from pathlib import Path
 import json
 import datetime
-import time
-import threading
-from typing import Optional, Dict, List, Any, Generator
 import asyncio
-
-import uvloop
-from concurrent.futures import ThreadPoolExecutor
-import cloudscraper
-from fastapi.middleware.gzip import GZipMiddleware
-from starlette.middleware.cors import CORSMiddleware
-import contextlib
-import requests
-
-asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
-
-executor = ThreadPoolExecutor(max_workers=16)
 
-
 
-
 
 from usage_tracker import UsageTracker
-usage_tracker = UsageTracker()
-
-app = FastAPI()
-
-app.add_middleware(GZipMiddleware, minimum_size=1000)
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-@lru_cache(maxsize=1)
-def get_env_vars():
-    return {
-        'api_keys': os.getenv('API_KEYS', '').split(','),
-        'secret_api_endpoint': os.getenv('SECRET_API_ENDPOINT'),
-        'secret_api_endpoint_2': os.getenv('SECRET_API_ENDPOINT_2'),
-        'secret_api_endpoint_3': os.getenv('SECRET_API_ENDPOINT_3'),
-        'secret_api_endpoint_4': "https://text.pollinations.ai/openai",
-        'secret_api_endpoint_5': os.getenv('SECRET_API_ENDPOINT_5'),
-        'secret_api_endpoint_6': os.getenv('SECRET_API_ENDPOINT_6'),  # New endpoint for Gemini
-        'mistral_api': "https://api.mistral.ai",
-        'mistral_key': os.getenv('MISTRAL_KEY'),
-        'gemini_key': os.getenv('GEMINI_KEY'),  # Gemini API Key
-        'endpoint_origin': os.getenv('ENDPOINT_ORIGIN')
-    }
-
-mistral_models = {
-    "mistral-large-latest",
-    "pixtral-large-latest",
-    "mistral-moderation-latest",
-    "ministral-3b-latest",
-    "ministral-8b-latest",
-    "open-mistral-nemo",
-    "mistral-small-latest",
-    "mistral-saba-latest",
-    "codestral-latest"
-}
-
-pollinations_models = {
-    "openai",
-    "openai-large",
-    "openai-fast",
-    "openai-xlarge",
-    "openai-reasoning",
-    "qwen-coder",
-    "llama",
-    "mistral",
-    "searchgpt",
-    "deepseek",
-    "claude-hybridspace",
-    "deepseek-r1",
-    "deepseek-reasoner",
-    "llamalight",
-    "gemini",
-    "gemini-thinking",
-    "hormoz",
-    "phi",
-    "phi-mini",
-    "openai-audio",
-    "llama-scaleway"
-}
-alternate_models = {
-    "o1",
-    "llama-4-scout",
-    "o4-mini",
-    "sonar",
-    "sonar-pro",
-    "sonar-reasoning",
-    "sonar-reasoning-pro",
-    "grok-3",
-    "grok-3-fast",
-    "r1-1776",
-    "o3"
-}
-
-claude_3_models = {
-    "claude-3-7-sonnet",
-    "claude-3-7-sonnet-thinking",
-    "claude 3.5 haiku",
-    "claude 3.5 sonnet",
-    "claude 3.5 haiku",
-    "o3-mini-medium",
-    "o3-mini-high",
-    "grok-3",
-    "grok-3-thinking",
-    "grok 2"
-}
-
-gemini_models = {
-    "gemini-1.5-pro",
-    "gemini-1.5-flash",
-    "gemini-2.0-flash-lite-preview",
-    "gemini-2.0-flash",
-    "gemini-2.0-flash-thinking",  # aka Reasoning
-    "gemini-2.0-flash-preview-image-generation",
-    "gemini-2.5-flash",
-    "gemini-2.5-pro-exp",
-    "gemini-exp-1206"
-}
-
-
-supported_image_models = {
-    "Flux Pro Ultra",
-    "grok-2-aurora",
-    "Flux Pro",
-    "Flux Pro Ultra Raw",
-    "Flux Dev",
-    "Flux Schnell",
-    "stable-diffusion-3-large-turbo",
-    "Flux Realism",
-    "stable-diffusion-ultra",
-    "dall-e-3",
-    "sdxl-lightning-4step"
-}
 
-class Payload(BaseModel):
     model: str
-    messages:
     stream: bool = False
 
 class ImageGenerationPayload(BaseModel):
@@ -161,809 +78,318 @@ class ImageGenerationPayload(BaseModel):
     size: int
     number: int
 
-
-
 
-
-def
-
-
-
 
-
 
-def
-
-
-
 
-    return
 
-
     request: Request,
-    api_key: str =
-
-
-    if
-
-
-
-
-
-
-
-
-
-    if api_key.startswith('Bearer '):
-        api_key = api_key[7:]
-
-    valid_api_keys = get_env_vars().get('api_keys', [])
-    if not valid_api_keys or valid_api_keys == ['']:
-        raise HTTPException(
-            status_code=HTTP_403_FORBIDDEN,
-            detail="API keys not configured on server"
-        )
-
-    if api_key not in set(valid_api_keys):
-        raise HTTPException(
-            status_code=HTTP_403_FORBIDDEN,
-            detail="Invalid API key"
-        )
-
-    return True
-
-@lru_cache(maxsize=1)
-def load_models_data():
     try:
-
-
-
-    except
-
-
 
 async def get_models():
-
-
-
-
-
-async def generate_search_async(query: str, systemprompt: Optional[str] = None, stream: bool = True):
-    queue = asyncio.Queue()
-
-    async def _fetch_search_data():
-        try:
-            headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}
-            system_message = systemprompt or "Be Helpful and Friendly"
-            prompt = [{"role": "user", "content": query}]
-            prompt.insert(0, {"content": system_message, "role": "system"})
-            payload = {
-                "is_vscode_extension": True,
-                "message_history": prompt,
-                "requested_model": "searchgpt",
-                "user_input": prompt[-1]["content"],
-            }
-            secret_api_endpoint_3 = get_env_vars()['secret_api_endpoint_3']
-            if not secret_api_endpoint_3:
-                await queue.put({"error": "Search API endpoint not configured"})
-                return
-
-            async with httpx.AsyncClient(timeout=30.0) as client:
-                async with client.stream("POST", secret_api_endpoint_3, json=payload, headers=headers) as response:
-                    if response.status_code != 200:
-                        await queue.put({"error": f"Search API returned status code {response.status_code}"})
-                        return
-
-                    buffer = ""
-                    async for line in response.aiter_lines():
-                        if line.startswith("data: "):
-                            try:
-                                json_data = json.loads(line[6:])
-                                content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
-                                if content.strip():
-                                    cleaned_response = {
-                                        "created": json_data.get("created"),
-                                        "id": json_data.get("id"),
-                                        "model": "searchgpt",
-                                        "object": "chat.completion",
-                                        "choices": [
-                                            {
-                                                "message": {
-                                                    "content": content
-                                                }
-                                            }
-                                        ]
-                                    }
-                                    await queue.put({"data": f"data: {json.dumps(cleaned_response)}\n\n", "text": content})
-                            except json.JSONDecodeError:
-                                continue
-            await queue.put(None)
-        except Exception as e:
-            await queue.put({"error": str(e)})
-            await queue.put(None)
-
-    asyncio.create_task(_fetch_search_data())
-    return queue
 
 @lru_cache(maxsize=10)
-def read_html_file(file_path):
     try:
-        with open(file_path, "r") as file:
             return file.read()
     except FileNotFoundError:
         return None
 
-@
-async def
-
-
-
-
-
-
-
-
-
-
-
-
-
-    html_content = read_html_file("index.html")
-    if html_content is None:
-        return HTMLResponse(content="<h1>File not found</h1>", status_code=404)
-    return HTMLResponse(content=html_content)
-
-@app.get("/script.js", response_class=HTMLResponse)
-async def script():
-    html_content = read_html_file("script.js")
-    if html_content is None:
-        return HTMLResponse(content="<h1>File not found</h1>", status_code=404)
-    return HTMLResponse(content=html_content)
-
-@app.get("/style.css", response_class=HTMLResponse)
-async def style():
-    html_content = read_html_file("style.css")
-    if html_content is None:
-        return HTMLResponse(content="<h1>File not found</h1>", status_code=404)
-    return HTMLResponse(content=html_content)
-
-@app.get("/dynamo", response_class=HTMLResponse)
-async def dynamic_ai_page(request: Request):
-    user_agent = request.headers.get('user-agent', 'Unknown User')
-    client_ip = request.client.host
-    location = f"IP: {client_ip}"
-
-    prompt = f"""
-    Generate a dynamic HTML page for a user with the following details: with name "LOKI.AI"
-    - User-Agent: {user_agent}
-    - Location: {location}
-    - Style: Cyberpunk, minimalist, or retro
-
-    Make sure the HTML is clean and includes a heading, also have cool animations a motivational message, and a cool background.
-    Wrap the generated HTML in triple backticks (```).
-    """
-
-    payload = {
-        "model": "mistral-small-latest",
-        "messages": [{"role": "user", "content": prompt}]
-    }
-
-    headers = {
-        "Authorization": "Bearer playground"
-    }
-
-    response = requests.post("https://parthsadaria-lokiai.hf.space/chat/completions", json=payload, headers=headers)
-    data = response.json()
-
-    html_content = re.search(r"```(.*?)```", data['choices'][0]['message']['content'], re.DOTALL)
-    if html_content:
-        html_content = html_content.group(1).strip()
-
-    if html_content:
-        html_content = ' '.join(html_content.split(' ')[1:])
-
-    return HTMLResponse(content=html_content)
-
-@app.get("/scraper", response_class=PlainTextResponse)
-def scrape_site(url: str = Query(..., description="URL to scrape")):
     try:
         scraper = cloudscraper.create_scraper()
         response = scraper.get(url)
-
-
     except Exception as e:
-
-        return "Cloudscraper failed."
-
-@app.get("/playground", response_class=HTMLResponse)
-async def playground():
-    html_content = read_html_file("playground.html")
-    if html_content is None:
-        return HTMLResponse(content="<h1>playground.html not found</h1>", status_code=404)
-    return HTMLResponse(content=html_content)
-
-@app.get("/image-playground", response_class=HTMLResponse)
-async def image_playground():
-    html_content = read_html_file("image-playground.html")
-    if html_content is None:
-        return HTMLResponse(content="<h1>image-playground.html not found</h1>", status_code=404)
-    return HTMLResponse(content=html_content)
-
-GITHUB_BASE = "https://raw.githubusercontent.com/Parthsadaria/Vetra/main"
-
-FILES = {
-    "html": "index.html",
-    "css": "style.css",
-    "js": "script.js"
-}
 
-async def get_github_file(filename: str) -> str:
-    url = f"{GITHUB_BASE}/{filename}"
-    async with httpx.AsyncClient() as client:
-        res = await client.get(url)
-        return res.text if res.status_code == 200 else None
-
-@app.get("/vetra", response_class=HTMLResponse)
-async def serve_vetra():
-    html = await get_github_file(FILES["html"])
-    css = await get_github_file(FILES["css"])
-    js = await get_github_file(FILES["js"])
-
-    if not html:
-        return HTMLResponse(content="<h1>index.html not found on GitHub</h1>", status_code=404)
-
-    final_html = html.replace(
-        "</head>",
-        f"<style>{css or '/* CSS not found */'}</style></head>"
-    ).replace(
-        "</body>",
-        f"<script>{js or '// JS not found'}</script></body>"
-    )
-
-    return HTMLResponse(content=final_html)
-
-@app.get("/api/v1/models")
-@app.get("/models")
-async def return_models():
-    return await get_models()
-
-@app.get("/searchgpt")
-async def search_gpt(q: str, stream: Optional[bool] = False, systemprompt: Optional[str] = None):
-    if not q:
-        raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
-
-    usage_tracker.record_request(endpoint="/searchgpt")
-
-    queue = await generate_search_async(q, systemprompt=systemprompt, stream=True)
-
-    if stream:
-        async def stream_generator():
-            collected_text = ""
-            while True:
-                item = await queue.get()
-                if item is None:
-                    break
-
-                if "error" in item:
-                    yield f"data: {json.dumps({'error': item['error']})}\n\n"
-                    break
-
-                if "data" in item:
-                    yield item["data"]
-                    collected_text += item.get("text", "")
-
-        return StreamingResponse(
-            stream_generator(),
-            media_type="text/event-stream"
-        )
-    else:
-        collected_text = ""
-        while True:
-            item = await queue.get()
-            if item is None:
-                break
-
-            if "error" in item:
-                raise HTTPException(status_code=500, detail=item["error"])
-
-            collected_text += item.get("text", "")
-
-        return JSONResponse(content={"response": collected_text})
-
-header_url = os.getenv('HEADER_URL')
-@app.post("/chat/completions")
-@app.post("/api/v1/chat/completions")
-async def get_completion(payload: Payload, request: Request, authenticated: bool = Depends(verify_api_key)):
-    if not server_status:
-        return JSONResponse(
-            status_code=503,
-            content={"message": "Server is under maintenance. Please try again later."}
-        )
-
-    model_to_use = payload.model or "gpt-4o-mini"
-
-    if available_model_ids and model_to_use not in set(available_model_ids):
-        raise HTTPException(
-            status_code=400,
-            detail=f"Model '{model_to_use}' is not available. Check /models for the available model list."
-        )
-
-    asyncio.create_task(log_request(request, model_to_use))
-    usage_tracker.record_request(model=model_to_use, endpoint="/chat/completions")
-
-    payload_dict = payload.dict()
-    payload_dict["model"] = model_to_use
-
-    stream_enabled = payload_dict.get("stream", True)
-
-    env_vars = get_env_vars()
-
-    target_url_path = "/v1/chat/completions"  # Default path
-
-    if model_to_use in mistral_models:
-        endpoint = env_vars['mistral_api']
-        custom_headers = {
-            "Authorization": f"Bearer {env_vars['mistral_key']}"
-        }
-    elif model_to_use in pollinations_models:
-        endpoint = env_vars['secret_api_endpoint_4']
-        custom_headers = {}
-    elif model_to_use in alternate_models:
-        endpoint = env_vars['secret_api_endpoint_2']
-        custom_headers = {}
-    elif model_to_use in claude_3_models:
-        endpoint = env_vars['secret_api_endpoint_5']
-        custom_headers = {}
-    elif model_to_use in gemini_models:  # Handle Gemini models
-        endpoint = env_vars['secret_api_endpoint_6']
-        if not endpoint:
-            raise HTTPException(status_code=500, detail="Gemini API endpoint not configured")
-        if not env_vars['gemini_key']:
-            raise HTTPException(status_code=500, detail="GEMINI_KEY not configured")
-        custom_headers = {
-            "Authorization": f"Bearer {env_vars['gemini_key']}"
-        }
-        target_url_path = "/chat/completions"  # Use /chat/completions for Gemini
-    else:
-        endpoint = env_vars['secret_api_endpoint']
-        custom_headers = {
-            "Origin": header_url,
-            "Priority": "u=1, i",
-            "Referer": header_url
-        }
-
-    print(f"Using endpoint: {endpoint} with path: {target_url_path} for model: {model_to_use}")
-
-    async def real_time_stream_generator():
-        try:
-            async with httpx.AsyncClient(timeout=60.0) as client:
-                async with client.stream("POST", f"{endpoint}{target_url_path}", json=payload_dict, headers=custom_headers) as response:
-                    if response.status_code >= 400:
-                        error_messages = {
-                            422: "Unprocessable entity. Check your payload.",
-                            400: "Bad request. Verify input data.",
-                            403: "Forbidden. You do not have access to this resource.",
-                            404: "The requested resource was not found.",
-                        }
-                        detail = error_messages.get(response.status_code, f"Error code: {response.status_code}")
-                        raise HTTPException(status_code=response.status_code, detail=detail)
-
-                    async for line in response.aiter_lines():
-                        if line:
-                            yield line + "\n"
-        except httpx.TimeoutException:
-            raise HTTPException(status_code=504, detail="Request timed out")
-        except httpx.RequestError as e:
-            raise HTTPException(status_code=502, detail=f"Failed to connect to upstream API: {str(e)}")
-        except Exception as e:
-            if isinstance(e, HTTPException):
-                raise e
-            raise HTTPException(status_code=500, detail=f"An error occurred: {str(e)}")
-
-    if stream_enabled:
-        return StreamingResponse(
-            real_time_stream_generator(),
-            media_type="text/event-stream",
-            headers={
-                "Content-Type": "text/event-stream",
-                "Cache-Control": "no-cache",
-                "Connection": "keep-alive",
-                "X-Accel-Buffering": "no"
-            }
-        )
-    else:
-        response_content = []
-        async for chunk in real_time_stream_generator():
-            response_content.append(chunk)
-        return JSONResponse(content=json.loads(''.join(response_content)))
-@app.post("/images/generations")
-async def create_image(payload: ImageGenerationPayload, authenticated: bool = Depends(verify_api_key)):
-    if not server_status:
-        return JSONResponse(
-            status_code=503,
-            content={"message": "Server is under maintenance. Please try again later."}
-        )
-
-    if payload.model not in supported_image_models:
-        raise HTTPException(
-            status_code=400,
-            detail=f"Model '{payload.model}' is not supported for image generation. Supported models are: {supported_image_models}"
-        )
-
-    usage_tracker.record_request(model=payload.model, endpoint="/images/generations")
-
-    api_payload = {
-        "model": payload.model,
-        "prompt": payload.prompt,
-        "size": payload.size,
-        "number": payload.number
-    }
-
-    target_api_url = os.getenv('NEW_IMG')
-
-    try:
-        async with httpx.AsyncClient(timeout=60.0) as client:
-            response = await client.post(target_api_url, json=api_payload)
 
-
-
-
 
-
 
-
-
-
-
     except Exception as e:
-
 
-
-
-
-
 
-@lru_cache(maxsize=10)
-def get_usage_summary(days=7):
-    return usage_tracker.get_usage_summary(days)
-
-@app.get("/usage")
-async def get_usage(days: int = 7):
-    return get_usage_summary(days)
-
-def generate_usage_html(usage_data):
-    model_usage_rows = "\n".join([
-        f"""
-        <tr>
-            <td>{model}</td>
-            <td>{model_data['total_requests']}</td>
-            <td>{model_data['first_used']}</td>
-            <td>{model_data['last_used']}</td>
-        </tr>
-        """ for model, model_data in usage_data['models'].items()
-    ])
 
-
-
-
-
-
-
-
-        </tr>
-        """ for endpoint, endpoint_data in usage_data['api_endpoints'].items()
-    ])
 
-
-
-
-
-
-
-
-
-
     ])
 
-
     <!DOCTYPE html>
     <html lang="en">
     <head>
         <meta charset="UTF-8">
-        <
-        <
         <style>
-
-
-
-
-
-
-
-
-            }}
-
-
-
-
-
-                margin: 0 auto;
-                padding: 40px 20px;
-                line-height: 1.6;
-            }}
-            .logo {{
-                display: flex;
-                align-items: center;
-                justify-content: center;
-                margin-bottom: 30px;
-            }}
-            .logo h1 {{
-                font-weight: 600;
-                font-size: 2.5em;
-                color: var(--text-primary);
-                margin-left: 15px;
-            }}
-            .logo img {{
-                width: 60px;
-                height: 60px;
-                border-radius: 10px;
-            }}
-            .container {{
-                background-color: var(--bg-darker);
-                border-radius: 12px;
-                padding: 30px;
-                box-shadow: 0 15px 40px rgba(0,0,0,0.3);
-                border: 1px solid var(--border-color);
-            }}
-            h2, h3 {{
-                color: var(--text-primary);
-                border-bottom: 2px solid var(--border-color);
-                padding-bottom: 10px;
-                font-weight: 500;
-            }}
-            .total-requests {{
-                background-color: var(--accent-color);
-                color: white;
-                text-align: center;
-                padding: 15px;
-                border-radius: 8px;
-                margin-bottom: 30px;
-                font-weight: 600;
-                letter-spacing: -0.5px;
-            }}
-            table {{
-                width: 100%;
-                border-collapse: separate;
-                border-spacing: 0;
-                margin-bottom: 30px;
-                background-color: var(--bg-dark);
-                border-radius: 8px;
-                overflow: hidden;
-            }}
-            th, td {{
-                border: 1px solid var(--border-color);
-                padding: 12px;
-                text-align: left;
-                transition: background-color 0.3s ease;
-            }}
-            th {{
-                background-color: #1e1e1e;
-                color: var(--text-primary);
-                font-weight: 600;
-                text-transform: uppercase;
-                font-size: 0.9em;
-            }}
-            tr:nth-child(even) {{
-                background-color: rgba(255,255,255,0.05);
-            }}
-            tr:hover {{
-                background-color: rgba(62,100,255,0.1);
-            }}
-            @media (max-width: 768px) {{
-                .container {{
-                    padding: 15px;
-                }}
-                table {{
-                    font-size: 0.9em;
-                }}
-            }}
         </style>
     </head>
     <body>
         <div class="container">
-            <div class="
-
-                <
             </div>
-
-
-
             </div>
-
-        <h2>Model Usage</h2>
-        <table>
-            <tr>
-                <th>Model</th>
-                <th>Total Requests</th>
-                <th>First Used</th>
-                <th>Last Used</th>
-            </tr>
-            {model_usage_rows}
-        </table>
-
-        <h2>API Endpoint Usage</h2>
-        <table>
-            <tr>
-                <th>Endpoint</th>
-                <th>Total Requests</th>
-                <th>First Used</th>
-                <th>Last Used</th>
-            </tr>
-            {api_usage_rows}
-        </table>
-
-        <h2>Daily Usage (Last 7 Days)</h2>
-        <table>
-            <tr>
-                <th>Date</th>
-                <th>Entity</th>
-                <th>Requests</th>
-            </tr>
-            {daily_usage_rows}
-        </table>
        </div>
    </body>
    </html>
    """
-
-
-@lru_cache(maxsize=1)
-def get_usage_page_html():
-    usage_data = get_usage_summary()
-    return generate_usage_html(usage_data)
-
-@app.get("/usage/page", response_class=HTMLResponse)
-async def usage_page():
-    html_content = get_usage_page_html()
-    return HTMLResponse(content=html_content)
-
-@app.get("/meme")
-async def get_meme():
-    try:
-        client = get_async_client()
-        response = await client.get("https://meme-api.com/gimme")
-        response_data = response.json()
-
-        meme_url = response_data.get("url")
-        if not meme_url:
-            raise HTTPException(status_code=404, detail="No meme found")
-
-        image_response = await client.get(meme_url, follow_redirects=True)
-
-        async def stream_with_larger_chunks():
-            chunks = []
-            size = 0
-            async for chunk in image_response.aiter_bytes(chunk_size=16384):
-                chunks.append(chunk)
-                size += len(chunk)
-                if size >= 65536:
-                    yield b''.join(chunks)
-                    chunks = []
-                    size = 0
-            if chunks:
-                yield b''.join(chunks)
-
-        return StreamingResponse(
-            stream_with_larger_chunks(),
-            media_type=image_response.headers.get("content-type", "image/png"),
-            headers={'Cache-Control': 'max-age=3600'}
-        )
-    except Exception:
-        raise HTTPException(status_code=500, detail="Failed to retrieve meme")
-
-def load_model_ids(json_file_path):
-    try:
-        with open(json_file_path, 'r') as f:
-            models_data = json.load(f)
-        return [model['id'] for model in models_data if 'id' in model]
-    except Exception as e:
-        print(f"Error loading model IDs: {str(e)}")
-        return []
-
-@app.on_event("startup")
-async def startup_event():
-    global available_model_ids
-    available_model_ids = load_model_ids("models.json")
-    print(f"Loaded {len(available_model_ids)} model IDs")
-
-    available_model_ids.extend(list(pollinations_models))
-    available_model_ids.extend(list(alternate_models))
-    available_model_ids.extend(list(mistral_models))
-    available_model_ids.extend(list(claude_3_models))
-    available_model_ids.extend(list(gemini_models))  # Add Gemini models
-
-    available_model_ids = list(set(available_model_ids))
-    print(f"Total available models: {len(available_model_ids)}")
-
-    for _ in range(MAX_SCRAPERS):
-        scraper_pool.append(cloudscraper.create_scraper())
-
-    env_vars = get_env_vars()
-    missing_vars = []
-
-    if not env_vars['api_keys'] or env_vars['api_keys'] == ['']:
-        missing_vars.append('API_KEYS')
-    if not env_vars['secret_api_endpoint']:
-        missing_vars.append('SECRET_API_ENDPOINT')
-    if not env_vars['secret_api_endpoint_2']:
-        missing_vars.append('SECRET_API_ENDPOINT_2')
-    if not env_vars['secret_api_endpoint_3']:
-        missing_vars.append('SECRET_API_ENDPOINT_3')
-    if not env_vars['secret_api_endpoint_4']:
-        missing_vars.append('SECRET_API_ENDPOINT_4')
-    if not env_vars['secret_api_endpoint_5']:
-        missing_vars.append('SECRET_API_ENDPOINT_5')
-    if not env_vars['secret_api_endpoint_6']:  # Check the new endpoint
-        missing_vars.append('SECRET_API_ENDPOINT_6')
-    if not env_vars['mistral_api'] and any(model in mistral_models for model in available_model_ids):
-        missing_vars.append('MISTRAL_API')
-    if not env_vars['mistral_key'] and any(model in mistral_models for model in available_model_ids):
-        missing_vars.append('MISTRAL_KEY')
-    if not env_vars['gemini_key'] and any(model in gemini_models for model in available_model_ids):  # Check Gemini key
-        missing_vars.append('GEMINI_KEY')
-
-    if missing_vars:
-        print(f"WARNING: The following environment variables are missing: {', '.join(missing_vars)}")
-        print("Some functionality may be limited.")
-
-    print("Server started successfully!")
-
-@app.on_event("shutdown")
-async def shutdown_event():
-    client = get_async_client()
-    await client.aclose()
-    scraper_pool.clear()
-    usage_tracker.save_data()
-    print("Server shutdown complete!")
-
-@app.get("/health")
-async def health_check():
-    env_vars = get_env_vars()
-    missing_critical_vars = []
-
-    if not env_vars['api_keys'] or env_vars['api_keys'] == ['']:
-        missing_critical_vars.append('API_KEYS')
-    if not env_vars['secret_api_endpoint']:
-        missing_critical_vars.append('SECRET_API_ENDPOINT')
-    if not env_vars['secret_api_endpoint_2']:
-        missing_critical_vars.append('SECRET_API_ENDPOINT_2')
-    if not env_vars['secret_api_endpoint_3']:
-        missing_critical_vars.append('SECRET_API_ENDPOINT_3')
-    if not env_vars['secret_api_endpoint_4']:
-        missing_critical_vars.append('SECRET_API_ENDPOINT_4')
-    if not env_vars['secret_api_endpoint_5']:
-        missing_critical_vars.append('SECRET_API_ENDPOINT_5')
-    if not env_vars['secret_api_endpoint_6']:  # Check the new endpoint
-        missing_critical_vars.append('SECRET_API_ENDPOINT_6')
-    if not env_vars['mistral_api']:
-        missing_critical_vars.append('MISTRAL_API')
-    if not env_vars['mistral_key']:
-        missing_critical_vars.append('MISTRAL_KEY')
-    if not env_vars['gemini_key']:  # Check Gemini key
-        missing_critical_vars.append('GEMINI_KEY')
-
-    health_status = {
-        "status": "healthy" if not missing_critical_vars else "unhealthy",
-        "missing_env_vars": missing_critical_vars,
-        "server_status": server_status,
-        "message": "Everything's lit! 🚀" if not missing_critical_vars else "Uh oh, some env vars are missing. 😬"
-    }
-    return JSONResponse(content=health_status)
 
 if __name__ == "__main__":
-    import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=7860)
New version (added lines are marked "+"; unchanged context is unmarked):

 import os
 import json
 import datetime
 import asyncio
+import re
+from functools import lru_cache
+from pathlib import Path
+from typing import List, Dict, Any, Tuple, Optional
 
+import httpx
+import uvicorn
+from dotenv import load_dotenv
+from fastapi import FastAPI, HTTPException, Request, Depends, Security, Query, APIRouter
+from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse, FileResponse, PlainTextResponse
+from fastapi.security import APIKeyHeader
+from pydantic_settings import BaseSettings
+from pydantic import BaseModel, Field
+from starlette.middleware.cors import CORSMiddleware
+from starlette.middleware.gzip import GZipMiddleware
+from starlette.status import HTTP_403_FORBIDDEN, HTTP_503_SERVICE_UNAVAILABLE
 
+# Use cloudscraper for specific endpoints that need it
+try:
+    import cloudscraper
+except ImportError:
+    cloudscraper = None
 
 from usage_tracker import UsageTracker
 
+# --- Initial Setup ---
+load_dotenv()
+# Use uvloop for better performance if available
+try:
+    import uvloop
+    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+except ImportError:
+    pass
+
+# --- Configuration Management using Pydantic ---
+class Settings(BaseSettings):
+    """Manages all application settings and environment variables in one place."""
+    api_keys: List[str] = Field(..., env="API_KEYS")
+
+    # Endpoints for various model providers
+    secret_api_endpoint: str = Field(..., env="SECRET_API_ENDPOINT")
+    secret_api_endpoint_2: str = Field(..., env="SECRET_API_ENDPOINT_2")
+    secret_api_endpoint_3: str = Field(..., env="SECRET_API_ENDPOINT_3")
+    secret_api_endpoint_4: str = "https://text.pollinations.ai/openai"
+    secret_api_endpoint_5: str = Field(..., env="SECRET_API_ENDPOINT_5")
+    secret_api_endpoint_6: str = Field(..., env="SECRET_API_ENDPOINT_6")
+
+    # Specific provider keys and APIs
+    mistral_api: str = "https://api.mistral.ai"
+    mistral_key: str = Field(..., env="MISTRAL_KEY")
+    gemini_key: str = Field(..., env="GEMINI_KEY")
+    new_img_api: str = Field(..., env="NEW_IMG")
+
+    endpoint_origin: Optional[str] = Field(None, env="ENDPOINT_ORIGIN")
+    header_url: Optional[str] = Field(None, env="HEADER_URL")
+
+    class Config:
+        env_file = '.env'
+        env_file_encoding = 'utf-8'
+
+@lru_cache()
+def get_settings():
+    return Settings()
+
|
70 |
+
class ChatPayload(BaseModel):
|
71 |
model: str
|
72 |
+
messages: List[Dict[str, Any]]
|
73 |
stream: bool = False
|
74 |
|
75 |
class ImageGenerationPayload(BaseModel):
|
|
|
78 |
size: int
|
79 |
number: int
|
80 |
|
81 |
+
# --- Global Objects & State ---
|
82 |
+
app = FastAPI(
|
83 |
+
title="LokiAI API",
|
84 |
+
version="2.5.0",
|
85 |
+
description="A robust and scalable API proxy for various AI models, now fully rewritten.",
|
86 |
+
)
|
87 |
+
usage_tracker = UsageTracker()
|
88 |
+
api_key_header = APIKeyHeader(name="Authorization", auto_error=False)
|
89 |
+
server_status = {"online": True}
|
90 |
+
|
91 |
+
# --- Model & API Configuration ---
|
92 |
+
MODEL_SETS = {
|
93 |
+
"mistral": {"mistral-large-latest", "codestral-latest", "mistral-small-latest"},
|
94 |
+
"pollinations": {"openai", "gemini", "phi", "llama"},
|
95 |
+
"alternate": {"o1", "grok-3", "sonar-pro"},
|
96 |
+
"claude": {"claude-3-7-sonnet", "claude 3.5 sonnet", "o3-mini-medium"},
|
97 |
+
"gemini": {"gemini-1.5-pro", "gemini-1.5-flash", "gemini-2.0-flash"},
|
98 |
+
"image": {"Flux Pro Ultra", "dall-e-3", "stable-diffusion-3-large-turbo"},
|
99 |
+
}
|
100 |
+
|
101 |
+
def get_api_details(model_name: str, settings: Settings) -> Tuple[str, Dict, str]:
|
102 |
+
"""Returns the endpoint, headers, and path for a given model."""
|
103 |
+
if model_name in MODEL_SETS["mistral"]:
|
104 |
+
return settings.mistral_api, {"Authorization": f"Bearer {settings.mistral_key}"}, "/v1/chat/completions"
|
105 |
+
if model_name in MODEL_SETS["gemini"]:
|
106 |
+
return settings.secret_api_endpoint_6, {"Authorization": f"Bearer {settings.gemini_key}"}, "/chat/completions"
|
107 |
+
if model_name in MODEL_SETS["pollinations"]:
|
108 |
+
return settings.secret_api_endpoint_4, {}, "/v1/chat/completions"
|
109 |
+
if model_name in MODEL_SETS["claude"]:
|
110 |
+
return settings.secret_api_endpoint_5, {}, "/v1/chat/completions"
|
111 |
+
if model_name in MODEL_SETS["alternate"]:
|
112 |
+
return settings.secret_api_endpoint_2, {}, "/v1/chat/completions"
|
113 |
+
if model_name in MODEL_SETS["image"]:
|
114 |
+
return settings.new_img_api, {}, ""
|
115 |
+
|
116 |
+
# Default case
|
117 |
+
headers = {
|
118 |
+
"Origin": settings.header_url, "Referer": settings.header_url
|
119 |
+
} if settings.header_url else {}
|
120 |
+
return settings.secret_api_endpoint, headers, "/v1/chat/completions"
|
121 |
|
122 |
+
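get_api_details collapses the old if/elif endpoint selection into a single lookup table. For illustration, this is how a Mistral request resolves (a sketch, assuming a constructed Settings as above):

    endpoint, headers, path = get_api_details("mistral-large-latest", get_settings())
    # endpoint -> "https://api.mistral.ai"
    # headers  -> {"Authorization": "Bearer <MISTRAL_KEY>"}
    # path     -> "/v1/chat/completions"
    url = f"{endpoint}{path}"  # the upstream URL the proxy will POST the payload to
    # Any model not listed in MODEL_SETS falls through to secret_api_endpoint,
    # with Origin/Referer headers added only when HEADER_URL is configured.
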
+# --- Dependencies & Security ---
+async def get_api_key(request: Request, api_key: str = Security(api_key_header)):
+    """Validates the API key, allowing specific referers to bypass."""
+    referer = request.headers.get("referer", "")
+    if referer and "parthsadaria-lokiai.hf.space" in referer:
+        return "hf_space_bypass"
+
+    settings = get_settings()
+    if not api_key or not api_key.startswith("Bearer "):
+        raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Invalid authorization format.")
+
+    key = api_key.split(" ")[1]
+    if key not in settings.api_keys:
+        raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail="Invalid API key.")
+    return key
+
+@lru_cache()
+def get_http_client() -> httpx.AsyncClient:
+    return httpx.AsyncClient(timeout=60.0, limits=httpx.Limits(max_connections=200))
+
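get_api_key accepts either an Authorization header of the form "Bearer <key>" whose key appears in settings.api_keys, or a referer from the hosted playground; anything else is rejected with 403. A client-side sketch (the host, port, and key value are placeholders):

    import httpx

    resp = httpx.post(
        "http://localhost:7860/chat/completions",
        headers={"Authorization": "Bearer key-one"},   # must be listed in API_KEYS
        json={
            "model": "mistral-small-latest",
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": True,
        },
        timeout=60.0,
    )

Note also the design choice in get_http_client(): wrapping it in lru_cache() means one shared AsyncClient serves every request, keeping connection pools warm, instead of the old pattern of opening a fresh client per call; it is closed once at shutdown.
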
+# --- API Routers ---
+chat_router = APIRouter(tags=["AI Models"])
+image_router = APIRouter(tags=["AI Models"])
+usage_router = APIRouter(tags=["Server Administration"])
+utility_router = APIRouter(tags=["Utilities & Pages"])
+
+# --- Chat Completions Router ---
+@chat_router.post("/chat/completions")
+async def chat_completions(
+    payload: ChatPayload,
     request: Request,
+    api_key: str = Depends(get_api_key),
+    client: httpx.AsyncClient = Depends(get_http_client)
+):
+    if not server_status["online"]:
+        raise HTTPException(status_code=HTTP_503_SERVICE_UNAVAILABLE, detail="Server under maintenance.")
 
+    settings = get_settings()
+    usage_tracker.record_request(request, payload.model, "/chat/completions")
+    endpoint, headers, path = get_api_details(payload.model, settings)
 
+    async def stream_generator():
+        try:
+            async with client.stream("POST", f"{endpoint}{path}", json=payload.dict(), headers=headers) as response:
+                response.raise_for_status()
+                async for chunk in response.aiter_bytes():
+                    yield chunk
+        except httpx.HTTPStatusError as e:
+            print(f"Upstream error: {e.response.status_code} - {e.response.text}")
+            yield json.dumps({"error": {"code": 502, "message": "Bad Gateway: Upstream service error."}}).encode()
+        except Exception as e:
+            print(f"Streaming error: {e}")
+            yield json.dumps({"error": {"code": 500, "message": "An internal error occurred."}}).encode()
 
+    return StreamingResponse(stream_generator(), media_type="text/event-stream")
 
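Note that chat_completions now always answers with a StreamingResponse: the old buffered non-stream branch is gone, and upstream bytes are relayed as they arrive. Consuming the stream from Python might look like this (a sketch; host, key, and model are placeholders):

    import asyncio
    import httpx

    async def consume():
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream(
                "POST",
                "http://localhost:7860/chat/completions",
                headers={"Authorization": "Bearer key-one"},
                json={"model": "openai", "messages": [{"role": "user", "content": "Hi"}], "stream": True},
            ) as response:
                async for line in response.aiter_lines():
                    if line.startswith("data: "):
                        print(line[len("data: "):])  # one OpenAI-style SSE event per line

    asyncio.run(consume())

One caveat worth knowing: because errors inside stream_generator are yielded as JSON after the 200 status line has already been sent, clients should be prepared for an error object in the body rather than an HTTP error code.
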
+# --- Image Generation Router ---
+@image_router.post("/images/generations")
+async def images_generations(
+    payload: ImageGenerationPayload,
     request: Request,
+    api_key: str = Depends(get_api_key),
+    client: httpx.AsyncClient = Depends(get_http_client)
+):
+    if not server_status["online"]:
+        raise HTTPException(status_code=HTTP_503_SERVICE_UNAVAILABLE, detail="Server under maintenance.")
+
+    if payload.model not in MODEL_SETS["image"]:
+        raise HTTPException(status_code=400, detail=f"Image model '{payload.model}' not supported.")
+
+    settings = get_settings()
+    usage_tracker.record_request(request, payload.model, "/images/generations")
+    endpoint, headers, _ = get_api_details(payload.model, settings)
+
     try:
+        response = await client.post(endpoint, json=payload.dict(), headers=headers)
+        response.raise_for_status()
+        return JSONResponse(content=response.json())
+    except httpx.HTTPStatusError as e:
+        raise HTTPException(status_code=e.response.status_code, detail=e.response.json().get("detail", "Upstream error"))
+    except httpx.RequestError as e:
+        raise HTTPException(status_code=502, detail=f"Failed to connect to image service: {e}")
+
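The image route is a plain request/response proxy rather than a stream. The payload mirrors ImageGenerationPayload; the diff hides lines 76-77, which presumably declare the model and prompt fields, since both were referenced in the old handler and model is referenced here. An illustrative call (host, key, and the exact field set are assumptions):

    import httpx

    resp = httpx.post(
        "http://localhost:7860/images/generations",
        headers={"Authorization": "Bearer key-one"},
        json={"model": "dall-e-3", "prompt": "a lighthouse at dawn", "size": 1024, "number": 1},
        timeout=60.0,
    )
    print(resp.status_code, resp.json())
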
# --- Usage & Health Router ---
|
206 |
+
@usage_router.get("/usage", response_class=HTMLResponse)
|
207 |
+
async def get_usage_dashboard(days: int = Query(7, ge=1, le=30)):
|
208 |
+
summary = usage_tracker.get_usage_summary(days=days)
|
209 |
+
# The generate_usage_html function from the previous version can be used here directly
|
210 |
+
# It has been moved to a separate file or helper for cleanliness in a real app
|
211 |
+
# For this example, it's defined below for completeness.
|
212 |
+
from usage_dashboard_generator import generate_usage_html
|
213 |
+
return HTMLResponse(content=generate_usage_html(summary))
|
214 |
+
|
215 |
+
@usage_router.get("/health")
|
216 |
+
async def health_check():
|
217 |
+
return {"status": "healthy" if server_status["online"] else "unhealthy", "version": app.version}
|
218 |
|
219 |
+
@usage_router.get("/models")
|
220 |
async def get_models():
|
221 |
+
try:
|
222 |
+
with open(Path(__file__).parent / 'models.json', 'r') as f:
|
223 |
+
return json.load(f)
|
224 |
+
except Exception:
|
225 |
+
raise HTTPException(status_code=500, detail="models.json not found or is invalid.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
226 |
|
227 |
+
# --- Utility & Pages Router ---
|
228 |
@lru_cache(maxsize=10)
|
229 |
+
def read_static_file(file_path):
|
230 |
try:
|
231 |
+
with open(Path(__file__).parent / file_path, "r", encoding="utf-8") as file:
|
232 |
return file.read()
|
233 |
except FileNotFoundError:
|
234 |
return None
|
235 |
|
236 |
+
@utility_router.get("/", response_class=HTMLResponse)
|
237 |
+
async def root_page():
|
238 |
+
return HTMLResponse(content=read_static_file("index.html") or "<h1>Not Found</h1>")
|
239 |
+
|
240 |
+
@utility_router.get("/playground", response_class=HTMLResponse)
|
241 |
+
async def playground_page():
|
242 |
+
return HTMLResponse(content=read_static_file("playground.html") or "<h1>Not Found</h1>")
|
243 |
+
|
244 |
+
@utility_router.get("/image-playground", response_class=HTMLResponse)
|
245 |
+
async def image_playground_page():
|
246 |
+
return HTMLResponse(content=read_static_file("image-playground.html") or "<h1>Not Found</h1>")
|
247 |
+
|
248 |
+
@utility_router.get("/scraper", response_class=PlainTextResponse)
|
249 |
+
async def scrape_url(url: str = Query(..., description="URL to scrape")):
|
250 |
+
if not cloudscraper:
|
251 |
+
raise HTTPException(status_code=501, detail="Scraper library not installed.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
252 |
try:
|
253 |
scraper = cloudscraper.create_scraper()
|
254 |
response = scraper.get(url)
|
255 |
+
response.raise_for_status()
|
256 |
+
return PlainTextResponse(content=response.text)
|
257 |
except Exception as e:
|
258 |
+
raise HTTPException(status_code=500, detail=f"Failed to scrape URL: {e}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
259 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
260 |
|
261 |
+
# --- Main Application Setup ---
|
262 |
+
app.add_middleware(GZipMiddleware, minimum_size=1000)
|
263 |
+
app.add_middleware(
|
264 |
+
CORSMiddleware,
|
265 |
+
allow_origins=["*"],
|
266 |
+
allow_credentials=True,
|
267 |
+
allow_methods=["*"],
|
268 |
+
allow_headers=["*"],
|
269 |
+
)
|
270 |
|
271 |
+
# Include all the organized routers
|
272 |
+
app.include_router(chat_router, prefix="/api/v1")
|
273 |
+
app.include_router(chat_router) # For legacy /chat/completions
|
274 |
+
app.include_router(image_router, prefix="/api/v1")
|
275 |
+
app.include_router(image_router) # For legacy /images/generations
|
276 |
+
app.include_router(usage_router)
|
277 |
+
app.include_router(utility_router)
|
278 |
|
279 |
+
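Mounting chat_router and image_router twice, once bare and once under /api/v1, is what preserves the legacy paths that the old @app.post decorators served. The resulting route table can be checked directly (sketch):

    # Both generations of paths resolve to the same handlers:
    #   POST /chat/completions            POST /api/v1/chat/completions
    #   POST /images/generations          POST /api/v1/images/generations
    for route in app.routes:
        print(getattr(route, "methods", None), route.path)
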
+@app.on_event("startup")
+async def startup_event():
+    # Pre-load settings and client to catch config errors early
+    try:
+        get_settings()
     except Exception as e:
+        print(f"FATAL: Could not load settings from environment variables. Error: {e}")
+        # In a real app, you might want to exit here
+    get_http_client()
+    print("--- LokiAI Server Started ---")
+    print(f"Version: {app.version}")
+    print("Usage tracking is active and will save data periodically.")
 
+@app.on_event("shutdown")
+async def shutdown_event():
+    client = get_http_client()
+    await client.aclose()
+    usage_tracker.save_data()
+    print("--- LokiAI Server Shutdown Complete ---")
 
 
+# Helper for usage dashboard - in a real project, this would be in its own file
+# I'm creating it here to make the example self-contained
+if not (Path(__file__).parent / "usage_dashboard_generator.py").exists():
+    with open(Path(__file__).parent / "usage_dashboard_generator.py", "w") as f:
+        f.write('''
+import json
+import datetime
 
+def generate_usage_html(usage_data: dict) -> str:
+    model_labels = json.dumps(list(usage_data['model_usage'].keys()))
+    model_values = json.dumps(list(usage_data['model_usage'].values()))
+    daily_labels = json.dumps(list(usage_data['daily_usage'].keys()))
+    daily_values = json.dumps([v['requests'] for v in usage_data['daily_usage'].values()])
+
+    recent_requests_rows = "".join([
+        f"""<tr>
+            <td>{datetime.datetime.fromisoformat(req['timestamp']).strftime('%Y-%m-%d %H:%M:%S')}</td>
+            <td>{req['model']}</td>
+            <td>{req['endpoint']}</td>
+            <td>{req['ip_address']}</td>
+        </tr>""" for req in usage_data['recent_requests']
     ])
 
+    return f"""
     <!DOCTYPE html>
     <html lang="en">
     <head>
         <meta charset="UTF-8">
+        <meta name="viewport" content="width=device-width, initial-scale=1.0">
+        <title>LokiAI - Usage Statistics</title>
+        <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
+        <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&display=swap" rel="stylesheet">
         <style>
+            body {{ font-family: 'Inter', sans-serif; background-color: #0B0F19; color: #E0E0E0; margin: 0; padding: 20px; }}
+            .container {{ max-width: 1400px; margin: auto; }}
+            h1, h2 {{ color: #FFFFFF; }}
+            .header {{ text-align: center; margin-bottom: 40px; }}
+            .header h1 {{ font-size: 3em; font-weight: 700; }}
+            .stats-grid {{ display: grid; grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); gap: 20px; margin-bottom: 40px; }}
+            .chart-grid {{ display: grid; grid-template-columns: 1fr 1fr; gap: 20px; margin-bottom: 40px; }}
+            .stat-card, .chart-container, .table-container {{ background: #1A2035; padding: 25px; border-radius: 12px; border: 1px solid #2A3045; }}
+            .stat-card h3 {{ margin-top: 0; color: #8E95A9; font-size: 1em; font-weight: 600; text-transform: uppercase; }}
+            .stat-card .value {{ font-size: 2.5em; font-weight: 700; color: #FFFFFF; }}
+            table {{ width: 100%; border-collapse: collapse; }}
+            th, td {{ padding: 14px; text-align: left; border-bottom: 1px solid #2A3045; }}
+            th {{ background-color: #2A3045; font-weight: 600; }}
+            @media (max-width: 768px) {{ .chart-grid {{ grid-template-columns: 1fr; }} }}
         </style>
     </head>
     <body>
         <div class="container">
+            <div class="header"><h1>LokiAI Usage Dashboard</h1></div>
+            <div class="stats-grid">
+                <div class="stat-card"><h3>Total Requests</h3><p class="value">{usage_data['total_requests']}</p></div>
+                <div class="stat-card"><h3>Unique IPs (All Time)</h3><p class="value">{usage_data['unique_ip_count']}</p></div>
+                <div class="stat-card"><h3>Models Used (Last 7 Days)</h3><p class="value">{len(usage_data['model_usage'])}</p></div>
             </div>
+            <div class="chart-grid">
+                <div class="chart-container"><canvas id="dailyUsageChart"></canvas></div>
+                <div class="chart-container"><canvas id="modelUsageChart"></canvas></div>
+            </div>
+            <div class="table-container">
+                <h2>Recent Requests</h2>
+                <table>
+                    <thead><tr><th>Timestamp (UTC)</th><th>Model</th><th>Endpoint</th><th>IP Address</th></tr></thead>
+                    <tbody>{recent_requests_rows}</tbody>
+                </table>
             </div>
         </div>
+        <script>
+            const chartOptions = (ticksColor, gridColor) => ({{
+                plugins: {{ legend: {{ labels: {{ color: ticksColor }} }} }},
+                scales: {{
+                    y: {{ ticks: {{ color: ticksColor }}, grid: {{ color: gridColor }} }},
+                    x: {{ ticks: {{ color: ticksColor }}, grid: {{ color: 'transparent' }} }}
+                }}
+            }});
+            new Chart(document.getElementById('dailyUsageChart'), {{
+                type: 'line',
+                data: {{ labels: {daily_labels}, datasets: [{{ label: 'Requests per Day', data: {daily_values}, borderColor: '#3a6ee0', tension: 0.1, backgroundColor: 'rgba(58, 110, 224, 0.2)', fill: true }}] }},
+                options: chartOptions('#E0E0E0', '#2A3045')
+            }});
+            new Chart(document.getElementById('modelUsageChart'), {{
+                type: 'doughnut',
+                data: {{ labels: {model_labels}, datasets: [{{ label: 'Model Usage', data: {model_values}, backgroundColor: ['#3A6EE0', '#E94F37', '#44AF69', '#F4D35E', '#A06CD5'] }}] }},
+                options: {{ plugins: {{ legend: {{ position: 'right', labels: {{ color: '#E0E0E0' }} }} }} }}
+            }});
+        </script>
     </body>
     </html>
     """
+''')
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
+
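A quick smoke test of the rewritten service (port as configured above; the version string comes from the FastAPI constructor):

    # python main.py  ->  serves on 0.0.0.0:7860
    import httpx

    print(httpx.get("http://localhost:7860/health").json())
    # expected: {"status": "healthy", "version": "2.5.0"}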