import json
import os
from collections import defaultdict
from typing import AsyncGenerator

import httpx
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse

app = FastAPI()

# Models available through the GitHub Models inference API (model ID -> display name)
AVAILABLE_MODELS = {
"openai/gpt-4.1": "OpenAI GPT-4.1",
"openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
"openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
"openai/gpt-4o": "OpenAI GPT-4o",
"openai/gpt-4o-mini": "OpenAI GPT-4o mini",
"openai/o4-mini": "OpenAI o4-mini",
"microsoft/MAI-DS-R1": "MAI-DS-R1",
"microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
"microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
"microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
"microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
"microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
"microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
"microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
"microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
"microsoft/Phi-4": "Phi-4",
"microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
"microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
"ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
"ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
"mistral-ai/Codestral-2501": "Codestral 25.01",
"cohere/Cohere-command-r": "Cohere Command R",
"cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
"cohere/Cohere-command-r-plus": "Cohere Command R+",
"cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
"deepseek/DeepSeek-R1": "DeepSeek-R1",
"deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
"meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
"meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
"meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
"meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
"meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
"meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
"meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
"meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
"meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
"meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
"mistral-ai/Ministral-3B": "Ministral 3B",
"mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
"mistral-ai/Mistral-Nemo": "Mistral Nemo",
"mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
"mistral-ai/Mistral-small": "Mistral Small",
"cohere/cohere-command-a": "Cohere Command A",
"core42/jais-30b-chat": "JAIS 30b Chat",
"mistral-ai/mistral-small-2503": "Mistral Small 3.1"
}
# In-memory history
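# (keyed by chat_id). Each entry is a chat-completions style message dict,
# e.g. {"role": "user", "content": "..."}. The store lives in process memory
# only, so histories are lost whenever the process restarts.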
chat_histories = defaultdict(list)

# Async generator that streams the model's SSE response and records the
# assembled assistant message in the chat history once streaming finishes.
async def generate_ai_response(chat_id: str, model: str) -> AsyncGenerator[str, None]:
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")
    if model not in AVAILABLE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Choose from: {', '.join(AVAILABLE_MODELS.keys())}",
        )

    headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": model,
        "messages": chat_histories[chat_id],
        "stream": True,
        "temperature": 1.0,
        "top_p": 1.0,
    }

    assistant_parts = []
    async with httpx.AsyncClient(timeout=60.0) as client:
        try:
            async with client.stream(
                "POST",
                "https://models.github.ai/inference/chat/completions",
                headers=headers,
                json=payload,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line.startswith("data:"):
                        continue
                    data = line[len("data:"):].strip()
                    if data == "[DONE]":
                        break
                    if not data:
                        continue
                    yield f"{data}\n"
                    # Accumulate the delta text so the complete reply can be
                    # appended to the history as a single assistant message.
                    try:
                        chunk = json.loads(data)
                        delta = chunk["choices"][0].get("delta", {}).get("content")
                        if delta:
                            assistant_parts.append(delta)
                    except (ValueError, KeyError, IndexError):
                        pass
        except Exception as e:
            yield f"Error: {str(e)}"

    if assistant_parts:
        chat_histories[chat_id].append({"role": "assistant", "content": "".join(assistant_parts)})

# Generate response endpoint
@app.post("/generate")
async def generate_response(
    chat_id: str = Query(..., description="Chat session ID"),
    prompt: str = Query(..., description="User input message"),
    model: str = Query("openai/gpt-4.1-mini", description="Model to use"),
):
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")
    chat_histories[chat_id].append({"role": "user", "content": prompt})
    return StreamingResponse(
        generate_ai_response(chat_id, model),
        media_type="text/event-stream",
    )
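# Example use (assumption: the app is served locally on port 8000, e.g. with
# `uvicorn app:app --port 8000`; the module name depends on this file's
# actual filename on the Space):
#
#   curl -N -X POST "http://localhost:8000/generate?chat_id=demo&prompt=Hello&model=openai/gpt-4.1-mini"
#
# The body streams back the raw JSON chunks of the chat-completions stream,
# one per line.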

# Reset chat history endpoint
@app.post("/reset")
async def reset_chat(chat_id: str = Query(...)):
    if chat_id in chat_histories:
        chat_histories[chat_id].clear()
        return {"message": f"Chat {chat_id} history reset."}
    raise HTTPException(status_code=404, detail="Chat ID not found")

def get_app():
    return app
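
# Local-run convenience entry point. A minimal sketch assuming uvicorn is
# installed; on the Space itself the server is typically started by the
# container's launch command rather than by executing this file directly.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)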