import os
import asyncio
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
from collections import defaultdict

app = FastAPI()

# Define available models

AVAILABLE_MODELS = {
    "openai/gpt-4.1": "OpenAI GPT-4.1",
    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
    "openai/gpt-4o": "OpenAI GPT-4o",
    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
    "openai/o4-mini": "OpenAI o4-mini",
    "microsoft/MAI-DS-R1": "MAI-DS-R1",
    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
    "microsoft/Phi-4": "Phi-4",
    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
    "mistral-ai/Codestral-2501": "Codestral 25.01",
    "cohere/Cohere-command-r": "Cohere Command R",
    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
    "cohere/Cohere-command-r-plus": "Cohere Command R+",
    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
    "deepseek/DeepSeek-R1": "DeepSeek-R1",
    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
    "mistral-ai/Ministral-3B": "Ministral 3B",
    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
    "mistral-ai/Mistral-small": "Mistral Small",
    "cohere/cohere-command-a": "Cohere Command A",
    "core42/jais-30b-chat": "JAIS 30b Chat",
    "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
}

# Chat memory (in-memory)

chat_histories = defaultdict(list)
MAX_HISTORY = 100  # limit memory to avoid crashes
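
# Illustrative sketch (not executed): the negative-slice idiom used below
# keeps only the most recent MAX_HISTORY messages per chat, e.g.:
#   history = [msg_0, ..., msg_149]
#   history[-MAX_HISTORY:]  # -> [msg_50, ..., msg_149]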

# Generate response stream

async def generate_ai_response(chat_id: str, model: str):
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        # By the time this generator runs, StreamingResponse has already
        # committed a 200 status, so raising HTTPException here cannot change
        # the status code; report errors in the stream body instead.
        yield "Error: GitHub token not configured."
        return

    endpoint = "https://models.github.ai/inference"

    if model not in AVAILABLE_MODELS:
        yield f"Error: Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}"
        return

    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    try:
        # The timeout bounds only the initial request; chunks stream after it.
        stream = await asyncio.wait_for(
            client.chat.completions.create(
                messages=chat_histories[chat_id],
                model=model,
                temperature=1.0,
                top_p=1.0,
                stream=True
            ),
            timeout=60  # Prevent hangs
        )

        # Accumulate the reply and store it in history once, rather than
        # appending one assistant message per streamed chunk.
        reply = ""
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                reply += content
                yield content

        chat_histories[chat_id].append({"role": "assistant", "content": reply})
        chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]

    except asyncio.TimeoutError:
        yield "Error: Response timed out."
    except Exception as err:
        yield f"Error: {str(err)}"
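
# Note: chunks above are yielded as raw text even though the endpoint below
# advertises text/event-stream. If strict Server-Sent Events framing is
# desired, a sketch of the alternative would wrap each chunk like:
#   yield f"data: {content}\n\n"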

# Chat endpoint

@app.post("/generate")
async def generate_response(
    chat_id: str = Query(..., description="Unique chat ID"),
    prompt: str = Query(..., description="User message"),
    model: str = Query("openai/gpt-4.1-mini", description="Model to use")
):
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")

    chat_histories[chat_id].append({"role": "user", "content": prompt})
    chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]

    return StreamingResponse(
        generate_ai_response(chat_id, model),
        media_type="text/event-stream"
    )
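
# Usage sketch (assumptions: server running on localhost:8000, httpx
# installed). Illustrative client, not part of the app:
#
#   import httpx
#
#   params = {"chat_id": "demo", "prompt": "Hello!", "model": "openai/gpt-4.1-mini"}
#   with httpx.stream("POST", "http://localhost:8000/generate", params=params) as r:
#       for text in r.iter_text():
#           print(text, end="", flush=True)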

# Optional: reset chat history

@app.post("/reset")
async def reset_chat(chat_id: str = Query(..., description="ID of chat to reset")):
    if chat_id in chat_histories:
        chat_histories[chat_id].clear()
        return {"message": f"Chat {chat_id} history reset."}
    else:
        raise HTTPException(status_code=404, detail="Chat ID not found")

# For ASGI servers like Uvicorn

def get_app():
    return app
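
# Optional direct launcher (assumption: uvicorn is installed). The app is
# typically served with `uvicorn module_name:app`; this guard is a convenience
# for running the module directly:
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)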