# NOTE(review): the following three lines ("Spaces:", "Sleeping", "Sleeping")
# are Hugging Face Spaces page chrome captured during copy-paste, not source
# code — commented out so the file parses.
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from g4f.client import Client

# AI client used to proxy chat-completion requests (g4f selects a provider).
client = Client()

# FastAPI application instance.
app = FastAPI()

# CORS middleware so browser JavaScript on other origins can call this API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by the CORS spec for credentialed requests — set the concrete
# frontend origin for better security and for credentials to actually work.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # change "*" to your frontend URL for better security
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Request body model for the ask endpoint.
class Question(BaseModel):
    # The user's prompt text to forward to the model.
    question: str
async def generate_response_chunks(prompt: str):
    """Stream the model's reply to *prompt* as plain-text chunks.

    Yields each non-empty content delta from the streaming chat-completion
    response. On any failure, yields a single in-band error message instead
    of raising, so the HTTP response stream ends gracefully.
    """
    try:
        response = client.chat.completions.create(
            model="gpt-4",  # must be a model supported by the active g4f provider
            messages=[
                # Fix: system message placed first so the persona applies to
                # the whole exchange (original sent it after the user turn).
                {"role": "system", "content": "You are a helpful AI assistant created by abdullah ali who is very intelegent and he is 13 years old and live in lahore."},
                {"role": "user", "content": prompt},
            ],
            stream=True,  # receive the answer incrementally
        )
        for part in response:
            content = part.choices[0].delta.content
            if content:  # skip empty keep-alive deltas
                yield content
    except Exception as e:
        # Best-effort: surface the error to the client in-band rather than
        # aborting the stream with a 500 mid-response.
        yield f"Error occurred: {e}"
# Fix: the handler was never registered with the app — without a route
# decorator the endpoint is unreachable.
# NOTE(review): route path "/ask" inferred from the handler name — confirm
# against the frontend's fetch URL.
@app.post("/ask")
async def ask(question: Question):
    """POST endpoint: stream the AI's answer to the submitted question as plain text."""
    return StreamingResponse(
        generate_response_chunks(question.question),
        media_type="text/plain",
    )