import os

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI  # OpenAI's official API library (v1.x async client)
from pydantic import BaseModel

# Initialize FastAPI app
app = FastAPI()


# Define request body model for the prompt
class PromptRequest(BaseModel):
    prompt: str


# Read the API token from the environment
token = os.getenv("GITHUB_TOKEN")
if not token:
    raise ValueError("GITHUB_TOKEN environment variable not set")

# Initialize the async OpenAI client with the token as the API key. The token
# must be valid for the target endpoint; the client also reads the
# OPENAI_BASE_URL environment variable, which is how you would point it at an
# OpenAI-compatible service (e.g., GitHub Models) instead of api.openai.com.
client = AsyncOpenAI(api_key=token)


# Async generator to stream content chunks from the chat completions API
async def stream_response(prompt: str):
    try:
        # Create a streaming chat completion
        stream = await client.chat.completions.create(
            model="gpt-4",  # Replace with the model you're using (e.g., gpt-3.5-turbo or gpt-4)
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=1.0,
            top_p=1.0,
            stream=True,  # Enable streaming
        )
        # Yield each content delta as it arrives; guard against chunks that
        # carry no choices or an empty delta
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # Report errors in-band: by the time the generator raises, response
        # headers have already been sent, so an HTTP error status is no longer possible
        yield f"Error: {err}"


# Endpoint to handle the prompt and stream the response
@app.post("/generate")
async def generate_response(request: PromptRequest):
    # Return a StreamingResponse driven by the async generator
    return StreamingResponse(
        stream_response(request.prompt),
        media_type="text/event-stream",  # Stream chunks as server-sent events
    )


# Health check endpoint for Hugging Face Spaces
@app.get("/")
async def health_check():
    return {"status": "healthy"}
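

# --- Local-run sketch (an addition, not part of the original file) ---
# A minimal way to serve the app, assuming uvicorn is installed. Port 7860 is
# the conventional port for Hugging Face Docker Spaces; adjust host/port for
# your own deployment.
if __name__ == "__main__":
    import uvicorn

    # Bind to all interfaces so a container's port mapping can reach the app
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example request against a locally running server (hypothetical usage):
#   curl -N -X POST http://localhost:7860/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello!"}'
# The -N flag disables curl's output buffering so streamed chunks print as
# they arrive instead of all at once.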