# demo-falc-api / app.py
from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Load the Falcon 7B model and tokenizer
MODEL_NAME = "SpiceyToad/demo-falc"  # Replace with your Hub repo name
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, torch_dtype=torch.bfloat16, device_map="auto"
)

@app.post("/generate")
async def generate_text(request: Request):
    # Parse the input JSON
    data = await request.json()
    prompt = data.get("prompt", "")
    max_length = data.get("max_length", 50)

    # Tokenize the prompt and move it to the device the model was loaded on;
    # with device_map="auto" this is safer than hard-coding "cuda"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate text, passing the attention mask along with the input IDs and
    # setting pad_token_id explicitly, since Falcon's tokenizer has no pad token
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return {"generated_text": response}
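
# --- Example usage (a sketch, not part of the original file) ---
# The block below assumes the standard uvicorn runner and a local port of
# 8000; both are assumptions, not anything the original file specifies.
if __name__ == "__main__":
    import uvicorn

    # Serve the app locally so the /generate endpoint can be exercised
    uvicorn.run(app, host="0.0.0.0", port=8000)

# With the server running, the endpoint can be called with, for example:
#   curl -X POST http://localhost:8000/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "Hello, Falcon!", "max_length": 64}'
# which returns JSON of the form {"generated_text": "..."}.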