Spaces:
Sleeping
Sleeping
File size: 1,528 Bytes
5b01a80 ee3cf74 67c9fd7 5b01a80 30451f1 67c9fd7 5b01a80 b39b086 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
import io

from fastapi import APIRouter, HTTPException, Response

from huggingface_hub import InferenceClient

from models.text_to_image import TextToImageRequest
# Router for the image-generation endpoints; mounted by the main FastAPI app.
router = APIRouter()
@router.post("/v1/images/generations", tags=["Text to Image"])
async def text_to_image(t2i_body: TextToImageRequest):
    """Generate an image from a text prompt via the Hugging Face Inference API.

    Forwards the request parameters to ``InferenceClient.text_to_image`` for
    the model named in the request body and returns the generated image as a
    PNG response.

    Args:
        t2i_body: Request payload carrying the model id, prompt, and
            generation parameters (negative prompt, size, steps, guidance
            scale, scheduler, seed).

    Returns:
        Response: the generated image, ``image/png``.

    Raises:
        HTTPException: 500 if the upstream inference call fails.
    """
    client = InferenceClient(model=t2i_body.model)
    try:
        # Single inference call — the original code ran this twice and threw
        # away the first (expensive) result.
        image = client.text_to_image(
            prompt=t2i_body.prompt,
            negative_prompt=t2i_body.negative_prompt,
            height=t2i_body.height,
            width=t2i_body.width,
            num_inference_steps=t2i_body.num_inference_steps,
            guidance_scale=t2i_body.guidance_scale,
            scheduler=t2i_body.scheduler,
            seed=t2i_body.seed,
        )
        # Serialize the returned image object to PNG bytes in memory.
        buf = io.BytesIO()
        image.save(buf, format="PNG")
        return Response(content=buf.getvalue(), media_type="image/png")
    except Exception as e:
        print(f"Error generating image: {e}")
        # Returning a `(dict, 500)` tuple from FastAPI serializes as a JSON
        # array with HTTP status 200 — raise HTTPException so the client
        # actually receives a 500.
        raise HTTPException(status_code=500, detail=str(e)) from e