ngd1210 committed on
Commit
5b01a80
·
1 Parent(s): fbbf18e

Add text-to-image route to the application

Browse files

- Added new textToImage router
- Imported the router from routes
- Included the router in the FastAPI app

Files changed (3) hide show
  1. app.py +3 -2
  2. models/text_to_image.py +15 -0
  3. routes/textToImage.py +25 -0
app.py CHANGED
@@ -1,6 +1,6 @@
1
  from huggingface_hub import InferenceClient
2
  from fastapi import FastAPI
3
- from routes import chatCompletion
4
 
5
  app = FastAPI()
6
 
@@ -8,4 +8,5 @@ app = FastAPI()
8
  async def root():
9
  return {"message": "Hello World"}
10
 
11
- app.include_router(chatCompletion.router)
 
 
1
  from huggingface_hub import InferenceClient
2
  from fastapi import FastAPI
3
+ from routes import chatCompletion, textToImage
4
 
5
  app = FastAPI()
6
 
 
8
  async def root():
9
  return {"message": "Hello World"}
10
 
11
+ app.include_router(chatCompletion.router)
12
+ app.include_router(textToImage.router)
models/text_to_image.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel, Field
from huggingface_hub import TextToImageTargetSize
from typing import List, Optional


class TextToImageRequest(BaseModel):
    """Request body for the text-to-image generation endpoint.

    Field names mirror the keyword arguments of
    ``huggingface_hub.InferenceClient.text_to_image`` so the model can be
    splatted straight into that call. All fields except ``prompt`` are
    optional; ``None`` means "use the inference backend's default".
    """

    prompt: str = Field(..., description="The prompt to generate an image from.")
    # NOTE(review): newer huggingface_hub versions type negative_prompt as a
    # single str; List[str] matches older releases — confirm against the
    # pinned huggingface_hub version.
    negative_prompt: Optional[List[str]] = Field(None, description="One or several prompts to guide what NOT to include in image generation.")
    # Pixel dimensions are whole numbers: int (was float) matches the
    # InferenceClient.text_to_image signature; ge=1 rejects nonsensical sizes.
    height: Optional[int] = Field(None, ge=1, description="The height in pixels of the image to generate.")
    width: Optional[int] = Field(None, ge=1, description="The width in pixels of the image to generate.")
    num_inference_steps: Optional[int] = Field(None, ge=1, description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.")
    guidance_scale: Optional[float] = Field(None, description="A higher guidance scale value encourages the model to generate images closely linked to the text prompt, but values too high may cause saturation and other artifacts.")
    model: Optional[str] = Field(None, description="The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. If not provided, the default recommended text-to-image model will be used.")
    scheduler: Optional[str] = Field(None, description="Override the scheduler with a compatible one.")
    target_size: Optional[TextToImageTargetSize] = Field(None, description="The size in pixel of the output image")
    seed: Optional[int] = Field(None, description="Seed for the random number generator.")
routes/textToImage.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter, Response
from models.text_to_image import TextToImageRequest
from huggingface_hub import InferenceClient
import io

router = APIRouter()


# NOTE(review): the original path was "/vi/images/generations" — "vi" is
# almost certainly a typo for the OpenAI-compatible "v1" prefix; fixed here.
@router.post("/v1/images/generations", tags=["Text to Image"])
async def text_to_image(body: TextToImageRequest):
    """Generate an image from a text prompt via the HF Inference API.

    Parameters
    ----------
    body : TextToImageRequest
        Prompt plus optional generation parameters (dimensions, steps,
        guidance scale, scheduler, target size, seed, model id).

    Returns
    -------
    Response
        The generated image serialized as PNG (``image/png``).
    """
    # model=None lets InferenceClient pick the recommended default
    # text-to-image model.
    client = InferenceClient(model=body.model)
    image = client.text_to_image(
        prompt=body.prompt,
        negative_prompt=body.negative_prompt,
        height=body.height,
        width=body.width,
        num_inference_steps=body.num_inference_steps,
        guidance_scale=body.guidance_scale,
        scheduler=body.scheduler,
        target_size=body.target_size,
        seed=body.seed,
    )
    # text_to_image returns a PIL image; serialize it to PNG bytes in memory.
    # getvalue() reads the whole buffer regardless of position, so the
    # original seek(0) was redundant and is dropped.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")
    return Response(content=buffer.getvalue(), media_type="image/png")