Spaces:
Sleeping
Sleeping
Fix
Browse files
- models/text_to_image.py +4 -4
- routes/textToImage.py +18 -1
models/text_to_image.py
CHANGED
@@ -5,10 +5,10 @@ from typing import List, Optional
|
|
5 |
class TextToImageRequest(BaseModel):
|
6 |
prompt: str = Field(..., description="The prompt to generate an image from.")
|
7 |
negative_prompt: Optional[List[str]] = Field(None, description="One or several prompts to guide what NOT to include in image generation.")
|
8 |
-
height: Optional[float] = Field(None, description="The height in pixels of the image to generate.")
|
9 |
-
width: Optional[float] = Field(None, description="The width in pixels of the image to generate.")
|
10 |
-
num_inference_steps: Optional[int] = Field(None, description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.")
|
11 |
-
guidance_scale: Optional[float] = Field(None, description="A higher guidance scale value encourages the model to generate images closely linked to the text prompt, but values too high may cause saturation and other artifacts.")
|
12 |
model: Optional[str] = Field(None, description="The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. If not provided, the default recommended text-to-image model will be used.")
|
13 |
scheduler: Optional[str] = Field(None, description="Override the scheduler with a compatible one.")
|
14 |
#target_size: Optional[TextToImageTargetSize] = Field(None, description="The size in pixel of the output image")
|
|
|
5 |
class TextToImageRequest(BaseModel):
|
6 |
prompt: str = Field(..., description="The prompt to generate an image from.")
|
7 |
negative_prompt: Optional[List[str]] = Field(None, description="One or several prompts to guide what NOT to include in image generation.")
|
8 |
+
height: Optional[float] = Field(None, description="The height in pixels of the image to generate.", ge=64, le=2048)
|
9 |
+
width: Optional[float] = Field(None, description="The width in pixels of the image to generate.", ge=64, le=2048)
|
10 |
+
num_inference_steps: Optional[int] = Field(None, description="The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.", ge=1, le=500)
|
11 |
+
guidance_scale: Optional[float] = Field(None, description="A higher guidance scale value encourages the model to generate images closely linked to the text prompt, but values too high may cause saturation and other artifacts.", ge=1, le=20)
|
12 |
model: Optional[str] = Field(None, description="The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed Inference Endpoint. If not provided, the default recommended text-to-image model will be used.")
|
13 |
scheduler: Optional[str] = Field(None, description="Override the scheduler with a compatible one.")
|
14 |
#target_size: Optional[TextToImageTargetSize] = Field(None, description="The size in pixel of the output image")
|
routes/textToImage.py
CHANGED
@@ -22,4 +22,21 @@ async def text_to_image(t2i_body: TextToImageRequest):
|
|
22 |
img_byte_arr = io.BytesIO()
|
23 |
res.save(img_byte_arr, format="PNG")
|
24 |
img_byte_arr.seek(0)
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
img_byte_arr = io.BytesIO()
|
23 |
res.save(img_byte_arr, format="PNG")
|
24 |
img_byte_arr.seek(0)
|
25 |
+
try:
|
26 |
+
res = client.text_to_image(
|
27 |
+
prompt=t2i_body.prompt,
|
28 |
+
negative_prompt=t2i_body.negative_prompt,
|
29 |
+
height=t2i_body.height,
|
30 |
+
width=t2i_body.width,
|
31 |
+
num_inference_steps=t2i_body.num_inference_steps,
|
32 |
+
guidance_scale=t2i_body.guidance_scale,
|
33 |
+
scheduler=t2i_body.scheduler,
|
34 |
+
seed=t2i_body.seed
|
35 |
+
)
|
36 |
+
img_byte_arr = io.BytesIO()
|
37 |
+
res.save(img_byte_arr, format="PNG")
|
38 |
+
img_byte_arr.seek(0)
|
39 |
+
return Response(content=img_byte_arr.getvalue(), media_type="image/png")
|
40 |
+
except Exception as e:
|
41 |
+
print(f"Error generating image: {e}")
|
42 |
+
return {"error": str(e)}, 500
|