Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -154,7 +154,7 @@ def get_image_size(aspect_ratio):
|
|
154 |
if aspect_ratio == "1:1":
|
155 |
return 1328, 1328
|
156 |
elif aspect_ratio == "16:9":
|
157 |
-
return 1664, 928
|
158 |
elif aspect_ratio == "9:16":
|
159 |
return 928, 1664
|
160 |
elif aspect_ratio == "4:3":
|
@@ -178,7 +178,7 @@ def infer(
|
|
178 |
aspect_ratio="16:9",
|
179 |
guidance_scale=4.0,
|
180 |
num_inference_steps=50,
|
181 |
-
prompt_enhance=True,
|
182 |
progress=gr.Progress(track_tqdm=True),
|
183 |
):
|
184 |
"""
|
@@ -197,8 +197,8 @@ def infer(
|
|
197 |
generator = torch.Generator(device=device).manual_seed(seed)
|
198 |
|
199 |
print(f"Calling pipeline with prompt: '{prompt}'")
|
200 |
-
if prompt_enhance:
|
201 |
-
|
202 |
print(f"Actual Prompt: '{prompt}'")
|
203 |
print(f"Negative Prompt: '{negative_prompt}'")
|
204 |
print(f"Seed: {seed}, Size: {width}x{height}, Steps: {num_inference_steps}, Guidance: {guidance_scale}")
|
@@ -287,7 +287,7 @@ with gr.Blocks(css=css) as demo:
|
|
287 |
minimum=1,
|
288 |
maximum=50,
|
289 |
step=1,
|
290 |
-
value=50,
|
291 |
)
|
292 |
|
293 |
gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
|
@@ -303,7 +303,7 @@ with gr.Blocks(css=css) as demo:
|
|
303 |
aspect_ratio,
|
304 |
guidance_scale,
|
305 |
num_inference_steps,
|
306 |
-
prompt_enhance,
|
307 |
],
|
308 |
outputs=[result, seed],
|
309 |
)
|
|
|
154 |
if aspect_ratio == "1:1":
|
155 |
return 1328, 1328
|
156 |
elif aspect_ratio == "16:9":
|
157 |
+
return 832, 464
|
158 |
elif aspect_ratio == "9:16":
|
159 |
return 928, 1664
|
160 |
elif aspect_ratio == "4:3":
|
|
|
178 |
aspect_ratio="16:9",
|
179 |
guidance_scale=4.0,
|
180 |
num_inference_steps=50,
|
181 |
+
# prompt_enhance=True,
|
182 |
progress=gr.Progress(track_tqdm=True),
|
183 |
):
|
184 |
"""
|
|
|
197 |
generator = torch.Generator(device=device).manual_seed(seed)
|
198 |
|
199 |
print(f"Calling pipeline with prompt: '{prompt}'")
|
200 |
+
# if prompt_enhance:
|
201 |
+
# prompt = rewrite(prompt)
|
202 |
print(f"Actual Prompt: '{prompt}'")
|
203 |
print(f"Negative Prompt: '{negative_prompt}'")
|
204 |
print(f"Seed: {seed}, Size: {width}x{height}, Steps: {num_inference_steps}, Guidance: {guidance_scale}")
|
|
|
287 |
minimum=1,
|
288 |
maximum=50,
|
289 |
step=1,
|
290 |
+
value=24,
|
291 |
)
|
292 |
|
293 |
gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
|
|
|
303 |
aspect_ratio,
|
304 |
guidance_scale,
|
305 |
num_inference_steps,
|
306 |
+
# prompt_enhance,
|
307 |
],
|
308 |
outputs=[result, seed],
|
309 |
)
|