Spaces: Running on Zero
Commit 8c9766e · evalstate committed · 1 Parent(s): ac783de
add size options for output testing

app.py CHANGED
@@ -19,9 +19,25 @@ pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).t
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
 
+def ensure_divisible_by_16(value):
+    """Ensures a value is divisible by 16 by rounding to nearest multiple."""
+    return (value + 8) // 16 * 16
+
 def get_image_size(aspect_ratio):
     """Converts aspect ratio string to width, height tuple."""
-    if aspect_ratio == "1:1":
+    # Handle 16:9 size variants
+    if aspect_ratio == "16:9_large":
+        return 1664, 928
+    elif aspect_ratio == "16:9_three_quarter":
+        # 75% of full size, ensuring divisibility by 16
+        width = ensure_divisible_by_16(int(1664 * 0.75))
+        height = ensure_divisible_by_16(int(928 * 0.75))
+        return width, height  # 1248, 704
+    elif aspect_ratio == "16:9_half":
+        # 50% of full size
+        return 832, 464
+    # Keep original aspect ratios as fallback
+    elif aspect_ratio == "1:1":
         return 664, 664
     elif aspect_ratio == "16:9":
         return 832, 464
@@ -36,8 +52,8 @@ def get_image_size(aspect_ratio):
     elif aspect_ratio == "2:3":
         return 528, 792
     else:
-        # Default to …
-        return …
+        # Default to 16:9_half if something goes wrong
+        return 832, 464
 
 # --- Main Inference Function ---
 @spaces.GPU(duration=30)
@@ -70,8 +86,8 @@ def generate_image(
 
     # Set up the generator for reproducibility
     generator = torch.Generator(device=device).manual_seed(seed)
-    magic_prompt = "Ultra HD, 4K, cinematic composition"
-    prompt = prompt + " " + magic_prompt
+    # magic_prompt = "Ultra HD, 4K, cinematic composition"
+    # prompt = prompt + " " + magic_prompt
     print(f"Calling pipeline with prompt: '{prompt}'")
     # if prompt_enhance:
     # prompt = rewrite(prompt)
@@ -150,9 +166,9 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Row():
        aspect_ratio = gr.Radio(
-            label="…
-            choices=["…
-            value="16:…
+            label="Image Size (16:9 aspect ratio)",
+            choices=["16:9_large", "16:9_three_quarter", "16:9_half"],
+            value="16:9_half",
        )
        prompt_enhance = gr.Checkbox(label="Prompt Enhance", value=True)
 
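
The three 16:9 variants are all derived from the full 1664x928 resolution, and each dimension is kept divisible by 16, a common requirement for latent-diffusion pipelines. The snippet below is a standalone sketch, not part of the commit, that reproduces the helper and checks the derived sizes:

def ensure_divisible_by_16(value):
    # Round to the nearest multiple of 16 (ties round up): (value + 8) // 16 * 16.
    return (value + 8) // 16 * 16

full = (1664, 928)                 # 16:9_large
three_quarter = tuple(ensure_divisible_by_16(int(v * 0.75)) for v in full)
half = (832, 464)                  # 16:9_half, exactly 50% of full

print(three_quarter)                                           # (1248, 704), matching the diff comment
print(all(v % 16 == 0 for v in full + three_quarter + half))   # True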
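
For context, here is a minimal, self-contained sketch of how a Radio control like the one in this diff typically feeds the size helper in a Gradio Blocks app. The generate stub, the Textbox components, and the Button wiring are illustrative assumptions; only the Radio arguments and the size table come from the commit:

import gradio as gr

def get_image_size(aspect_ratio):
    sizes = {"16:9_large": (1664, 928), "16:9_three_quarter": (1248, 704)}
    return sizes.get(aspect_ratio, (832, 464))   # fall back to the 16:9_half size

def generate(prompt, aspect_ratio):
    # Placeholder for the actual pipeline call; just reports the chosen size.
    width, height = get_image_size(aspect_ratio)
    return f"Would render '{prompt}' at {width}x{height}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    with gr.Row():
        aspect_ratio = gr.Radio(
            label="Image Size (16:9 aspect ratio)",
            choices=["16:9_large", "16:9_three_quarter", "16:9_half"],
            value="16:9_half",
        )
    output = gr.Textbox(label="Result")
    gr.Button("Generate").click(generate, inputs=[prompt, aspect_ratio], outputs=output)

if __name__ == "__main__":
    demo.launch()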