Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -190,7 +190,6 @@ def infer(
     true_guidance_scale=1.0,
     num_inference_steps=8, # Default to 8 steps for fast inference
     rewrite_prompt=True,
-    num_images_per_prompt=1,
     progress=gr.Progress(track_tqdm=True),
 ):
     """
@@ -213,7 +212,7 @@ def infer(
         prompt = polish_prompt(prompt, image)
         print(f"Rewritten Prompt: {prompt}")
 
-    # Generate the edited image
+    # Generate the edited image - always generate just 1 image
     try:
         images = pipe(
             image,
@@ -222,14 +221,16 @@ def infer(
             num_inference_steps=num_inference_steps,
             generator=generator,
             true_cfg_scale=true_guidance_scale,
-            num_images_per_prompt=
+            num_images_per_prompt=1 # Always generate only 1 image
         ).images
+
+        # Return the first (and only) image
+        return images[0], seed
+
     except Exception as e:
         print(f"Error during inference: {e}")
         raise e
 
-    return images, seed
-
 # --- Examples and UI Layout ---
 examples = [
     # You can add example pairs of [image_path, prompt] here
@@ -256,7 +257,7 @@ with gr.Blocks(css=css) as demo:
     gr.HTML("""
         <div id="logo-title">
             <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
-            <h2 style="font-style: italic;color: #5b47d1;margin-top: -
+            <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 133px;">Fast, 8-steps with Lightning LoRA</h2>
         </div>
     """)
     gr.Markdown("""
@@ -272,7 +273,8 @@ with gr.Blocks(css=css) as demo:
                 show_label=True,
                 type="pil"
             )
-            result = gr.Gallery(
+            # Changed from Gallery to Image
+            result = gr.Image(
                 label="Result",
                 show_label=True,
                 type="pil"
@@ -315,20 +317,11 @@ with gr.Blocks(css=css) as demo:
                 value=8
             )
 
-
-
-
-
-
-                step=1,
-                value=1,
-                visible=False
-            )
-
-            rewrite_prompt = gr.Checkbox(
-                label="Enhance prompt (using HF Inference)",
-                value=True
-            )
+            # Removed num_images_per_prompt slider entirely
+            rewrite_prompt = gr.Checkbox(
+                label="Enhance prompt (using HF Inference)",
+                value=True
+            )
 
         # gr.Examples(examples=examples, inputs=[input_image, prompt], outputs=[result, seed], fn=infer, cache_examples=False)
 
@@ -343,7 +336,7 @@ with gr.Blocks(css=css) as demo:
             true_guidance_scale,
             num_inference_steps,
             rewrite_prompt,
-            num_images_per_prompt
+            # Removed num_images_per_prompt from inputs
         ],
        outputs=[result, seed],
     )
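Read as one change, the hunks above make `infer` always produce a single edited image: `num_images_per_prompt` disappears from the signature, the pipeline is called with `num_images_per_prompt=1`, and the function returns `images[0]` from inside the `try` block instead of returning the list after it. The sketch below shows roughly how the function reads after this commit. It is reconstructed from the visible diff context only; the leading parameters (`image`, `prompt`, seed handling), the generator device, and the globals `pipe`, `polish_prompt`, and `MAX_SEED` are assumptions about the unchanged parts of app.py, not code confirmed by the diff.

import random

import gradio as gr
import torch

# Assumed to exist elsewhere in app.py (not shown in this commit):
#   pipe          - the loaded Qwen-Image-Edit pipeline
#   polish_prompt - the prompt-rewriting helper
MAX_SEED = 2**32 - 1  # assumed constant for seed randomization


def infer(
    image,
    prompt,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=1.0,
    num_inference_steps=8,  # Default to 8 steps for fast inference
    rewrite_prompt=True,
    progress=gr.Progress(track_tqdm=True),
):
    """Edit `image` according to `prompt`; return one PIL image and the seed used."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Device is an assumption; the Space runs on ZeroGPU hardware
    generator = torch.Generator(device="cuda").manual_seed(seed)

    if rewrite_prompt:
        prompt = polish_prompt(prompt, image)
        print(f"Rewritten Prompt: {prompt}")

    # Generate the edited image - always generate just 1 image
    try:
        images = pipe(
            image,
            prompt=prompt,
            num_inference_steps=num_inference_steps,
            generator=generator,
            true_cfg_scale=true_guidance_scale,
            num_images_per_prompt=1,  # Always generate only 1 image
        ).images

        # Return the first (and only) image
        return images[0], seed
    except Exception as e:
        print(f"Error during inference: {e}")
        raise e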
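The UI hunks mirror the same decision: the result component switches from `gr.Gallery` to `gr.Image`, the `num_images_per_prompt` slider is dropped, and the click handler no longer passes that value. A `gr.Gallery` output expects a list of images while a `gr.Image` output expects a single image, which is why the return value changes from `images` to `images[0]`. Below is a minimal wiring sketch under the same caveat: only `result`, `rewrite_prompt`, `true_guidance_scale`, `num_inference_steps`, `input_image`, and the handler's inputs/outputs lists are visible in the diff; the other components, labels, and slider ranges are placeholders, and `infer` is the edited inference function from this commit.

import gradio as gr

# Every component below other than `result` and `rewrite_prompt` is a
# placeholder guess at the unchanged parts of the layout.
css = ""

with gr.Blocks(css=css) as demo:
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(label="Input Image", show_label=True, type="pil")
            prompt = gr.Textbox(label="Edit instruction")
            run_button = gr.Button("Edit")
        with gr.Column():
            # Changed from Gallery to Image: exactly one edited image is shown
            result = gr.Image(label="Result", show_label=True, type="pil")

    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Slider(label="Seed", minimum=0, maximum=2**32 - 1, step=1, value=0)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        true_guidance_scale = gr.Slider(label="True guidance scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
        num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=40, step=1, value=8)
        # The num_images_per_prompt slider is removed entirely
        rewrite_prompt = gr.Checkbox(label="Enhance prompt (using HF Inference)", value=True)

    run_button.click(
        fn=infer,
        inputs=[
            input_image,
            prompt,
            seed,
            randomize_seed,
            true_guidance_scale,
            num_inference_steps,
            rewrite_prompt,
            # num_images_per_prompt is no longer passed
        ],
        outputs=[result, seed],
    )

demo.launch()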