Update app.py
app.py CHANGED
@@ -114,25 +114,25 @@ def generate_qwen(
     use_lora = False
     if lora_input and lora_input.strip() != "":
         load_lora_opt(pipe_qwen, lora_input)
+        pipe_qwen.set_adapters(["default"], adapter_weights=[lora_scale])
         use_lora = True

-    kwargs = {}
-    if use_lora:
-        kwargs["cross_attention_kwargs"] = {"scale": lora_scale}
-
     images = pipe_qwen(
         prompt=prompt,
-        negative_prompt=negative_prompt if negative_prompt else
+        negative_prompt=negative_prompt if negative_prompt else "",
         height=height,
         width=width,
-
+        true_cfg_scale=guidance_scale,
+        guidance_scale=1.0,
         num_inference_steps=num_inference_steps,
         num_images_per_prompt=num_images,
         generator=generator,
         output_type="pil",
-        **kwargs,
     ).images

+    if use_lora:
+        pipe_qwen.unload_lora_weights()
+
     end_time = time.time()
     duration = end_time - start_time
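For context, the updated code applies the LoRA scale through diffusers' PEFT adapter API (set_adapters) instead of cross_attention_kwargs, passes classifier-free guidance as true_cfg_scale while pinning guidance_scale to 1.0, and unloads the LoRA after generation so the shared pipeline returns to its base weights. Below is a minimal sketch of that pattern outside the Space. It assumes the base model is Qwen/Qwen-Image, that load_lora_opt registers the adapter under the name "default" (inferred from the set_adapters call above), and uses an illustrative LoRA repo id and parameter values; it is not the full app.py.

import torch
from diffusers import DiffusionPipeline

# Assumed base model; app.py constructs pipe_qwen elsewhere in the file.
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
pipe.to("cuda")

# Hypothetical LoRA repo id; registered as "default" so set_adapters can scale it.
pipe.load_lora_weights("user/some-qwen-image-lora", adapter_name="default")
pipe.set_adapters(["default"], adapter_weights=[0.8])  # replaces cross_attention_kwargs={"scale": ...}

images = pipe(
    prompt="a watercolor fox in a snowy forest",
    negative_prompt="",
    height=1024,
    width=1024,
    true_cfg_scale=4.0,   # true CFG strength; distilled guidance_scale stays at 1.0, mirroring the diff
    guidance_scale=1.0,
    num_inference_steps=30,
    num_images_per_prompt=1,
    generator=torch.Generator("cuda").manual_seed(0),
    output_type="pil",
).images

# Unload the adapter so the next request (possibly without a LoRA) starts from clean base weights.
pipe.unload_lora_weights()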