multimodalart (HF Staff) committed · verified
Commit f2adf14 · Parent: a3748ad

Update app.py

Files changed (1): app.py (+11, -2)
app.py CHANGED
@@ -5,7 +5,11 @@ import torch
 import spaces
 
 from PIL import Image
-from diffusers import QwenImageEditPipeline
+
+from optimization import optimize_pipeline_
+from qwenimage.pipeline_qwen_image_edit import QwenImageEditPipeline
+from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
+from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
 import os
 import base64
@@ -140,6 +144,11 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the model pipeline
 pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=dtype).to(device)
+pipe.transformer.__class__ = QwenImageTransformer2DModel
+pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
+
+# --- Ahead-of-time compilation ---
+optimize_pipeline_(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")
 
 # --- UI Constants and Helpers ---
 MAX_SEED = np.iinfo(np.int32).max
@@ -151,7 +160,7 @@ def infer(
     prompt,
     seed=120,
     randomize_seed=False,
-    true_guidance_scale=1.0,
+    true_guidance_scale=4.0,
     num_inference_steps=50,
     rewrite_prompt=True,
     progress=gr.Progress(track_tqdm=True),
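
Review note: the first two hunks swap the stock diffusers import for local copies under qwenimage/, then re-class the already-loaded transformer in place and install a FlashAttention-3 attention processor. The processor class itself is not part of this diff, so the sketch below is only a minimal, hypothetical illustration of the diffusers attention-processor pattern being swapped in: SDPAAttnProcessorSketch and its internals are assumptions (using PyTorch's built-in SDPA where the real QwenDoubleStreamAttnProcessorFA3 would call an FA3 kernel), relying on the standard diffusers Attention attributes (to_q, to_k, to_v, to_out, heads).

import torch.nn.functional as F

class SDPAAttnProcessorSketch:
    # Hypothetical stand-in for QwenDoubleStreamAttnProcessorFA3: same call
    # pattern, but dispatching to torch SDPA instead of FlashAttention-3.
    def __call__(self, attn, hidden_states, encoder_hidden_states=None,
                 attention_mask=None, **kwargs):
        ctx = hidden_states if encoder_hidden_states is None else encoder_hidden_states
        q, k, v = attn.to_q(hidden_states), attn.to_k(ctx), attn.to_v(ctx)

        # (batch, seq, heads*dim) -> (batch, heads, seq, dim)
        b, s, _ = q.shape
        split = lambda t: t.view(b, -1, attn.heads, t.shape[-1] // attn.heads).transpose(1, 2)
        q, k, v = split(q), split(k), split(v)

        # The FA3 variant would call a flash-attn kernel here instead.
        out = F.scaled_dot_product_attention(q, k, v, attn_mask=attention_mask)
        out = out.transpose(1, 2).reshape(b, s, -1)
        return attn.to_out[0](out)  # final linear projection

# set_attn_processor, as used in the diff, then installs the processor on
# every attention module of the transformer:
# pipe.transformer.set_attn_processor(SDPAAttnProcessorSketch())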
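
optimize_pipeline_ comes from the Space's own optimization module, which is not shown in this diff, so the following is only a guess at its shape: a helper that compiles the denoising transformer and runs one throwaway call at the production resolution, so the compilation cost is paid ahead of the first user request. The name optimize_pipeline_sketch and the torch.compile approach are assumptions; the real module may instead use torch.export/AOTInductor.

import torch
from PIL import Image

def optimize_pipeline_sketch(pipe, image: Image.Image, prompt: str) -> None:
    # Compile the transformer, the hot loop of the denoising pipeline.
    pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
    # One warm-up call with representative input shapes triggers
    # compilation now, before real traffic arrives.
    with torch.inference_mode():
        pipe(image=image, prompt=prompt, num_inference_steps=1)

# Mirrors the call in the diff:
# optimize_pipeline_sketch(pipe, image=Image.new("RGB", (1024, 1024)), prompt="prompt")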
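
The last hunk raises the true_guidance_scale default from 1.0 to 4.0. With true classifier-free guidance the prediction is neg + scale * (pos - neg), so at scale 1.0 the formula collapses to the positive prediction and guidance is effectively off; 4.0 actually steers the result toward the prompt, at the cost of a second forward pass per step. Below is a hedged usage sketch of how infer()'s new default plausibly reaches the pipeline: in diffusers' Qwen-Image pipelines the knob is true_cfg_scale, and the mapping from the UI's true_guidance_scale, the prompt text, and the dummy input image are assumptions, not code from this diff.

import torch
from PIL import Image

input_image = Image.new("RGB", (1024, 1024))  # stand-in for the user's upload

result = pipe(
    image=input_image,
    prompt="make the sky stormy",   # example prompt (not from the diff)
    negative_prompt=" ",            # true CFG needs a negative branch
    true_cfg_scale=4.0,             # matches the new infer() default
    num_inference_steps=50,
    generator=torch.Generator(device="cuda").manual_seed(120),
)
edited = result.images[0]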