kaiiddo committed
Commit ab27705 · verified · 1 Parent(s): 375816d

Create app.py

Files changed (1)
app.py: +86 -0
app.py ADDED
@@ -0,0 +1,86 @@
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from diffusers import QwenImageEditPipeline
+ import os
+
+ # Initialize model
+ model_id = "Qwen/Qwen-Image-Edit"
+ pipe = None
+
+ def load_model():
+     global pipe
+     if pipe is None:
+         try:
+             pipe = QwenImageEditPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
+             # For CPU compatibility, remove CUDA-specific optimizations
+             # Note: This will be slower but functional on CPU
+             pipe = pipe.to("cpu")
+             print("Model loaded successfully.")
+         except Exception as e:
+             print(f"Error loading model: {e}")
+             raise gr.Error("Failed to load model. Please check logs.")
+
+ # Load model on startup
+ load_model()
+
+ def process_image(image, prompt):
+     if pipe is None:
+         raise gr.Error("Model not loaded.")
+
+     # Ensure image is RGB
+     if image.mode != "RGB":
+         image = image.convert("RGB")
+
+     # Prepare inputs
+     inputs = {
+         "image": image,
+         "prompt": prompt,
+         "generator": torch.manual_seed(0),
+         "true_cfg_scale": 4.0,
+         "negative_prompt": " ",
+         "num_inference_steps": 50,
+     }
+
+     # Run inference
+     with torch.inference_mode():
+         output = pipe(**inputs)
+         output_image = output.images[0]
+
+     return output_image
+
+ # Gradio Interface
+ with gr.Blocks(title="Qwen Image Editor", theme=gr.themes.Default(primary_hue="indigo")) as demo:  # primary_hue expects a named hue, not a hex string
+     gr.Markdown("# 🎨 Qwen Image Editor")
+     gr.Image(value="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png", label="", height=80)
+     gr.Markdown("Edit images using text prompts powered by Qwen-Image-Edit")
+
+     with gr.Row():
+         with gr.Column():
+             image_input = gr.Image(type="pil", label="Input Image")
+             prompt_input = gr.Textbox(label="Edit Prompt", placeholder="Example: Change the rabbit's color to purple...")
+             submit_btn = gr.Button("Generate Edited Image", variant="primary")
+
+         with gr.Column():
+             image_output = gr.Image(label="Output Image", interactive=False)
+
+     submit_btn.click(
+         fn=process_image,
+         inputs=[image_input, prompt_input],
+         outputs=image_output
+     )
+
+     gr.Examples(
+         examples=[
+             ["https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/%E5%B9%BB%E7%81%AF%E7%89%873.JPG", "Make the capybara wear sunglasses"],
+             ["https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/%E5%B9%BB%E7%81%AF%E7%89%8712.JPG", "Rotate the object 90 degrees clockwise"],
+             ["https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/%E5%B9%BB%E7%81%AF%E7%89%8715.JPG", "Change the text to 'Hello World' in blue"],
+         ],
+         inputs=[image_input, prompt_input],
+         outputs=image_output,
+         fn=process_image,
+         cache_examples=False
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
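
Note: the loader in this commit pins the pipeline to CPU in float32, which runs but is slow for 50 inference steps. A minimal sketch of a device-aware variant, assuming a CUDA GPU may be available on the Space (hypothetical helper, not part of the commit; bfloat16 support depends on the GPU):

import torch
from diffusers import QwenImageEditPipeline

def load_model_auto(model_id="Qwen/Qwen-Image-Edit"):
    # Use the GPU with bfloat16 when available; otherwise fall back to the
    # CPU/float32 path used in app.py above.
    if torch.cuda.is_available():
        pipe = QwenImageEditPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
        return pipe.to("cuda")
    pipe = QwenImageEditPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
    return pipe.to("cpu")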
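
Note: for app.py to run as a Space, its dependencies also have to be declared. A minimal requirements.txt sketch, assuming only the libraries imported above plus the usual companions the Qwen pipeline pulls in (package names assumed, versions unpinned; diffusers must be recent enough to include QwenImageEditPipeline):

gradio
torch
diffusers
transformers
accelerate
Pillow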