import gradio as gr
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline
import os

# Global variable to store the loaded pipeline
model = None


def load_model():
    global model
    if model is None:
        try:
            # Load the model with settings suited to CPU-only inference
            model = QwenImageEditPipeline.from_pretrained(
                "Qwen/Qwen-Image-Edit",
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True
            )
            # Use CPU only for the free tier
            model = model.to("cpu")
            print("Model loaded successfully on CPU")
        except Exception as e:
            print(f"Error loading model: {e}")
            raise gr.Error("Failed to load model. Please try again later.")


def process_image(image, prompt):
    global model
    if model is None:
        load_model()

    # Guard against missing inputs so the app fails gracefully
    if image is None:
        raise gr.Error("Please upload an image first.")
    if not prompt or not prompt.strip():
        raise gr.Error("Please enter an edit prompt.")

    # Ensure the image is RGB
    if image.mode != "RGB":
        image = image.convert("RGB")

    # Prepare inputs with parameters tuned for CPU inference
    inputs = {
        "image": image,
        "prompt": prompt,
        "generator": torch.manual_seed(0),
        "true_cfg_scale": 4.0,
        "negative_prompt": " ",
        "num_inference_steps": 30,  # Reduced steps for faster CPU processing
    }

    # Run inference without gradient tracking
    with torch.inference_mode():
        output = model(**inputs)
        output_image = output.images[0]

    return output_image


# Create a theme with purple as the primary color
theme = gr.themes.Default(
    primary_hue=gr.themes.colors.purple,
    secondary_hue=gr.themes.colors.purple,
    neutral_hue=gr.themes.colors.gray
)

# Gradio interface
with gr.Blocks(title="Qwen Image Editor", theme=theme) as demo:
    # Header with just the logo image
    gr.Image(
        value="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png",
        label="",
        height=80,
        show_label=False,
        container=False  # Removes the card/frame around the logo
    )

    gr.Markdown("# 🎨 Qwen Image Editor")
    gr.Markdown("Edit images using text prompts, powered by Qwen-Image-Edit (CPU optimized)")

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="Input Image", height=300)
            prompt_input = gr.Textbox(
                label="Edit Prompt",
                placeholder="Example: Change the rabbit's color to purple..."
            )
            submit_btn = gr.Button("Generate Edited Image", variant="primary")
        with gr.Column():
            image_output = gr.Image(label="Output Image", interactive=False, height=300)

    submit_btn.click(
        fn=process_image,
        inputs=[image_input, prompt_input],
        outputs=image_output
    )

    # Example images served by the official Qwen-Image-Edit demo Space
    gr.Examples(
        examples=[
            ["https://qwen-qwen-image-edit.hf.space/gradio_api/file=/tmp/gradio/c047327cd0627e856e5462452cb004526063c033683652d4f24626671e49fba4/neon_sign.png",
             "change the text to read 'Qwen Image Edit is here'"],
            ["https://qwen-qwen-image-edit.hf.space/gradio_api/file=/tmp/gradio/faee7db03e2f2843ff7544b0434b2f0b1bcf84e79a47e3e7c1277484176543e8/cat_sitting.jpg",
             "make the cat floating in the air and holding a sign that reads 'this is fun' written with a blue crayon"],
            ["https://qwen-qwen-image-edit.hf.space/gradio_api/file=/tmp/gradio/2a197adbb8c9432415e9788bd4c2365c6f42c863393fc81a265e06d47384ad34/pie.png",
             "turn the style of the photo to vintage comic book"],
        ],
        inputs=[image_input, prompt_input],
        outputs=image_output,
        fn=process_image,
        cache_examples=False
    )

if __name__ == "__main__":
    demo.launch()
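
# Usage note (an assumption, not taken from the original script): on a Hugging Face Space this
# file is typically saved as app.py and launched automatically; locally it can be run with
# `python app.py` after installing the dependencies, for example:
#   pip install gradio torch diffusers transformers accelerate pillow
# Exact package versions required by Qwen-Image-Edit are not pinned here.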