Hugging Face Spaces status: Runtime error (the app below fails at runtime).
| import gradio as gr | |
| import torch | |
| from PIL import Image | |
| from diffusers import QwenImageEditPlusPipeline | |
| import spaces | |
| import numpy as np | |
# Load the Qwen image-editing pipeline once at module import.
# Use half precision only when a CUDA device is present; CPU stays float32.
_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
pipeline = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    torch_dtype=_dtype,
)
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = pipeline.to(device)
# NOTE(review): the comment below and the unused `import spaces` strongly suggest a
# `@spaces.GPU(duration=120)` decorator was intended here (ZeroGPU Spaces fail at
# runtime without it) — confirm the Space hardware type before adding it.
# Request GPU for 2 minutes per run
def edit_image(
    input_images,
    prompt,
    seed=0,
    cfg_scale=4.0,
    num_steps=40,
    negative_prompt=" "
):
    """Edit one or more images with the Qwen image-edit pipeline.

    Args:
        input_images: A filepath, or a list of filepaths, as produced by the
            ``gr.File`` component (``type="filepath"``).
        prompt: Text instruction describing the desired edit.
        seed: Random seed for reproducibility (sliders may pass a float).
        cfg_scale: True CFG scale forwarded to the pipeline.
        num_steps: Number of denoising steps (sliders may pass a float).
        negative_prompt: Negative prompt; defaults to a single space.

    Returns:
        The first edited ``PIL.Image.Image`` produced by the pipeline.

    Raises:
        gr.Error: If no image was uploaded.
    """
    # Fail early with a user-visible message instead of Image.open(None) crashing.
    if not input_images:
        raise gr.Error("Please upload at least one image.")

    # Gradio sliders deliver floats; torch.Generator.manual_seed requires an int.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Input may be a single filepath or a list of filepaths.
    if isinstance(input_images, list):
        images = [Image.open(path).convert("RGB") for path in input_images]
    else:
        images = Image.open(input_images).convert("RGB")

    result = pipeline(
        image=images,
        prompt=prompt,
        generator=generator,
        true_cfg_scale=cfg_scale,
        num_inference_steps=int(num_steps),  # steps must be integral
        guidance_scale=1.0,
        negative_prompt=negative_prompt,
        num_images_per_prompt=1,
    )
    return result.images[0]
# Build the Gradio UI; components are created first, then wired together below.
with gr.Blocks(title="Qwen Image Editor") as demo:
    gr.Markdown("# Qwen Image Editor")
    gr.Markdown("Upload one or more images and describe how you want to edit them.")

    with gr.Row():
        with gr.Column():
            upload_files = gr.File(
                label="Upload Image(s)",
                file_count="multiple",
                file_types=["image"],
                type="filepath",
            )
            edit_prompt = gr.Textbox(
                label="Editing Instructions",
                placeholder="Describe what changes you want to make to the image(s)",
                lines=3,
            )
            # Tunables are hidden behind an accordion so the default view stays simple.
            with gr.Accordion("Advanced Settings", open=False):
                seed_slider = gr.Slider(
                    label="Seed", minimum=0, maximum=999999, step=1, value=0
                )
                cfg_slider = gr.Slider(
                    label="CFG Scale", minimum=1.0, maximum=10.0, step=0.5, value=4.0
                )
                steps_slider = gr.Slider(
                    label="Number of Steps", minimum=20, maximum=100, step=10, value=40
                )
                # Hidden component: keeps the negative prompt wired without showing it.
                neg_prompt_box = gr.Textbox(
                    label="Negative Prompt", value=" ", visible=False
                )
            run_button = gr.Button("Generate", variant="primary")

        with gr.Column():
            result_image = gr.Image(label="Edited Result", type="pil")

    gr.Examples(
        examples=[
            [["example1.jpg"], "Change the background to a beach"],
            [["example2.jpg"], "Make it look vintage"],
            [["person.jpg", "product.jpg"], "Place the person next to the product in a modern office"],
        ],
        inputs=[upload_files, edit_prompt],
        outputs=result_image,
        cache_examples=False,
    )

    # Wire the button to the inference function.
    run_button.click(
        fn=edit_image,
        inputs=[upload_files, edit_prompt, seed_slider, cfg_slider, steps_slider, neg_prompt_box],
        outputs=result_image,
        api_name="predict",  # exposes this handler via the Gradio API
    )

# Start the app.
demo.launch()