# Hugging Face Space app (runs on ZeroGPU hardware)
import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler
from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
# --- Custom Theme Definition ---
colors.orange_red = colors.Color(
    name="orange_red",
    c50="#FFF0E5", c100="#FFE0CC", c200="#FFC299", c300="#FFA366",
    c400="#FF8533", c500="#FF4500", c600="#E63E00", c700="#CC3700",
    c800="#B33000", c900="#992900", c950="#802200",
)

class OrangeRedTheme(Soft):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        super().set(
            button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
            button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
            button_primary_text_color="white",
        )

orange_red_theme = OrangeRedTheme()
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    transformer=QwenImageTransformer2DModel.from_pretrained(
        "linoyts/Qwen-Image-Edit-Rapid-AIO",
        subfolder="transformer",
        torch_dtype=dtype,
        device_map="cuda",
    ),
    torch_dtype=dtype,
).to(device)
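# The base Qwen-Image-Edit-2509 pipeline is assembled with its transformer swapped for the
# "Rapid-AIO" checkpoint, a merged checkpoint intended for few-step editing (the UI below
# defaults to 4 steps with guidance scale 1.0).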
# Load all LoRA adapters
pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles", weight_name="镜头转换.safetensors", adapter_name="angles")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration", weight_name="移除光影.safetensors", adapter_name="light_restoration")
pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime", weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors", adapter_name="photo_to_anime")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight", weight_name="Qwen-Edit-Relight.safetensors", adapter_name="relight")
pipe.transformer.__class__ = QwenImageTransformer2DModel
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
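# Swap in the FlashAttention-3 double-stream attention processor, then run optimize_pipeline_
# with dummy 1024x1024 inputs; optimize_pipeline_ (from the local optimization module) is assumed
# to warm up / compile the pipeline ahead of time so the first real request is fast.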
MAX_SEED = np.iinfo(np.int32).max
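# MAX_SEED is the upper bound used when "Randomize seed" is enabled.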
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call
def infer(input_image, prompt, lora_adapter, seed=42, randomize_seed=True, guidance_scale=1.0, steps=4, progress=gr.Progress(track_tqdm=True)):
    """
    Perform image editing based on the selected LoRA adapter and prompt.
    """
    if input_image is None:
        raise gr.Error("Please upload an image for editing.")

    # Activate the LoRA adapter selected by the user
    if lora_adapter == "Multiple Angles":
        pipe.set_adapters(["angles"], adapter_weights=[1.0])
    elif lora_adapter == "Light Restoration":
        pipe.set_adapters(["light_restoration"], adapter_weights=[1.0])
    elif lora_adapter == "Photo to Anime":
        pipe.set_adapters(["photo_to_anime"], adapter_weights=[1.0])
    elif lora_adapter == "Relight":
        pipe.set_adapters(["relight"], adapter_weights=[1.0])

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    original_image = input_image.copy().convert("RGB")

    result = pipe(
        image=original_image,
        prompt=prompt,
        height=original_image.size[1],
        width=original_image.size[0],
        num_inference_steps=steps,
        generator=generator,
        true_cfg_scale=guidance_scale,
        num_images_per_prompt=1,
    ).images[0]

    return (original_image, result), seed, gr.Button(visible=True)
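# infer returns an (original, edited) pair for the before/after slider, the seed actually used,
# and a button update that reveals the "Reuse this image" button.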
def infer_example(input_image, prompt, lora_adapter):
    """
    Wrapper function for gr.Examples to call the main infer logic for the slider.
    """
    (original_image, generated_image), seed, _ = infer(input_image, prompt, lora_adapter)
    return (original_image, generated_image), seed
# --- UI ---
css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""
with gr.Blocks(css=css, theme=orange_red_theme) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast**", elem_id="main-title")
        gr.Markdown("Image manipulation with Qwen Image Edit 2509 and various LoRA adapters.")

        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Upload Image", type="pil", height=300)
                with gr.Row():
                    prompt = gr.Text(
                        label="Edit Prompt",
                        show_label=False,
                        max_lines=1,
                        placeholder="Enter your prompt for editing",
                        container=False,
                    )
                    run_button = gr.Button("Run", variant="primary", scale=0)
                lora_adapter = gr.Dropdown(
                    label="Choose LoRA Adapter",
                    choices=["Multiple Angles", "Light Restoration", "Photo to Anime", "Relight"],
                    value="Multiple Angles",
                )
                with gr.Accordion("Advanced Settings", open=False):
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=0,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=10,
                        step=0.1,
                        value=1.0,
                    )
                    steps = gr.Slider(
                        label="Steps",
                        minimum=1,
                        maximum=30,
                        value=4,
                        step=1,
                    )
            with gr.Column():
                # An ImageSlider shows the (original, edited) pair returned by infer as a before/after comparison.
                output_slider = gr.ImageSlider(label="Output Image", show_label=True, interactive=False, format="png")
                reuse_button = gr.Button("Reuse this image", visible=False)

        gr.Examples(
            examples=[
                ["examples/sea.png", "Rotate the camera 90 degrees to the left.", "Multiple Angles"],
                ["examples/shadow.jpg", "Remove shadows and relight the image using soft lighting.", "Light Restoration"],
                ["examples/girl.jpg", "transform into anime", "Photo to Anime"],
                ["examples/dark.jpg", "Relight the image using soft, diffused lighting that simulates sunlight filtering through curtains.", "Relight"],
            ],
            inputs=[input_image, prompt, lora_adapter],
            outputs=[output_slider, seed],
            fn=infer_example,
            cache_examples=False,
            label="Examples",
        )
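    # Wire the Run button and the prompt's Enter key to the same inference call.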
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_slider, seed, reuse_button],
    )

    reuse_button.click(
        fn=lambda images: images[1] if isinstance(images, (list, tuple)) and len(images) > 1 else images,
        inputs=[output_slider],
        outputs=[input_image],
    )
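# mcp_server=True exposes the app's API endpoints as MCP tools; ssr_mode=False disables
# server-side rendering of the frontend.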
demo.launch(mcp_server=True, ssr_mode=False, show_error=True)