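"""Gradio app: fast image editing with Qwen-Image-Edit-2509 plus swappable LoRA adapters."""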
import os
import gradio as gr
import numpy as np
import spaces
import torch
import random
from PIL import Image
from typing import Iterable
# --- Gradio Theme ---
from gradio.themes import Soft
from gradio.themes.utils import colors, fonts, sizes
colors.blue_ish = colors.Color(
name="blue_ish",
c50="#F0F5FF",
c100="#E0EBFF",
c200="#C2D7FF",
c300="#A3C2FF",
c400="#85AFFF",
c500="#4A8DFF",
c600="#3374E6",
c700="#1A5CCC",
c800="#0043B3",
c900="#002B80",
c950="#00144D",
)
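# Soft-based theme that applies the palette above to backgrounds, buttons, and sliders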
class QwenTheme(Soft):
def __init__(
self,
*,
primary_hue: colors.Color | str = colors.gray,
secondary_hue: colors.Color | str = colors.blue_ish,
neutral_hue: colors.Color | str = colors.slate,
text_size: sizes.Size | str = sizes.text_lg,
font: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
),
font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
),
):
super().__init__(
primary_hue=primary_hue,
secondary_hue=secondary_hue,
neutral_hue=neutral_hue,
text_size=text_size,
font=font,
font_mono=font_mono,
)
super().set(
body_background_fill="linear-gradient(135deg, *primary_100, *primary_50)",
body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
button_primary_text_color="white",
slider_color="*secondary_500",
slider_color_dark="*secondary_600",
block_title_text_weight="600",
block_border_width="2px",
block_shadow="*shadow_drop_lg",
)
qwen_theme = QwenTheme()
# --- Model Loading ---
from diffusers import FlowMatchEulerDiscreteScheduler
from optimization import optimize_pipeline_
from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = QwenImageEditPlusPipeline.from_pretrained(
"Qwen/Qwen-Image-Edit-2509",
transformer=QwenImageTransformer2DModel.from_pretrained(
"linoyts/Qwen-Image-Edit-Rapid-AIO",
subfolder='transformer',
torch_dtype=dtype,
device_map='cuda'
),
torch_dtype=dtype
).to(device)
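# "Rapid-AIO" appears to be a few-step distilled variant of the transformer,
# which is why the defaults below use only 4 inference steps at guidance 1.0.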
# Load all LoRA adapters
pipe.load_lora_weights("autoweeb/Qwen-Image-Edit-2509-Photo-to-Anime",
weight_name="Qwen-Image-Edit-2509-Photo-to-Anime_000001000.safetensors",
adapter_name="anime")
pipe.load_lora_weights("dx8152/Qwen-Edit-2509-Multiple-angles",
weight_name="镜头转换.safetensors",
adapter_name="multiple-angles")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Light_restoration",
weight_name="移除光影.safetensors",
adapter_name="light-restoration")
pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Relight",
weight_name="Qwen-Edit-Relight.safetensors",
adapter_name="relight")
pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
# Optional: run pipeline optimization after all weights are loaded (left disabled here).
# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
MAX_SEED = np.iinfo(np.int32).max
# --- Helper Functions ---
def update_dimensions_on_upload(image):
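    """Return (width, height) with the longer side scaled to 1024 and both
    values rounded down to multiples of 8.

    e.g. a 4032x3024 upload -> (1024, 768); a 512x512 image -> (1024, 1024).
    """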
if image is None:
return 1024, 1024
original_width, original_height = image.size
    # Scale the longer side to exactly 1024 (upscaling smaller images) while preserving aspect ratio
if original_width > original_height:
new_width = 1024
aspect_ratio = original_height / original_width
new_height = int(new_width * aspect_ratio)
else:
new_height = 1024
aspect_ratio = original_width / original_height
new_width = int(new_height * aspect_ratio)
# Ensure dimensions are multiples of 8 for model compatibility
new_width = (new_width // 8) * 8
new_height = (new_height // 8) * 8
return new_width, new_height
# --- Main Inference Function ---
@spaces.GPU
def infer(
input_image,
prompt,
lora_adapter,
seed,
randomize_seed,
guidance_scale,
steps,
width,
height,
progress=gr.Progress(track_tqdm=True)
):
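    """Apply the selected LoRA adapter and run one edit.

    Returns the edited image, the seed actually used, and a Button update
    that reveals the hidden "Reuse this image" button.
    """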
if input_image is None:
raise gr.Error("Please upload an image to edit.")
    # Map the UI choice to the corresponding LoRA adapter and activate it
    adapter_map = {
        "Photo-to-Anime": "anime",
        "Multiple-Angles": "multiple-angles",
        "Light-Restoration": "light-restoration",
        "Relight": "relight",
    }
    adapter_name = adapter_map.get(lora_adapter)
    if adapter_name is None:
        raise gr.Error(f"Unknown editing style: {lora_adapter}")
    pipe.set_adapters([adapter_name], adapter_weights=[1.0])
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device=device).manual_seed(seed)
    # Negative prompt for true classifier-free guidance; it only takes effect
    # when true_cfg_scale (the Guidance Scale slider) is above 1.0.
    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
result = pipe(
image=input_image.convert("RGB"),
prompt=prompt,
        negative_prompt=negative_prompt,  # ignored unless true_cfg_scale > 1.0
height=height,
width=width,
num_inference_steps=steps,
generator=generator,
true_cfg_scale=guidance_scale,
num_images_per_prompt=1,
).images[0]
return result, seed, gr.Button(visible=True)
# Wrapper for examples to handle file paths
@spaces.GPU
def infer_example(input_image_path, prompt, lora_adapter):
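    """Wrapper for gr.Examples: loads the image from disk, derives output
    dimensions, and calls infer() with fast defaults (random seed, guidance 1.0, 4 steps)."""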
input_pil = Image.open(input_image_path).convert("RGB")
width, height = update_dimensions_on_upload(input_pil)
# Set default values for example inference
result, seed, _ = infer(input_pil, prompt, lora_adapter, 0, True, 1.0, 4, width, height)
return result, seed
# --- UI Layout ---
css="""
#col-container {
margin: 0 auto;
max-width: 960px;
}
#main-title h1 {font-size: 2.1em !important;}
"""
with gr.Blocks(css=css, theme=qwen_theme) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown("# **Qwen-Image-Edit-2509-LoRAs-Fast**", elem_id="main-title")
gr.Markdown("Perform diverse image edits using specialized LoRA adapters for the Qwen-Image-Edit model.")
with gr.Row():
with gr.Column():
input_image = gr.Image(label="Upload Image", type="pil")
lora_adapter = gr.Dropdown(
label="Choose Editing Style",
choices=["Photo-to-Anime", "Multiple-Angles", "Light-Restoration", "Relight"],
value="Photo-to-Anime"
)
prompt = gr.Text(
label="Edit Prompt",
show_label=True,
placeholder="e.g., transform into anime",
)
run_button = gr.Button("Run", variant="primary")
with gr.Column():
output_image = gr.Image(label="Output Image", interactive=False)
                reuse_button = gr.Button("Reuse this image", visible=False)
with gr.Accordion("⚙️ Advanced Settings", open=False):
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
# Hidden sliders to hold image dimensions
height = gr.Slider(label="Height", minimum=256, maximum=1024, step=8, value=1024, visible=False)
width = gr.Slider(label="Width", minimum=256, maximum=1024, step=8, value=1024, visible=False)
gr.Examples(
examples=[
["examples/anime_example.jpg", "transform into anime", "Photo-to-Anime"],
["examples/car_example.jpg", "view from the side", "Multiple-Angles"],
["examples/shadow_example.jpg", "Remove shadows and relight the image using soft lighting.", "Light-Restoration"],
["examples/relight_example.jpg", "Relight the image using soft, diffused lighting that simulates sunlight filtering through curtains.", "Relight"],
],
inputs=[input_image, prompt, lora_adapter],
outputs=[output_image, seed],
fn=infer_example,
cache_examples=False,
label="Examples"
)
# --- Event Handlers ---
run_button.click(
fn=infer,
inputs=[input_image, prompt, lora_adapter, seed, randomize_seed, guidance_scale, steps, width, height],
        outputs=[output_image, seed, reuse_button]
)
reuse_button.click(
fn=lambda img: img,
inputs=[output_image],
outputs=[input_image]
)
input_image.upload(
fn=update_dimensions_on_upload,
inputs=[input_image],
outputs=[width, height]
)
demo.launch(mcp_server=True, ssr_mode=False, show_error=True)