Update app.py
app.py (CHANGED):
@@ -1,5 +1,5 @@
 # --------------------------------------------------------------
-# Qwen‑Image‑Edit‑2509 LoRA Demo – …
+# Qwen‑Image‑Edit‑2509 LoRA Demo – 100 % aspect‑ratio preservation
 # --------------------------------------------------------------
 import os
 import random
@@ -7,7 +7,7 @@ import numpy as np
 import torch
 import gradio as gr
 import spaces
-from PIL import Image …
+from PIL import Image
 from typing import Iterable
 
 # -------------------- THEME ---------------------------------
@@ -169,43 +169,51 @@ pipe.load_lora_weights(
 pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
 
 # --------------------------------------------------------------
-# Helper – …
+# Helper – pad to a multiple of 8 (no resizing)
 # --------------------------------------------------------------
 DIVISIBLE_BY = 8
-MAX_SIDE = 1024 # …
+MAX_SIDE = 1024 # the model cannot accept a side larger than this
 
-def …
-    """Round ** …
-    return (x // base) * base
+def _make_multiple_up(x: int, base: int = DIVISIBLE_BY) -> int:
+    """Round **up** to the nearest multiple of `base`."""
+    return ((x + base - 1) // base) * base
 
-def …
+def pad_to_multiple_of_8(img: Image.Image):
     """
-…
-…
-…
+    Pad the image with black pixels so that *both* dimensions are a multiple of 8.
+    Returns:
+        padded_img – the image that will be fed to the pipeline
+        crop_box – the (left, top, right, bottom) box that later lets us
+                   crop the generation back to the original size.
     """
-…
+    w, h = img.size
 
-    # …
-…
-…
+    # Clamp the size to the model's hard limit (1024). If the user uploads a
+    # bigger picture we simply down‑scale it proportionally *before* padding.
+    if max(w, h) > MAX_SIDE:
+        if w >= h:
             new_w = MAX_SIDE
-            new_h = int( …
-        else:
+            new_h = int(h * MAX_SIDE / w)
+        else:
             new_h = MAX_SIDE
-            new_w = int( …
-…
-…
+            new_w = int(w * MAX_SIDE / h)
+        img = img.resize((new_w, new_h), Image.LANCZOS)
+        w, h = img.size
 
-…
-…
-    new_h = _make_multiple(new_h)
+    pad_w = _make_multiple_up(w)
+    pad_h = _make_multiple_up(h)
 
-…
-…
+    left = (pad_w - w) // 2
+    top = (pad_h - h) // 2
+    right = left + w
+    bottom = top + h
+
+    # create a black canvas and paste the original image in the centre
+    padded = Image.new("RGB", (pad_w, pad_h), (0, 0, 0))
+    padded.paste(img, (left, top))
+
+    return padded, (left, top, right, bottom)
 
-    # keep the original dimensions for the final upscale step
-    return resized, (orig_w, orig_h)
 
 # --------------------------------------------------------------
 # Inference function (GPU‑bound)
@@ -223,7 +231,7 @@ def infer(
     steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    """Run a single edit – …
+    """Run a single edit – the output has exactly the same width×height as the upload."""
     if input_image is None:
         raise gr.Error("Please upload an image to edit.")
 
@@ -254,7 +262,7 @@ def infer(
 
     # ---------- Image preparation ----------
     original = input_image.convert("RGB")
-    pipe_input, …
+    pipe_input, crop_box = pad_to_multiple_of_8(original) # <-- NEW helper
 
     # ---------- Diffusion ----------
     result = pipe(
@@ -268,14 +276,15 @@ def infer(
         true_cfg_scale=guidance_scale,
     ).images[0]
 
-    # ---------- …
-    final = result. …
+    # ---------- Crop back to the *exact* original resolution ----------
+    final = result.crop(crop_box) # remove the padding we added
+    final = final.resize(original.size, Image.LANCZOS) # just in case rounding moved a pixel
 
     return final, seed
 
 
 # --------------------------------------------------------------
-# Example helper ( …
+# Example helper (deterministic quick run)
 # --------------------------------------------------------------
 @spaces.GPU(duration=30)
 def infer_example(input_image, prompt, lora_adapter):
@@ -305,7 +314,7 @@ with gr.Blocks() as demo:
         elem_id="main-title")
     gr.Markdown(
         "Edit images with a variety of LoRA adapters while preserving the "
-        " …
+        "exact input resolution (no side‑cropping, no automatic resizing)."
     )
 
     with gr.Row(equal_height=True):
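
For reference, a minimal standalone sanity check of the new behaviour (not part of the commit; the helper bodies below are re-typed in condensed form from the diff above, and the 513×771 test size is an arbitrary choice). The padding step only adds border pixels, so cropping with the returned box restores the exact upload size:

from PIL import Image

DIVISIBLE_BY = 8
MAX_SIDE = 1024   # hard side limit assumed by the demo

def _make_multiple_up(x: int, base: int = DIVISIBLE_BY) -> int:
    # round x up to the nearest multiple of base
    return ((x + base - 1) // base) * base

def pad_to_multiple_of_8(img: Image.Image):
    # condensed version of the helper added in this commit
    w, h = img.size
    if max(w, h) > MAX_SIDE:                      # clamp oversized uploads first
        if w >= h:
            w, h = MAX_SIDE, int(h * MAX_SIDE / w)
        else:
            w, h = int(w * MAX_SIDE / h), MAX_SIDE
        img = img.resize((w, h), Image.LANCZOS)
    pad_w, pad_h = _make_multiple_up(w), _make_multiple_up(h)
    left, top = (pad_w - w) // 2, (pad_h - h) // 2
    padded = Image.new("RGB", (pad_w, pad_h), (0, 0, 0))  # black canvas
    padded.paste(img, (left, top))                        # original in the centre
    return padded, (left, top, left + w, top + h)

img = Image.new("RGB", (513, 771))                # deliberately not a multiple of 8
padded, crop_box = pad_to_multiple_of_8(img)
assert padded.size == (520, 776)                  # both sides rounded up to multiples of 8
assert padded.crop(crop_box).size == img.size     # crop restores the exact input size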