import gradio as gr
import numpy as np
import random
import torch
import spaces
from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler, QwenImageEditPlusPipeline
# from optimization import optimize_pipeline_
# from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
# from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
# from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
import math
# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
# Scheduler configuration for Lightning
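# (with use_dynamic_shifting=True the timestep shift is interpolated between
# base_shift and max_shift according to the image sequence length)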
scheduler_config = {
"base_image_seq_len": 256,
"base_shift": math.log(5),
"invert_sigmas": False,
"max_image_seq_len": 8192,
"max_shift": math.log(3),
"num_train_timesteps": 1000,
"shift": 1.0,
"shift_terminal": None,
"stochastic_sampling": False,
"time_shift_type": "exponential",
"use_beta_sigmas": False,
"use_dynamic_shifting": True,
"use_exponential_sigmas": False,
"use_karras_sigmas": False,
}
# Initialize scheduler with Lightning config
scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
# Load the model pipeline
pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2511",
    scheduler=scheduler,
    torch_dtype=dtype,
).to(device)
pipe.load_lora_weights(
"lightx2v/Qwen-Image-Edit-2511-Lightning",
weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-fp32.safetensors"
)
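# Fuse the LoRA weights into the base model so inference runs without adapter overhead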
pipe.fuse_lora()
# # Apply the same optimizations from the first version
# pipe.transformer.__class__ = QwenImageTransformer2DModel
# pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())
# # --- Ahead-of-time compilation ---
# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")
# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max
def use_output_as_input(output_image):
    """Send the generated image back to the first input image slot."""
    if output_image is None:
        return None
    return output_image
# --- Main Inference Function (with hardcoded negative prompt) ---
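# @spaces.GPU requests a ZeroGPU slice for the duration of each call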
@spaces.GPU()
def infer(
image_1,
image_2,
image_3,
prompt,
seed=42,
randomize_seed=False,
true_guidance_scale=1.0,
num_inference_steps=4,
height=None,
width=None,
num_images_per_prompt=1,
progress=gr.Progress(track_tqdm=True),
):
"""
Run image-editing inference using the Qwen-Image-Edit pipeline.
Parameters:
images (list): Input images from the Gradio gallery (PIL or path-based).
prompt (str): Editing instruction (may be rewritten by LLM if enabled).
seed (int): Random seed for reproducibility.
randomize_seed (bool): If True, overrides seed with a random value.
true_guidance_scale (float): CFG scale used by Qwen-Image.
num_inference_steps (int): Number of diffusion steps.
height (int | None): Optional output height override.
width (int | None): Optional output width override.
rewrite_prompt (bool): Whether to rewrite the prompt using Qwen-2.5-VL.
num_images_per_prompt (int): Number of images to generate.
progress: Gradio progress callback.
Returns:
tuple: (generated_images, seed_used, UI_visibility_update)
"""
# Hardcode the negative prompt as requested
negative_prompt = " "
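    # A single space acts as an effectively empty negative prompt; it only
    # influences sampling when true_cfg_scale > 1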
if randomize_seed:
seed = random.randint(0, MAX_SEED)
# Set up the generator for reproducibility
generator = torch.Generator(device=device).manual_seed(seed)
# Load input images into a list of PIL Images
pil_images = []
    for item in [image_1, image_2, image_3]:
        if item is None:
            continue
        pil_images.append(item.convert("RGB"))
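    # The Height/Width sliders cannot represent "unset": with value=None they
    # start at their minimum (256), so treat 256x256 as "auto" and let the
    # pipeline derive the output size from the input images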
    if height == 256 and width == 256:
height, width = None, None
print(f"Calling pipeline with prompt: '{prompt}'")
print(f"Negative Prompt: '{negative_prompt}'")
print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
# Generate the image
images = pipe(
        image=pil_images if pil_images else None,
prompt=prompt,
height=height,
width=width,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
generator=generator,
true_cfg_scale=true_guidance_scale,
num_images_per_prompt=num_images_per_prompt,
).images
    # Return the first image, the seed actually used, and reveal the reuse button
    return images[0], seed, gr.update(visible=True)
# --- Examples and UI Layout ---
examples = []
css = """
#col-container {
margin: 0 auto;
max-width: 1024px;
}
#logo-title {
text-align: center;
}
#logo-title img {
width: 400px;
}
#edit_text{margin-top: -62px !important}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.HTML("""
<div id="logo-title">
<img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
            <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 96px">[Plus] Fast 4-step editing with the LightX2V LoRA</h2>
</div>
""")
gr.Markdown("""
[Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
This demo uses the new [Qwen-Image-Edit-2511](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) with the [Qwen-Image-Lightning-2511](https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning) LoRA for accelerated inference.
        Try it on [Qwen Chat](https://chat.qwen.ai/), or [download the model](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) to run locally with ComfyUI or diffusers.
""")
with gr.Row():
with gr.Column():
image_1 = gr.Image(label="image 1", type="pil", interactive=True)
with gr.Accordion("More references", open=False):
with gr.Row():
image_2 = gr.Image(label="image 2", type="pil", interactive=True)
image_3 = gr.Image(label="image 3", type="pil", interactive=True)
with gr.Column():
result = gr.Image(label="Result", type="pil", interactive=False)
                # Button to reuse the output as image 1; hidden until a result exists
use_output_btn = gr.Button("↗️ Use as image 1", variant="secondary", size="sm", visible=False)
with gr.Row():
with gr.Column():
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
placeholder="describe the edit instruction",
container=False,
lines=5
)
with gr.Row():
run_button = gr.Button("Edit!", variant="primary")
with gr.Accordion("Advanced Settings", open=False):
# Negative prompt UI element is removed here
seed = gr.Slider(
label="Seed",
minimum=0,
maximum=MAX_SEED,
step=1,
value=0,
)
randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
with gr.Row():
true_guidance_scale = gr.Slider(
label="True guidance scale",
minimum=1.0,
maximum=10.0,
step=0.1,
value=1.0
)
num_inference_steps = gr.Slider(
label="Number of inference steps",
minimum=1,
maximum=40,
step=1,
value=4,
)
height = gr.Slider(
label="Height",
minimum=256,
maximum=2048,
step=8,
value=None,
)
width = gr.Slider(
label="Width",
minimum=256,
maximum=2048,
step=8,
value=None,
)
# gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=False)
gr.on(
triggers=[run_button.click],
fn=infer,
inputs=[
image_1,
image_2,
image_3,
prompt,
seed,
randomize_seed,
true_guidance_scale,
num_inference_steps,
height,
width,
],
outputs=[result, seed, use_output_btn], # Added use_output_btn to outputs
)
# Add the new event handler for the "Use Output as Input" button
use_output_btn.click(
fn=use_output_as_input,
inputs=[result],
outputs=[image_1]
)
if __name__ == "__main__":
    demo.launch(mcp_server=True, show_error=True)