import gradio as gr
import numpy as np
import random
import torch
import spaces

from PIL import Image
from diffusers import FlowMatchEulerDiscreteScheduler, QwenImageEditPlusPipeline
# from optimization import optimize_pipeline_
# from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
# from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
# from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3

from huggingface_hub import InferenceClient
import math

import os
import base64
from io import BytesIO
import json

SYSTEM_PROMPT = '''
# Edit Instruction Rewriter
You are a professional edit instruction rewriter. Your task is to generate a precise, concise, and visually achievable professional-level edit instruction based on the user-provided instruction and the image to be edited.  

Please strictly follow the rewriting rules below:

## 1. General Principles
- Keep the rewritten prompt **concise and comprehensive**. Avoid overly long sentences and unnecessary descriptive language.  
- If the instruction is contradictory, vague, or unachievable, prioritize reasonable inference and correction, and supplement details when necessary.  
- Keep the main part of the original instruction unchanged, only enhancing its clarity, rationality, and visual feasibility.  
- All added objects or modifications must align with the logic and style of the scene in the input images.  
- If multiple sub-images are to be generated, describe the content of each sub-image individually.  

## 2. Task-Type Handling Rules

### 1. Add, Delete, Replace Tasks
- If the instruction is clear (already includes task type, target entity, position, quantity, attributes), preserve the original intent and only refine the grammar.  
- If the description is vague, supplement with minimal but sufficient details (category, color, size, orientation, position, etc.). For example:  
    > Original: "Add an animal"  
    > Rewritten: "Add a light-gray cat in the bottom-right corner, sitting and facing the camera"  
- Remove meaningless instructions: e.g., "Add 0 objects" should be ignored or flagged as invalid.  
- For replacement tasks, specify "Replace Y with X" and briefly describe the key visual features of X.  

### 2. Text Editing Tasks
- All text content must be enclosed in English double quotes `" "`. Keep the original language of the text, and keep the capitalization.  
- Both adding new text and replacing existing text are text replacement tasks. For example:  
    - Replace "xx" with "yy"  
    - Replace the mask / bounding box with "yy"  
    - Replace the visual object with "yy"  
- Specify text position, color, and layout only if the user has requested them.  
- If font is specified, keep the original language of the font.  

### 3. Human Editing Tasks
- Make the smallest changes to the given user's prompt.  
- If changes to background, action, expression, camera shot, or ambient lighting are required, please list each modification individually.
- **Edits to makeup or facial features / expression must be subtle, not exaggerated, and must preserve the subject's identity consistency.**
    > Original: "Add eyebrows to the face"  
    > Rewritten: "Slightly thicken the person's eyebrows with little change, look natural."

### 4. Style Conversion or Enhancement Tasks
- If a style is specified, describe it concisely using key visual features. For example:  
    > Original: "Disco style"  
    > Rewritten: "1970s disco style: flashing lights, disco ball, mirrored walls, vibrant colors"  
- For style reference, analyze the original image and extract key characteristics (color, composition, texture, lighting, artistic style, etc.), integrating them into the instruction.  
- **Colorization tasks (including old photo restoration) must use the fixed template:**  
  "Restore and colorize the old photo."  
- Clearly specify the object to be modified. For example:  
    > Original: Modify the subject in Picture 1 to match the style of Picture 2.  
    > Rewritten: Change the girl in Picture 1 to the ink-wash style of Picture 2 — rendered in black-and-white watercolor with soft color transitions.

### 5. Material Replacement
- Clearly specify the object and the material. For example: "Change the material of the apple to papercut style."
- For text material replacement, use the fixed template:
    "Change the material of text "xxxx" to laser style"

### 6. Logo/Pattern Editing
- Material replacement should preserve the original shape and structure as much as possible. For example:
   > Original: "Convert to sapphire material"  
   > Rewritten: "Convert the main subject in the image to sapphire material, preserving similar shape and structure"
- When migrating logos/patterns to new scenes, ensure shape and structure consistency. For example:
   > Original: "Migrate the logo in the image to a new scene"  
   > Rewritten: "Migrate the logo in the image to a new scene, preserving similar shape and structure"

### 7. Multi-Image Tasks
- Rewritten prompts must clearly point out which image's element is being modified. For example:  
    > Original: "Replace the subject of picture 1 with the subject of picture 2"  
    > Rewritten: "Replace the girl of picture 1 with the boy of picture 2, keeping picture 2's background unchanged"  
- For stylization tasks, describe the reference image's style in the rewritten prompt, while preserving the visual content of the source image.  

## 3. Rationale and Logic Check
- Resolve contradictory instructions: e.g., "Remove all trees but keep all trees" requires logical correction.
- Supplement missing critical information: e.g., if position is unspecified, choose a reasonable area based on composition (near subject, blank space, center/edge, etc.).

# Output Format Example
```json
{
   "Rewritten": "..."
}
```
'''
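
# The rewriter is asked for a fenced JSON object; polish_prompt_hf below parses
# it leniently and falls back to the raw completion (or the original prompt) if
# decoding fails.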

def polish_prompt_hf(original_prompt, img_list):
    """
    Rewrites the prompt using a Hugging Face InferenceClient.
    Supports multiple images via img_list.
    """
    # Ensure the Inference Providers API key is set
    api_key = os.environ.get("inference_providers")
    if not api_key:
        print("Warning: 'inference_providers' environment variable not set. Falling back to the original prompt.")
        return original_prompt
    prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {original_prompt}\n\nRewritten Prompt:"
    system_prompt = "you are a helpful assistant, you should provide useful answers to users."
    try:
        # Initialize the client
        client = InferenceClient(
            provider="nebius",
            api_key=api_key,
        )

        # Convert list of images to base64 data URLs
        image_urls = []
        if img_list is not None:
            # Ensure img_list is actually a list
            if not isinstance(img_list, list):
                img_list = [img_list]
            
            for img in img_list:
                image_url = None
                # If img is a PIL Image
                if hasattr(img, 'save'):  # Check if it's a PIL Image
                    buffered = BytesIO()
                    img.save(buffered, format="PNG")
                    img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
                    image_url = f"data:image/png;base64,{img_base64}"
                # If img is already a file path (string)
                elif isinstance(img, str):
                    with open(img, "rb") as image_file:
                        img_base64 = base64.b64encode(image_file.read()).decode('utf-8')
                    image_url = f"data:image/png;base64,{img_base64}"
                else:
                    print(f"Warning: Unexpected image type: {type(img)}, skipping...")
                    continue
                
                if image_url:
                    image_urls.append(image_url)

        # Build the content array with text first, then all images
        content = [
            {
                "type": "text",
                "text": prompt
            }
        ]
        
        # Add all images to the content
        for image_url in image_urls:
            content.append({
                "type": "image_url",
                "image_url": {
                    "url": image_url
                }
            })

        # Format the messages for the chat completions API
        messages = [
            {"role": "system", "content": system_prompt},
            {
                "role": "user",
                "content": content
            }
        ]

        # Call the API
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-VL-72B-Instruct",
            messages=messages,
        )
        
        # Parse the response
        result = completion.choices[0].message.content
        
        # Try to extract JSON if present
        if '"Rewritten"' in result:
            try:
                # Clean up the response
                result = result.replace('```json', '').replace('```', '')
                result_json = json.loads(result)
                polished_prompt = result_json.get('Rewritten', result)
            except json.JSONDecodeError:
                polished_prompt = result
        else:
            polished_prompt = result
            
        polished_prompt = polished_prompt.strip().replace("\n", " ")
        return polished_prompt
        
    except Exception as e:
        print(f"Error during API call to Hugging Face: {e}")
        # Fallback to original prompt if enhancement fails
        return original_prompt 
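
# Minimal usage sketch for polish_prompt_hf (hypothetical file name; requires the
# `inference_providers` environment variable to hold a valid Inference Providers
# API key):
#
#   ref = Image.open("reference.png")
#   print(polish_prompt_hf("Add an animal", [ref]))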



def encode_image(pil_image):
    """Encode a PIL image as a base64-encoded PNG string."""
    buffered = BytesIO()
    pil_image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")
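
# encode_image mirrors the inline base64 logic in polish_prompt_hf; a data URL
# for the chat API could be built from it, e.g. (hypothetical one-liner):
#
#   data_url = f"data:image/png;base64,{encode_image(Image.new('RGB', (64, 64)))}"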

# --- Model Loading ---
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Scheduler configuration for Lightning
scheduler_config = {
    "base_image_seq_len": 256,
    "base_shift": math.log(3),
    "invert_sigmas": False,
    "max_image_seq_len": 8192,
    "max_shift": math.log(3),
    "num_train_timesteps": 1000,
    "shift": 1.0,
    "shift_terminal": None,
    "stochastic_sampling": False,
    "time_shift_type": "exponential",
    "use_beta_sigmas": False,
    "use_dynamic_shifting": True,
    "use_exponential_sigmas": False,
    "use_karras_sigmas": False,
}
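
# Note: with base_shift == max_shift == log(3), the dynamic shifting above
# resolves to a constant exponential time shift of exp(log 3) = 3 at every
# resolution, which is presumably how the Lightning 4-step checkpoint expects
# to be sampled.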

# Initialize scheduler with Lightning config
scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)

# Load the model pipeline
pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2511", 
                                                 scheduler=scheduler,
                                                 torch_dtype=dtype).to(device)
pipe.load_lora_weights(
        "lightx2v/Qwen-Image-Edit-2511-Lightning", 
        weight_name="Qwen-Image-Edit-2511-Lightning-4steps-V1.0-bf16.safetensors",adapter_name="fast"
)
pipe.load_lora_weights(
        "lilylilith/AnyPose", 
        weight_name="2511-AnyPose-base-000006250.safetensors",adapter_name="base"
)
pipe.load_lora_weights(
        "lilylilith/AnyPose", 
        weight_name="2511-AnyPose-helper-00006000.safetensors",adapter_name="helper"
)
pipe.set_adapters(["fast", "base", "helper"], adapter_weights=[1., 0.7, 0.7])
pipe.fuse_lora(adapter_names=["fast", "base", "helper"], lora_scale=1)
pipe.unload_lora_weights()
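
# Fusing bakes the weighted adapters directly into the base weights, so inference
# pays no per-layer LoRA overhead; unloading afterwards drops the now-redundant
# adapter modules while leaving the fused weights in place.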

# # Apply the same optimizations from the first version
# pipe.transformer.__class__ = QwenImageTransformer2DModel
# pipe.transformer.set_attn_processor(QwenDoubleStreamAttnProcessorFA3())

# # --- Ahead-of-time compilation ---
# optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB", (1024, 1024))], prompt="prompt")

# --- UI Constants and Helpers ---
MAX_SEED = np.iinfo(np.int32).max  # largest non-negative 32-bit seed value for the slider

DEFAULT_LORA_PROMPT = """
Make the person in image 1 do the exact same pose as the person in image 2. 
Changing the style and background of the image of the person in image 1 is undesirable, so don't do it. 
The new pose should be pixel accurate to the pose we are trying to copy. 
The position of the arms and head and legs should be the same as the pose we are trying to copy. 
Change the field of view and angle to match exactly image 2. Head tilt and eye gaze pose should match the person in image 2. 
Remove the background of image 2, and replace it with the background of image 1.
Don't change the identity of the person in image 1; keep their appearance the same. It is undesirable to change their facial features or hair style, so don't do it.
"""

def use_output_as_input(output_images):
    """Convert the first output gallery image back into the reference-image input."""
    if not output_images:
        return None
    first = output_images[0]
    # Depending on the Gradio version, gallery values may arrive as
    # (image, caption) tuples rather than bare images.
    if isinstance(first, (tuple, list)):
        first = first[0]
    return first

    
@spaces.GPU()
def infer(
    reference_image,
    pose_image,
    prompt=DEFAULT_LORA_PROMPT,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=1.0,
    num_inference_steps=4,
    height=None,
    width=None,
    rewrite_prompt=False,
    num_images_per_prompt=1,
    progress=gr.Progress(track_tqdm=True),
):
    """
    Run image-editing inference using the Qwen-Image-Edit pipeline.

    Parameters:
        reference_image: Reference image (PIL or path-based).
        pose_image: Pose image (PIL or path-based).
        prompt (str): Editing instruction (may be rewritten by LLM if enabled).
        seed (int): Random seed for reproducibility.
        randomize_seed (bool): If True, overrides seed with a random value.
        true_guidance_scale (float): CFG scale used by Qwen-Image.
        num_inference_steps (int): Number of diffusion steps.
        height (int | None): Optional output height override.
        width (int | None): Optional output width override.
        rewrite_prompt (bool): Whether to rewrite the prompt using Qwen-2.5-VL.
        num_images_per_prompt (int): Number of images to generate.
        progress: Gradio progress callback.

    Returns:
        tuple: (generated_images, seed_used, UI_visibility_update)
    """
    
    # Hardcode the negative prompt as requested
    negative_prompt = " "
    
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # Set up the generator for reproducibility
    generator = torch.Generator(device=device).manual_seed(seed)
    
    # Load input images into PIL Images
    pil_images = []
    
    # Process reference image (first)
    if reference_image is not None:
        try:
            if isinstance(reference_image, Image.Image):
                pil_images.append(reference_image.convert("RGB"))
            elif isinstance(reference_image, str):
                pil_images.append(Image.open(reference_image).convert("RGB"))
            elif hasattr(reference_image, "name"):
                pil_images.append(Image.open(reference_image.name).convert("RGB"))
        except Exception:
            pass
    
    # Process pose image (second)
    if pose_image is not None:
        try:
            if isinstance(pose_image, Image.Image):
                pil_images.append(pose_image.convert("RGB"))
            elif isinstance(pose_image, str):
                pil_images.append(Image.open(pose_image).convert("RGB"))
            elif hasattr(pose_image, "name"):
                pil_images.append(Image.open(pose_image.name).convert("RGB"))
        except Exception:
            pass

    # The size sliders bottom out at 256x256; treat that minimum as an "auto"
    # sentinel and let the pipeline infer the output size from the input images.
    if height == 256 and width == 256:
        height, width = None, None
    print(f"Calling pipeline with prompt: '{prompt}'")
    print(f"Negative Prompt: '{negative_prompt}'")
    print(f"Seed: {seed}, Steps: {num_inference_steps}, Guidance: {true_guidance_scale}, Size: {width}x{height}")
    if rewrite_prompt and len(pil_images) > 0:
        prompt = polish_prompt_hf(prompt, pil_images)
        print(f"Rewritten Prompt: {prompt}")
    

    # Generate the image
    image = pipe(
        image=pil_images if len(pil_images) > 0 else None,
        prompt=prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        generator=generator,
        true_cfg_scale=true_guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
    ).images

    # Return the images and seed, and reveal the "Use as input" button
    return image, seed, gr.update(visible=True)



def infer_for_examples(
    reference_image,
    pose_image,
    prompt=DEFAULT_LORA_PROMPT,
    seed=42,
    randomize_seed=False,
    true_guidance_scale=1.0,
    num_inference_steps=4,
    height=None,
    width=None,
    rewrite_prompt=False,
    num_images_per_prompt=1,
    progress=gr.Progress(track_tqdm=True),
):
    """Wrapper for examples that only returns image and seed."""
    image, seed, _ = infer(
        reference_image, pose_image, prompt, seed, randomize_seed,
        true_guidance_scale, num_inference_steps, height, width,
        rewrite_prompt, num_images_per_prompt, progress
    )
    return image, seed

# --- Examples and UI Layout ---
examples = []

css = """
#col-container {
    margin: 0 auto;
    max-width: 1024px;
}
#logo-title {
    text-align: center;
}
#logo-title img {
    width: 400px;
}
#edit_text{margin-top: -62px !important}
"""

with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
        <div id="logo-title">
            <h1 style="color: #5b47d1;">Qwen Edit Any Pose 🕺 </h1>
            <h2 style="font-style: italic;color: #5b47d1;margin-top: -10px !important;">Fast 4-step pose transfer with Qwen Edit 2511 & AnyPose LoRA</h2>
        </div>
        """)
        gr.Markdown("""
        Transfer any pose from a reference image to your subject using [Qwen-Image-Edit-2511](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) with [lilylilith/AnyPose LoRA](https://huggingface.co/lilylilith/AnyPose) and [lightx2v Lightning LoRA](https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning) for fast inference.
        [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
        """)
        with gr.Row():
            with gr.Column(scale=1):
                with gr.Row():
                    reference_image = gr.Image(label="Reference Image", type="pil", interactive=True)
                    pose_image = gr.Image(label="Pose Image", type="pil", interactive=True)
                
                with gr.Row():
                    prompt = gr.Text(
                            label="Prompt",
                            value=DEFAULT_LORA_PROMPT,
                            show_label=False,
                            visible=False
                    )
                    run_button = gr.Button("Edit Pose", variant="primary")

            with gr.Column(scale=1):
                result = gr.Gallery(label="Result", show_label=False, type="pil", interactive=False)
                # Add this button right after the result gallery - initially hidden
                use_output_btn = gr.Button("↗️ Use as input", variant="secondary", size="sm", visible=False)

        with gr.Accordion("Advanced Settings", open=False):
            # Negative prompt UI element is removed here

            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )

            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():

                true_guidance_scale = gr.Slider(
                    label="True guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=1.0
                )

                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=40,
                    step=1,
                    value=4,
                )
                
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=2048,
                    step=8,
                    value=None,
                )
                
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=2048,
                    step=8,
                    value=None,
                )
                
                
                rewrite_prompt = gr.Checkbox(label="Rewrite prompt", value=False, visible=False)

        gr.Examples(
            examples=[
                ["s-l1200.jpg", "High-Lunge_Andrew-Clark.jpg"],
                ["009.jpg", "wednesday.png"],
            ],
            inputs=[
                reference_image,
                pose_image,
            ],
            outputs=[result, seed],
            fn=infer_for_examples,
            cache_examples=True,
            cache_mode="lazy",
        )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            reference_image,
            pose_image,
            prompt,
            seed,
            randomize_seed,
            true_guidance_scale,
            num_inference_steps,
            height,
            width,
            rewrite_prompt,
        ],
        outputs=[result, seed, use_output_btn],  # Added use_output_btn to outputs
    )

    # Add the new event handler for the "Use Output as Input" button
    use_output_btn.click(
        fn=use_output_as_input,
        inputs=[result],
        outputs=[reference_image]
    )

if __name__ == "__main__":
    demo.launch(mcp_server=True)