linoyts HF Staff committed on
Commit
9942d69
·
verified ·
1 Parent(s): 7b082e9

Update app.py (#1)

Browse files

- Update app.py (9742a7ce84cb8a4a5bd54fd0ea1cb2bda928441e)

Files changed (1) hide show
  1. app.py +54 -40
app.py CHANGED
@@ -273,15 +273,16 @@ Remove the background of image 2, and replace it with the background of image 1.
273
  """
274
 
275
  def use_output_as_input(output_images):
276
- """Convert output images to input format for the gallery"""
277
  if output_images is None or len(output_images) == 0:
278
- return []
279
- return output_images
280
 
281
  # --- Main Inference Function (with hardcoded negative prompt) ---
282
  @spaces.GPU()
283
  def infer(
284
- images,
 
285
  prompt,
286
  seed=42,
287
  randomize_seed=False,
@@ -297,7 +298,8 @@ def infer(
297
  Run image-editing inference using the Qwen-Image-Edit pipeline.
298
 
299
  Parameters:
300
- images (list): Input images from the Gradio gallery (PIL or path-based).
 
301
  prompt (str): Editing instruction (may be rewritten by LLM if enabled).
302
  seed (int): Random seed for reproducibility.
303
  randomize_seed (bool): If True, overrides seed with a random value.
@@ -324,17 +326,30 @@ def infer(
324
 
325
  # Load input images into PIL Images
326
  pil_images = []
327
- if images is not None:
328
- for item in images:
329
- try:
330
- if isinstance(item[0], Image.Image):
331
- pil_images.append(item[0].convert("RGB"))
332
- elif isinstance(item[0], str):
333
- pil_images.append(Image.open(item[0]).convert("RGB"))
334
- elif hasattr(item, "name"):
335
- pil_images.append(Image.open(item.name).convert("RGB"))
336
- except Exception:
337
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
338
 
339
  if height==256 and width==256:
340
  height, width = None, None
@@ -379,40 +394,38 @@ css = """
379
  #edit_text{margin-top: -62px !important}
380
  """
381
 
382
- with gr.Blocks(css=css) as demo:
383
  with gr.Column(elem_id="col-container"):
384
  gr.HTML("""
385
  <div id="logo-title">
386
- <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
387
- <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 96px">[Plus] Fast, 4-steps with LightX2V LoRA</h2>
388
  </div>
389
  """)
390
  gr.Markdown("""
391
- [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
392
- This demo uses the new [Qwen-Image-Edit-2511](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) with the [Qwen-Image-Lightning-2511](https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning) LoRA for accelerated inference.
393
- Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit-2509) to run locally with ComfyUI or diffusers.
394
  """)
395
  with gr.Row():
396
- with gr.Column():
397
- input_images = gr.Gallery(label="Input Images",
398
- show_label=False,
399
- type="pil",
400
- interactive=True)
401
-
402
- with gr.Column():
 
 
 
 
 
 
 
 
403
  result = gr.Gallery(label="Result", show_label=False, type="pil", interactive=False)
404
  # Add this button right after the result gallery - initially hidden
405
  use_output_btn = gr.Button("↗️ Use as input", variant="secondary", size="sm", visible=False)
406
 
407
- with gr.Row():
408
- prompt = gr.Text(
409
- label="Prompt",
410
- value=DEFAULT_LORA_PROMPT,
411
- show_label=False,
412
- visible=False
413
- )
414
- run_button = gr.Button("Edit Pose", variant="primary")
415
-
416
  with gr.Accordion("Advanced Settings", open=False):
417
  # Negative prompt UI element is removed here
418
 
@@ -469,7 +482,8 @@ with gr.Blocks(css=css) as demo:
469
  triggers=[run_button.click, prompt.submit],
470
  fn=infer,
471
  inputs=[
472
- input_images,
 
473
  prompt,
474
  seed,
475
  randomize_seed,
@@ -486,7 +500,7 @@ with gr.Blocks(css=css) as demo:
486
  use_output_btn.click(
487
  fn=use_output_as_input,
488
  inputs=[result],
489
- outputs=[input_images]
490
  )
491
 
492
  if __name__ == "__main__":
 
273
  """
274
 
275
  def use_output_as_input(output_images):
276
+ """Convert output images to input format for the reference image"""
277
  if output_images is None or len(output_images) == 0:
278
+ return None
279
+ return output_images[0]
280
 
281
  # --- Main Inference Function (with hardcoded negative prompt) ---
282
  @spaces.GPU()
283
  def infer(
284
+ reference_image,
285
+ pose_image,
286
  prompt,
287
  seed=42,
288
  randomize_seed=False,
 
298
  Run image-editing inference using the Qwen-Image-Edit pipeline.
299
 
300
  Parameters:
301
+ reference_image: Reference image (PIL or path-based).
302
+ pose_image: Pose image (PIL or path-based).
303
  prompt (str): Editing instruction (may be rewritten by LLM if enabled).
304
  seed (int): Random seed for reproducibility.
305
  randomize_seed (bool): If True, overrides seed with a random value.
 
326
 
327
  # Load input images into PIL Images
328
  pil_images = []
329
+
330
+ # Process reference image (first)
331
+ if reference_image is not None:
332
+ try:
333
+ if isinstance(reference_image, Image.Image):
334
+ pil_images.append(reference_image.convert("RGB"))
335
+ elif isinstance(reference_image, str):
336
+ pil_images.append(Image.open(reference_image).convert("RGB"))
337
+ elif hasattr(reference_image, "name"):
338
+ pil_images.append(Image.open(reference_image.name).convert("RGB"))
339
+ except Exception:
340
+ pass
341
+
342
+ # Process pose image (second)
343
+ if pose_image is not None:
344
+ try:
345
+ if isinstance(pose_image, Image.Image):
346
+ pil_images.append(pose_image.convert("RGB"))
347
+ elif isinstance(pose_image, str):
348
+ pil_images.append(Image.open(pose_image).convert("RGB"))
349
+ elif hasattr(pose_image, "name"):
350
+ pil_images.append(Image.open(pose_image.name).convert("RGB"))
351
+ except Exception:
352
+ pass
353
 
354
  if height==256 and width==256:
355
  height, width = None, None
 
394
  #edit_text{margin-top: -62px !important}
395
  """
396
 
397
+ with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
398
  with gr.Column(elem_id="col-container"):
399
  gr.HTML("""
400
  <div id="logo-title">
401
+ <h1 style="color: #5b47d1;">🧘 Qwen Edit Any Pose</h1>
402
+ <h2 style="font-style: italic;color: #5b47d1;margin-top: -10px !important;">Fast 4-step pose transfer with AnyPose LoRA</h2>
403
  </div>
404
  """)
405
  gr.Markdown("""
406
+ Transfer any pose from a reference image to your subject using [Qwen-Image-Edit-2511](https://huggingface.co/Qwen/Qwen-Image-Edit-2511) with [AnyPose LoRA](https://huggingface.co/lilylilith/AnyPose) and [Lightning LoRA](https://huggingface.co/lightx2v/Qwen-Image-Edit-2511-Lightning) for fast inference.
407
+ [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
 
408
  """)
409
  with gr.Row():
410
+ with gr.Column(scale=1):
411
+ with gr.Row():
412
+ reference_image = gr.Image(label="Reference Image", type="pil", interactive=True)
413
+ pose_image = gr.Image(label="Pose Image", type="pil", interactive=True)
414
+
415
+ with gr.Row():
416
+ prompt = gr.Text(
417
+ label="Prompt",
418
+ value=DEFAULT_LORA_PROMPT,
419
+ show_label=False,
420
+ visible=False
421
+ )
422
+ run_button = gr.Button("Edit Pose", variant="primary")
423
+
424
+ with gr.Column(scale=1):
425
  result = gr.Gallery(label="Result", show_label=False, type="pil", interactive=False)
426
  # Add this button right after the result gallery - initially hidden
427
  use_output_btn = gr.Button("↗️ Use as input", variant="secondary", size="sm", visible=False)
428
 
 
 
 
 
 
 
 
 
 
429
  with gr.Accordion("Advanced Settings", open=False):
430
  # Negative prompt UI element is removed here
431
 
 
482
  triggers=[run_button.click, prompt.submit],
483
  fn=infer,
484
  inputs=[
485
+ reference_image,
486
+ pose_image,
487
  prompt,
488
  seed,
489
  randomize_seed,
 
500
  use_output_btn.click(
501
  fn=use_output_as_input,
502
  inputs=[result],
503
+ outputs=[reference_image]
504
  )
505
 
506
  if __name__ == "__main__":