cpuai committed
Commit 2cea403 · verified · 1 Parent(s): 1e18fce

Update app.py

Files changed (1):
  app.py +136 -131
app.py CHANGED
@@ -1,5 +1,10 @@
 import os
 import gc
 import gradio as gr
 import numpy as np
 import spaces
@@ -10,9 +15,6 @@ from typing import Iterable
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
-# =========================
-# Theme
-# =========================
 colors.orange_red = colors.Color(
     name="orange_red",
     c50="#FFF0E5",
@@ -87,11 +89,20 @@ print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
 print("torch.__version__ =", torch.__version__)
 print("Using device:", device)
 
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
 dtype = torch.bfloat16
 
 pipe = QwenImageEditPlusPipeline.from_pretrained(
@@ -110,6 +121,19 @@ try:
 except Exception as e:
     print(f"Warning: Could not set FA3 processor: {e}")
 
 MAX_SEED = np.iinfo(np.int32).max
 
 ADAPTER_SPECS = {
@@ -187,126 +211,90 @@ ADAPTER_SPECS = {
 
 LOADED_ADAPTERS = set()
 
-# =========================
-# Size strategy (the core fix)
-# =========================
-def _round_to_multiple(x: int, m: int) -> int:
-    """Round to the nearest multiple of m (always >= m)."""
-    if m <= 0:
-        return max(1, int(x))
-    v = int(round(x / m) * m)
-    return max(m, v)
-
-def _floor_to_multiple(x: int, m: int) -> int:
-    """Floor to a multiple of m (always >= m)."""
-    if m <= 0:
-        return max(1, int(x))
-    v = int(x // m * m)
-    return max(m, v)
-
-def _clamp_by_max_patches(width: int, height: int, m: int, max_patches: int) -> tuple[int, int]:
-    """
-    The Qwen-Image latent patch count is:
-        patches = (width // m) * (height // m)
-    where m = vae_scale_factor * 2.
-    Exceeding max_patches (typically 4096) tends to raise errors, blow up VRAM, or exceed the model's limit.
-    """
-    width = max(m, width)
-    height = max(m, height)
-
-    patches = (width // m) * (height // m)
-    if patches <= max_patches:
-        return width, height
-
-    # First scale down by area ratio (preserving aspect ratio)
-    scale = (max_patches / float(patches)) ** 0.5
-    width = int(width * scale)
-    height = int(height * scale)
-
-    # After scaling, floor-align to the multiple so the limit is not exceeded again
-    width = _floor_to_multiple(width, m)
-    height = _floor_to_multiple(height, m)
-
-    # Still over in extreme cases: shave a little more with a simple loop
-    while (width // m) * (height // m) > max_patches and width > m and height > m:
-        if width >= height:
-            width = max(m, width - m)
-        else:
-            height = max(m, height - m)
-
-    return width, height
-
-def compute_target_dimensions(pil_img: Image.Image, size_mode: str) -> tuple[int, int]:
-    """
-    size_mode:
-      - "Original size (no crop)"
-      - "2x original (no crop, auto shrink if over limit)"
-    Note: the input image is never cropped here; only the output width/height is computed.
-    """
-    if pil_img is None:
-        return 1024, 1024
-
-    ow, oh = pil_img.size
-
-    # m comes from the pipeline's requirement: height/width must be divisible by vae_scale_factor*2 (otherwise they are re-aligned/recomputed)
-    multiple_of = int(getattr(pipe, "vae_scale_factor", 8) * 2)
-
-    # The Qwen pipeline's scheduler config defaults to max_image_seq_len=4096 (the image_seq_len cap)
-    max_patches = int(getattr(pipe, "scheduler", None).config.get("max_image_seq_len", 4096))
-
-    scale = 1
-    if size_mode.startswith("2x original"):
-        scale = 2
-
-    tw = ow * scale
-    th = oh * scale
-
-    # "Closest to the original size": prefer rounding to the nearest valid multiple rather than always flooring
-    tw = _round_to_multiple(tw, multiple_of)
-    th = _round_to_multiple(th, multiple_of)
-
-    # If 2x pushes the patch count over the limit, shrink back automatically (still no crop; only the output size is scaled)
-    tw, th = _clamp_by_max_patches(tw, th, multiple_of, max_patches)
-
-    return tw, th
-
-# UI only: provide a "reasonable-looking" estimate after upload (does not affect the final infer computation)
-def update_dimensions_on_upload(image):
-    if image is None:
-        return 1024, 1024
-
-    # Keep the original logic: align the longest side to 1024 (display estimate / placeholder only)
-    original_width, original_height = image.size
-
-    if original_width > original_height:
-        new_width = 1024
-        aspect_ratio = original_height / original_width
-        new_height = int(new_width * aspect_ratio)
-    else:
-        new_height = 1024
-        aspect_ratio = original_width / original_height
-        new_width = int(new_height * aspect_ratio)
-
-    # The original code used //8*8 here; the real pipeline ultimately re-aligns to vae_scale_factor*2
-    new_width = (new_width // 8) * 8
-    new_height = (new_height // 8) * 8
-
-    return new_width, new_height
-
 @spaces.GPU
 def infer(
     images,
     prompt,
     lora_adapter,
-    size_mode,  # New: target image size option
     seed,
     randomize_seed,
     guidance_scale,
     steps,
     progress=gr.Progress(track_tqdm=True)
 ):
-    gc.collect()
-    torch.cuda.empty_cache()
 
     if not images:
         raise gr.Error("Please upload at least one image to edit.")
@@ -315,7 +303,7 @@ def infer(
     if images is not None:
         for item in images:
             try:
-                if isinstance(item, tuple) or isinstance(item, list):
                     path_or_img = item[0]
                 else:
                     path_or_img = item
@@ -359,33 +347,52 @@ def infer(
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device=device).manual_seed(seed)
-    negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
-    # Key: compute the output size from the user's choice (no crop)
-    width, height = compute_target_dimensions(pil_images[0], size_mode)
 
     try:
         result_image = pipe(
             image=pil_images,
             prompt=prompt,
             negative_prompt=negative_prompt,
-            height=height,
-            width=width,
             num_inference_steps=steps,
             generator=generator,
             true_cfg_scale=guidance_scale,
         ).images[0]
 
         return result_image, seed
 
-    except Exception as e:
         raise e
     finally:
-        gc.collect()
-        torch.cuda.empty_cache()
 
 @spaces.GPU
-def infer_example(images, prompt, lora_adapter, size_mode):
     if not images:
         return None, 0
@@ -398,7 +405,7 @@ def infer_example(images, prompt, lora_adapter, size_mode):
         images=images_list,
         prompt=prompt,
         lora_adapter=lora_adapter,
-        size_mode=size_mode,
         seed=0,
         randomize_seed=True,
         guidance_scale=1.0,
@@ -417,7 +424,7 @@ css = """
 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
-        gr.Markdown("Perform diverse image edits using specialized [LoRA](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image-Edit-2511) adapters. Upload one or more images.")
 
         with gr.Row(equal_height=True):
             with gr.Column():
@@ -448,14 +455,12 @@ with gr.Blocks() as demo:
                     value="Photo-to-Anime"
                 )
 
-                # New: target image size option (fixes "original size, no crop" and the 2x over-limit error)
-                size_mode = gr.Radio(
-                    label="Target Size",
-                    choices=[
-                        "Original size (no crop)",
-                        "2x original (no crop, auto shrink if over limit)",
-                    ],
-                    value="Original size (no crop)",
                 )
 
                 with gr.Accordion("Advanced Settings", open=False, visible=False):
@@ -466,22 +471,22 @@ with gr.Blocks() as demo:
 
         gr.Examples(
             examples=[
-                [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime", "Original size (no crop)"],
-                [["examples/HRP.jpg"], "Transform into a hyper-realistic face portrait.", "Hyper-Realistic-Portrait", "Original size (no crop)"],
-                [["examples/A.jpeg"], "Rotate the camera 45 degrees to the right.", "Multiple-Angles", "Original size (no crop)"],
-                [["examples/U.jpg"], "Upscale this picture to 4K resolution.", "Upscaler", "2x original (no crop, auto shrink if over limit)"],
-                [["examples/PP1.jpg"], "cinematic polaroid with soft grain subtle vignette gentle lighting white frame handwritten photographed by hf preserving realistic texture and details", "Polaroid-Photo", "Original size (no crop)"],
-                [["examples/Z1.jpg"], "Front-right quarter view.", "Fal-Multiple-Angles", "Original size (no crop)"],
-                [["examples/MT.jpg"], "Paint with manga tone.", "Manga-Tone", "Original size (no crop)"],
-                [["examples/URP.jpg"], "ultra-realistic portrait.", "Ultra-Realistic-Portrait", "Original size (no crop)"],
-                [["examples/MN.jpg"], "Transform into Midnight Noir Eyes Spotlight.", "Midnight-Noir-Eyes-Spotlight", "Original size (no crop)"],
-                [["examples/ST1.jpg", "examples/ST2.jpg"], "Convert Image 1 to the style of Image 2.", "Style-Transfer", "Original size (no crop)"],
-                [["examples/R1.jpg"], "Change the picture to realistic photograph.", "Anything2Real", "Original size (no crop)"],
-                [["examples/UA.jpeg"], "Unblur and upscale.", "Unblur-Anything", "2x original (no crop, auto shrink if over limit)"],
-                [["examples/L1.jpg", "examples/L2.jpg"], "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration", "Original size (no crop)"],
-                [["examples/P1.jpg"], "Transform into anime (while preserving the background and remaining elements maintaining realism and original details.)", "Anime-V2", "Original size (no crop)"],
             ],
-            inputs=[images, prompt, lora_adapter, size_mode],
             outputs=[output_image, seed],
             fn=infer_example,
             cache_examples=False,
@@ -492,7 +497,7 @@ with gr.Blocks() as demo:
 
     run_button.click(
         fn=infer,
-        inputs=[images, prompt, lora_adapter, size_mode, seed, randomize_seed, guidance_scale, steps],
        outputs=[output_image, seed]
    )
 
 
 import os
+# Suggestion: reduce CUDA memory fragmentation (helps with the occasional NVML/CUDACachingAllocator errors on HF Spaces)
+# This environment variable only takes effect reliably when set before torch is imported.
+os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "expandable_segments:True,max_split_size_mb:128")
+
 import gc
+import math
 import gradio as gr
 import numpy as np
 import spaces
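
A note on the allocator setting above: os.environ.setdefault only applies when the variable is not already set (so a value configured on the Space wins), and PYTORCH_CUDA_ALLOC_CONF is read when torch initializes its CUDA caching allocator, which is why the call must precede the first torch import. A minimal equivalent sketch:

# Equivalent to the setdefault call above (standalone sketch):
import os
if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ:
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True,max_split_size_mb:128"
import torch  # the CUDA caching allocator picks the setting up when it initializes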
 
 from gradio.themes import Soft
 from gradio.themes.utils import colors, fonts, sizes
 
 colors.orange_red = colors.Color(
     name="orange_red",
     c50="#FFF0E5",
 
 print("torch.__version__ =", torch.__version__)
 print("Using device:", device)
 
+# Optional: beneficial for inference speed (allows TF32 matmuls)
+try:
+    torch.backends.cuda.matmul.allow_tf32 = True
+except Exception:
+    pass
+
 from diffusers import FlowMatchEulerDiscreteScheduler
 from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
+# Key: import the module object itself so VAE_IMAGE_SIZE can be patched dynamically later (workaround)
+import qwenimage.pipeline_qwenimage_edit_plus as qwen_edit_module
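
The module-object import matters for the VAE_IMAGE_SIZE patch applied later in infer(): rebinding a name brought in via "from mod import NAME" is invisible inside the module, while assigning the module attribute is seen by its code. A minimal sketch of the pattern (the value here is illustrative):

# Monkey-patch pattern used by this app (illustrative value):
import qwenimage.pipeline_qwenimage_edit_plus as qwen_edit_module
qwen_edit_module.VAE_IMAGE_SIZE = 1168 * 896  # code inside the module sees the new value
# By contrast, "from qwenimage.pipeline_qwenimage_edit_plus import VAE_IMAGE_SIZE"
# followed by "VAE_IMAGE_SIZE = ..." would rebind only the local name.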
+
 dtype = torch.bfloat16
 
 pipe = QwenImageEditPlusPipeline.from_pretrained(
 
 except Exception as e:
     print(f"Warning: Could not set FA3 processor: {e}")
 
+# Lower VRAM: VAE slicing/tiling (method availability varies across diffusers versions, hence the try/except wrappers)
+try:
+    pipe.enable_vae_slicing()
+    print("VAE slicing enabled.")
+except Exception as e:
+    print(f"Warning: enable_vae_slicing not available: {e}")
+
+try:
+    pipe.enable_vae_tiling()
+    print("VAE tiling enabled.")
+except Exception as e:
+    print(f"Warning: enable_vae_tiling not available: {e}")
+
 MAX_SEED = np.iinfo(np.int32).max
 
 ADAPTER_SPECS = {
 
 
 LOADED_ADAPTERS = set()
 
+# -----------------------------
+# Core size logic (fixes the "crop to the middle" behavior and prevents the 2x-size crash)
+# -----------------------------
+MAX_IMAGE_SEQ_LEN = 4096  # the pipeline's default max_seq_len in calculate_shift (used here as a safety cap)
+
+def _calculate_dimensions_like_pipeline(target_area: float, ratio: float) -> tuple[int, int]:
+    """
+    Consistent with the pipeline's calculate_dimensions: the closest size aligned to 32.
+    """
+    width = math.sqrt(target_area * ratio)
+    height = width / ratio
+    width = round(width / 32) * 32
+    height = round(height / 32) * 32
+    width = max(32, int(width))
+    height = max(32, int(height))
+    return width, height
+
+def _pick_infer_size(pil_image: Image.Image, size_mode: str) -> tuple[int, int, int, int]:
+    """
+    Returns: (infer_w, infer_h, requested_w, requested_h)
+    - requested_w/h: the output size the user asked for (original / 2x)
+    - infer_w/h: the size actually fed to the model for this run (32-aligned, seq_len capped to avoid OOM)
+    """
+    ow, oh = pil_image.size
+
+    if size_mode == "2x original size":
+        req_w, req_h = ow * 2, oh * 2
+    else:
+        req_w, req_h = ow, oh
+
+    # This QwenImage pipeline ultimately requires pixel dimensions divisible by (vae_scale_factor*2); otherwise it force-reshapes/truncates internally.
+    multiple_of = max(16, int(getattr(pipe, "vae_scale_factor", 8)) * 2)
+
+    # Hard-cap the patch count: seq_len = (w/m)*(h/m); beyond 4096, VRAM/speed problems or outright OOM become very likely
+    # max_area = 4096 * m * m
+    max_area = MAX_IMAGE_SEQ_LEN * (multiple_of * multiple_of)
+
+    req_area = req_w * req_h
+    ratio = req_w / req_h
+
+    # If the requested area is too large, proportionally shrink to max_area first ("the closest size that still runs")
+    if req_area > max_area:
+        scale = math.sqrt(max_area / req_area)
+        target_area = req_area * scale * scale
+    else:
+        target_area = req_area
+
+    infer_w, infer_h = _calculate_dimensions_like_pipeline(target_area, ratio)
+
+    # Align to m once more (32 is normally already a multiple of 16, but play it safe)
+    infer_w = (infer_w // multiple_of) * multiple_of
+    infer_h = (infer_h // multiple_of) * multiple_of
+    infer_w = max(multiple_of, infer_w)
+    infer_h = max(multiple_of, infer_h)
+
+    # Final fallback: make sure seq_len <= 4096
+    while (infer_w // multiple_of) * (infer_h // multiple_of) > MAX_IMAGE_SEQ_LEN:
+        if infer_w >= infer_h:
+            infer_w -= multiple_of
+        else:
+            infer_h -= multiple_of
+        if infer_w < multiple_of or infer_h < multiple_of:
+            break
+
+    return infer_w, infer_h, req_w, req_h
+
+def _maybe_cuda_cleanup():
+    gc.collect()
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
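
A worked trace of the two helpers above (hypothetical numbers, assuming pipe.vae_scale_factor == 8 so multiple_of == 16); it also shows why the final while-loop matters, since rounding to 32 can overshoot the seq_len cap:

# Hypothetical probe, not part of the app flow:
from PIL import Image as _Image
print(_pick_infer_size(_Image.new("RGB", (2048, 1536)), "2x original size"))
# requested 4096x3072 -> req_area 12,582,912 > max_area 4096*16*16 = 1,048,576
# target_area = 1,048,576, ratio = 4/3 -> ~1182.4 x ~886.8 -> rounded to 32: 1184 x 896
# (1184//16) * (896//16) = 74 * 56 = 4144 > 4096, so the loop shaves the wider side once
# expected: (1168, 896, 4096, 3072), since 73 * 56 = 4088 <= 4096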

 @spaces.GPU
 def infer(
     images,
     prompt,
     lora_adapter,
+    target_size_mode,  # New: target size option
     seed,
     randomize_seed,
     guidance_scale,
     steps,
     progress=gr.Progress(track_tqdm=True)
 ):
+    _maybe_cuda_cleanup()
 
     if not images:
         raise gr.Error("Please upload at least one image to edit.")
 
     if images is not None:
         for item in images:
             try:
+                if isinstance(item, (tuple, list)):
                     path_or_img = item[0]
                 else:
                     path_or_img = item
 
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
+    negative_prompt = (
+        "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, "
+        "cropped, jpeg artifacts, signature, watermark, username, blurry"
+    )
+
+    # Key: compute this run's inference size (prevents the 2x crash) and patch VAE_IMAGE_SIZE dynamically to avoid the "crop to the middle"
+    infer_w, infer_h, req_w, req_h = _pick_infer_size(pil_images[0], target_size_mode)
+    qwen_edit_module.VAE_IMAGE_SIZE = int(infer_w * infer_h)
+
+    print(f"[SizeMode={target_size_mode}] requested={req_w}x{req_h}, infer={infer_w}x{infer_h}, VAE_IMAGE_SIZE={qwen_edit_module.VAE_IMAGE_SIZE}")
 
     try:
         result_image = pipe(
             image=pil_images,
             prompt=prompt,
             negative_prompt=negative_prompt,
+            height=infer_h,
+            width=infer_w,
             num_inference_steps=steps,
             generator=generator,
             true_cfg_scale=guidance_scale,
         ).images[0]
 
+        # If the inference size differs from what the user asked for, resize here so the "original / 2x" output size is honored (no crop, scaling only)
+        # Note: if you would rather return the closest supported size instead, comment out this resize block.
+        if (result_image.size[0], result_image.size[1]) != (req_w, req_h):
+            result_image = result_image.resize((req_w, req_h), Image.LANCZOS)
+
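# (Continuing the hypothetical trace above: a 4096x3072 request inferred at 1168x896 is
# upscaled here via Image.LANCZOS; the slight aspect-ratio drift from the 32-alignment,
# 1168/896 ~ 1.304 vs 4/3 ~ 1.333, is absorbed by this resize rather than by cropping.)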
 
         return result_image, seed
 
+    except RuntimeError as e:
+        # Clearer hint for the NVML_SUCCESS INTERNAL ASSERT FAILED errors common on HF Spaces (usually triggered by OOM/fragmentation)
+        msg = str(e)
+        if "NVML_SUCCESS" in msg or "CUDACachingAllocator" in msg or "out of memory" in msg.lower():
+            _maybe_cuda_cleanup()
+            raise gr.Error(
+                "Inference failed: likely out of VRAM or VRAM fragmentation (common during the VAE decode stage). "
+                "Suggestions: lower the target size (use 'Original size' instead of 2x), reduce steps, or avoid frequently switching/loading many LoRAs."
+            )
         raise e
     finally:
+        _maybe_cuda_cleanup()
 
 @spaces.GPU
+def infer_example(images, prompt, lora_adapter):
     if not images:
         return None, 0
 
         images=images_list,
         prompt=prompt,
         lora_adapter=lora_adapter,
+        target_size_mode="Original size",  # examples default to the original size
         seed=0,
         randomize_seed=True,
         guidance_scale=1.0,
 
 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown("# **Qwen-Image-Edit-2511-LoRAs-Fast**", elem_id="main-title")
+        gr.Markdown("Perform diverse image edits using specialized LoRA adapters. Upload one or more images.")
 
         with gr.Row(equal_height=True):
             with gr.Column():
 
                     value="Photo-to-Anime"
                 )
 
+                # New: target image size option
+                target_size_mode = gr.Radio(
+                    label="Target Image Size",
+                    choices=["Original size", "2x original size"],
+                    value="Original size",
+                    info="If the requested size exceeds what the model/VRAM can handle, the closest runnable size is used automatically; the final output is resized to the size you chose (no cropping).",
                 )
 
                 with gr.Accordion("Advanced Settings", open=False, visible=False):
 
 
         gr.Examples(
             examples=[
+                [["examples/B.jpg"], "Transform into anime.", "Photo-to-Anime"],
+                [["examples/HRP.jpg"], "Transform into a hyper-realistic face portrait.", "Hyper-Realistic-Portrait"],
+                [["examples/A.jpeg"], "Rotate the camera 45 degrees to the right.", "Multiple-Angles"],
+                [["examples/U.jpg"], "Upscale this picture to 4K resolution.", "Upscaler"],
+                [["examples/PP1.jpg"], "cinematic polaroid with soft grain subtle vignette gentle lighting white frame handwritten photographed by hf preserving realistic texture and details", "Polaroid-Photo"],
+                [["examples/Z1.jpg"], "Front-right quarter view.", "Fal-Multiple-Angles"],
+                [["examples/MT.jpg"], "Paint with manga tone.", "Manga-Tone"],
+                [["examples/URP.jpg"], "ultra-realistic portrait.", "Ultra-Realistic-Portrait"],
+                [["examples/MN.jpg"], "Transform into Midnight Noir Eyes Spotlight.", "Midnight-Noir-Eyes-Spotlight"],
+                [["examples/ST1.jpg", "examples/ST2.jpg"], "Convert Image 1 to the style of Image 2.", "Style-Transfer"],
+                [["examples/R1.jpg"], "Change the picture to realistic photograph.", "Anything2Real"],
+                [["examples/UA.jpeg"], "Unblur and upscale.", "Unblur-Anything"],
+                [["examples/L1.jpg", "examples/L2.jpg"], "Refer to the color tone, remove the original lighting from Image 1, and relight Image 1 based on the lighting and color tone of Image 2.", "Light-Migration"],
+                [["examples/P1.jpg"], "Transform into anime (while preserving the background and remaining elements maintaining realism and original details.)", "Anime-V2"],
             ],
+            inputs=[images, prompt, lora_adapter],
             outputs=[output_image, seed],
             fn=infer_example,
             cache_examples=False,
 
 
     run_button.click(
         fn=infer,
+        inputs=[images, prompt, lora_adapter, target_size_mode, seed, randomize_seed, guidance_scale, steps],
         outputs=[output_image, seed]
     )