Update app.py
app.py CHANGED
@@ -59,8 +59,8 @@ def parse_args():
         ),
     )
     parser.add_argument(
-        "--repaint",
-        action="store_true",
+        "--repaint",
+        action="store_true",
         help="Whether to repaint the result image with the original background."
     )
     parser.add_argument(
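A quick note on the `--repaint` flag in the hunk above: with `action="store_true"`, argparse creates a boolean that defaults to False and becomes True only when the bare flag appears on the command line, with no value after it. A minimal sketch of the same pattern:

import argparse

# store_true flags take no value; their presence alone flips them to True.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--repaint",
    action="store_true",
    help="Whether to repaint the result image with the original background."
)

assert parser.parse_args([]).repaint is False
assert parser.parse_args(["--repaint"]).repaint is True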
@@ -79,11 +79,11 @@ def parse_args():
         choices=["no", "fp16", "bf16"],
         help=(
             "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
-            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
+            " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
             " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
         ),
     )
-
+
     args = parser.parse_args()
     env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
     if env_local_rank != -1 and env_local_rank != args.local_rank:
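The help text above spells out the bf16 requirements: PyTorch >= 1.10 and an Ampere-class (or newer) Nvidia GPU. A small sketch, assuming PyTorch is installed, for checking whether the machine can honor `--mixed_precision bf16` before falling back to fp16:

import torch

# bf16 autocast on CUDA needs Ampere or newer hardware;
# is_bf16_supported() checks the device's compute capability.
def pick_mixed_precision() -> str:
    if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
        return "bf16"
    return "fp16"

print(pick_mixed_precision())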
@@ -103,6 +103,7 @@ def image_grid(imgs, rows, cols):
 
 args = parse_args()
 repo_path = snapshot_download(repo_id=args.resume_path)
+
 # Pipeline
 pipeline = CatVTONPipeline(
     base_ckpt=args.base_model_path,
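`snapshot_download` from `huggingface_hub` fetches the entire checkpoint repo named by `--resume_path` into the local cache (or reuses a cached copy) and returns the directory path, which the code below joins with subfolders such as "DensePose" and "SCHP". A sketch of the call in isolation; the repo id here is a placeholder, not the Space's actual default:

from huggingface_hub import snapshot_download

# Returns a local path like ~/.cache/huggingface/hub/models--user--repo/snapshots/<sha>
repo_path = snapshot_download(repo_id="some-user/some-checkpoint-repo")
print(repo_path)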
@@ -112,12 +113,18 @@ pipeline = CatVTONPipeline(
     use_tf32=args.allow_tf32,
     device='cuda'
 )
+
 # AutoMasker
-mask_processor = VaeImageProcessor(vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
+mask_processor = VaeImageProcessor(
+    vae_scale_factor=8,
+    do_normalize=False,
+    do_binarize=True,
+    do_convert_grayscale=True
+)
 automasker = AutoMasker(
     densepose_ckpt=os.path.join(repo_path, "DensePose"),
     schp_ckpt=os.path.join(repo_path, "SCHP"),
-    device='cuda',
+    device='cuda',
 )
 
 @spaces.GPU(duration=120)
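The reflowed `VaeImageProcessor` arguments deserve a gloss: `vae_scale_factor=8` rounds preprocessing sizes to multiples of the VAE's downsampling factor, `do_normalize=False` keeps mask values in [0, 1] instead of rescaling to [-1, 1], `do_binarize=True` thresholds at 0.5 into a hard 0/1 mask, and `do_convert_grayscale=True` collapses the input to a single channel. A sketch of running a PIL mask through it, assuming `diffusers` is installed:

from PIL import Image
from diffusers.image_processor import VaeImageProcessor

mask_processor = VaeImageProcessor(
    vae_scale_factor=8,        # sizes rounded to multiples of 8
    do_normalize=False,        # stay in [0, 1], not [-1, 1]
    do_binarize=True,          # threshold at 0.5 -> hard 0/1 values
    do_convert_grayscale=True  # force a single channel
)

mask = Image.new("L", (768, 1024), 200)   # dummy light-gray mask
tensor = mask_processor.preprocess(mask)  # -> tensor of shape (1, 1, 1024, 768), binarized
print(tensor.shape, tensor.unique())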
@@ -130,8 +137,11 @@ def submit_function(
     seed,
     show_type
 ):
+    # Separate the background and layers[0] from the person_image object
     person_image, mask = person_image["background"], person_image["layers"][0]
     mask = Image.open(mask).convert("L")
+
+    # If the mask is all 0 (black), set it to None
     if len(np.unique(np.array(mask))) == 1:
         mask = None
     else:
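The `np.unique` test above is a cheap way to detect an untouched drawing layer: if the user never painted in the Gradio image editor, the exported layer is one flat value, so the code discards it and lets `automasker` generate the mask instead. A tiny illustration:

import numpy as np
from PIL import Image

blank = Image.new("L", (64, 64), 0)     # untouched layer: a single gray level
drawn = blank.copy()
drawn.paste(255, (10, 10, 30, 30))      # simulate a painted region

print(len(np.unique(np.array(blank))))  # 1 -> falls back to automasker
print(len(np.unique(np.array(drawn))))  # 2 -> user-drawn mask is kept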
@@ -153,8 +163,8 @@ def submit_function(
     cloth_image = Image.open(cloth_image).convert("RGB")
     person_image = resize_and_crop(person_image, (args.width, args.height))
     cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
-
-    #
+
+    # If user didn't draw a mask
     if mask is not None:
         mask = resize_and_crop(mask, (args.width, args.height))
     else:
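`resize_and_crop` and `resize_and_padding` are CatVTON utility functions that this diff does not show. Judging by the names and their uses (person images filled to the target size, garments fitted with padding), they plausibly behave like Pillow's `ImageOps.fit` and `ImageOps.pad`; the sketch below is an assumption about that behavior, not the repo's actual code:

from PIL import Image, ImageOps

def resize_and_crop(image, size):
    # Scale until the image covers `size`, then center-crop the overflow.
    return ImageOps.fit(image, size, Image.LANCZOS)

def resize_and_padding(image, size):
    # Scale until the image fits inside `size`, then pad the remainder.
    return ImageOps.pad(image, size, Image.LANCZOS, color=(255, 255, 255))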
@@ -173,11 +183,12 @@ def submit_function(
         guidance_scale=guidance_scale,
         generator=generator
     )[0]
-
-    # Post-process
+
+    # Post-process & Save
     masked_person = vis_mask(person_image, mask)
     save_result_image = image_grid([person_image, masked_person, cloth_image, result_image], 1, 4)
     save_result_image.save(result_save_path)
+
     if show_type == "result only":
         return result_image
     else:
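`image_grid` is defined earlier in app.py; its signature, `image_grid(imgs, rows, cols)`, is visible in the hunk headers. Such a helper usually pastes equally sized PIL images into one canvas, row-major; a representative sketch, not necessarily the file's exact code:

from PIL import Image

def image_grid(imgs, rows, cols):
    # Paste rows*cols equally sized images into a single canvas, row-major.
    assert len(imgs) == rows * cols
    w, h = imgs[0].size
    grid = Image.new("RGB", (cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, ((i % cols) * w, (i // cols) * h))
    return grid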
@@ -187,17 +198,18 @@ def submit_function(
         conditions = image_grid([person_image, cloth_image], 2, 1)
     else:
         condition_width = width // 3
-        conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
+        conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
+
     conditions = conditions.resize((condition_width, height), Image.NEAREST)
     new_result_image = Image.new("RGB", (width + condition_width + 5, height))
     new_result_image.paste(conditions, (0, 0))
     new_result_image.paste(result_image, (condition_width + 5, 0))
-
+    return new_result_image
 
 def person_example_fn(image_path):
     return image_path
 
-# Custom CSS
+# Custom CSS
 css = """
 footer {visibility: hidden}
 
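The substantive fix in this hunk is the added `return new_result_image`: the `+` marker shows the return is new, so the composite branch previously appears to have fallen off the end of the function and handed None back to Gradio whenever `show_type` was not "result only". The layout arithmetic (conditions strip on the left, result on the right, 5-pixel gutter) can be checked standalone; the sizes here are assumed, matching typical try-on resolutions:

from PIL import Image

width, height = 768, 1024
condition_width = width // 3

conditions = Image.new("RGB", (condition_width, height), "gray")
result_image = Image.new("RGB", (width, height), "white")

new_result_image = Image.new("RGB", (width + condition_width + 5, height))
new_result_image.paste(conditions, (0, 0))                      # left strip
new_result_image.paste(result_image, (condition_width + 5, 0))  # right side, after 5 px gutter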
@@ -322,10 +334,11 @@ def app_gradio():
         Transform your look with AI-powered virtual clothing try-on!
         """
         )
-
+
         with gr.Row():
             with gr.Column(scale=1, min_width=350):
-                with gr.Box():
+                # (Fix) Changed gr.Box() to gr.Group() on the line below
+                with gr.Group():
                     gr.Markdown("### 📸 Upload Images")
                     with gr.Row():
                         image_path = gr.Image(
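The `gr.Box()` to `gr.Group()` swap is this commit's compatibility fix: `gr.Box` was removed in Gradio 4, and `gr.Group` is the closest surviving layout container, rendering its children as one visually grouped block. A minimal sketch of the replacement pattern:

import gradio as gr

with gr.Blocks() as demo:
    # gr.Box() no longer exists in Gradio 4; gr.Group() fills the same role.
    with gr.Group():
        gr.Markdown("### 📸 Upload Images")
        image_path = gr.Image(type="filepath")

demo.launch()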
@@ -371,7 +384,7 @@ def app_gradio():
             </div>
             """
         )
-
+
         with gr.Accordion("⚙️ Advanced Settings", open=False):
             num_inference_steps = gr.Slider(
                 label="Quality Level",
@@ -411,14 +424,13 @@ def app_gradio():
                 elem_classes="image-container"
             )
             with gr.Row():
-                # Photo Examples
                 root_path = "resource/demo/example"
                 with gr.Column():
                     gr.Markdown("#### 👤 Model Examples")
                     men_exm = gr.Examples(
                         examples=[
-                            os.path.join(root_path, "person", "men", file)
-                            for file in os.listdir(os.path.join(root_path, "person", "men"))
+                            os.path.join(root_path, "person", "men", file)
+                            for file in os.listdir(os.path.join(root_path, "person", "men"))
                         ],
                         examples_per_page=4,
                         inputs=image_path,
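One caveat with populating `gr.Examples` from `os.listdir`, here and in the four gallery blocks that follow: the listing order is filesystem-dependent, so the example galleries can reorder between platforms or restarts. Wrapping the call in `sorted()` pins the order; a sketch assuming the same directory layout:

import os

root_path = "resource/demo/example"
men_dir = os.path.join(root_path, "person", "men")

# sorted() makes the gallery order deterministic.
examples = [os.path.join(men_dir, file) for file in sorted(os.listdir(men_dir))]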
@@ -427,8 +439,8 @@ def app_gradio():
                     )
                     women_exm = gr.Examples(
                         examples=[
-                            os.path.join(root_path, "person", "women", file)
-                            for file in os.listdir(os.path.join(root_path, "person", "women"))
+                            os.path.join(root_path, "person", "women", file)
+                            for file in os.listdir(os.path.join(root_path, "person", "women"))
                         ],
                         examples_per_page=4,
                         inputs=image_path,
@@ -438,13 +450,13 @@ def app_gradio():
                     gr.Markdown(
                         '<div class="info-text">Model examples courtesy of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a></div>'
                     )
-
+
                 with gr.Column():
                     gr.Markdown("#### 👕 Clothing Examples")
                     condition_upper_exm = gr.Examples(
                         examples=[
-                            os.path.join(root_path, "condition", "upper", file)
-                            for file in os.listdir(os.path.join(root_path, "condition", "upper"))
+                            os.path.join(root_path, "condition", "upper", file)
+                            for file in os.listdir(os.path.join(root_path, "condition", "upper"))
                         ],
                         examples_per_page=4,
                         inputs=cloth_image,
@@ -453,8 +465,8 @@ def app_gradio():
                     )
                    condition_overall_exm = gr.Examples(
                         examples=[
-                            os.path.join(root_path, "condition", "overall", file)
-                            for file in os.listdir(os.path.join(root_path, "condition", "overall"))
+                            os.path.join(root_path, "condition", "overall", file)
+                            for file in os.listdir(os.path.join(root_path, "condition", "overall"))
                         ],
                         examples_per_page=4,
                         inputs=cloth_image,
@@ -463,8 +475,8 @@ def app_gradio():
                     )
                     condition_person_exm = gr.Examples(
                         examples=[
-                            os.path.join(root_path, "condition", "person", file)
-                            for file in os.listdir(os.path.join(root_path, "condition", "person"))
+                            os.path.join(root_path, "condition", "person", file)
+                            for file in os.listdir(os.path.join(root_path, "condition", "person"))
                         ],
                         examples_per_page=4,
                         inputs=cloth_image,
@@ -494,7 +506,7 @@ def app_gradio():
             ],
             result_image,
         )
-
+
         gr.Markdown(
             """
             ### 💡 Tips & Instructions
@@ -507,8 +519,8 @@ def app_gradio():
             For best results, use clear, front-facing images with good lighting.
             """
         )
-
+
     demo.queue().launch(share=True, show_error=True)
 
 if __name__ == "__main__":
-    app_gradio()
+    app_gradio()
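A closing note on the launch line: `demo.queue()` lets long-running handlers such as the `@spaces.GPU(duration=120)`-decorated function hold requests while a GPU worker is attached, and `show_error=True` surfaces Python tracebacks in the browser. On Hugging Face Spaces, `share=True` has no effect since the Space URL is already public; it matters only for local runs. A sketch of the same pattern with a bounded queue:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("demo placeholder")

# queue() serializes long-running jobs; max_size caps how many requests may wait.
demo.queue(max_size=20).launch(show_error=True)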