|
|
from PIL import Image |
|
|
import numpy as np |
|
|
import os |
|
|
import torch |
|
|
from datetime import datetime |
|
|
import time |
|
|
import collections |
|
|
|
|
|
|
|
|
from utils import init_weight_dtype, resize_and_crop, resize_and_padding |
|
|
from model.pipeline import CatVTONPipeline |
|
|
from model.cloth_masker import AutoMasker, vis_mask |
|
|
from diffusers.image_processor import VaeImageProcessor |
|
|
from huggingface_hub import snapshot_download |
|
|
|
|
|
|
|
|
|
|
|
def get_files(folder_path, extensions=['py', 'png', 'JPEG']):
    """List filenames in *folder_path* whose extension is in *extensions*.

    Args:
        folder_path: Directory to scan (non-recursive).
        extensions: A single extension string or a list of them; matching is
            case-insensitive on both sides.

    Returns:
        Filenames (not full paths) whose final dot-suffix matches.
    """
    if isinstance(extensions, str):
        # Bug fix: the single-string branch previously skipped lowercasing,
        # so get_files(path, 'PNG') silently matched nothing.
        extensions = [extensions.lower()]
    else:
        extensions = [ex.lower() for ex in extensions]
    result = [x for x in os.listdir(folder_path) if x.split('.')[-1].lower() in extensions]
    return result
|
|
|
|
|
# Hugging Face model id of the Stable Diffusion inpainting backbone.
base_model_path='booksforcharlie/stable-diffusion-inpainting'
# Allow TF32 matmuls (speed/precision trade-off on Ampere+ GPUs).
allow_tf32=True
# Weight dtype selector passed to init_weight_dtype ('bf16' here).
mixed_precision='bf16'
# Hugging Face repo id holding the CatVTON attention/masker checkpoints.
resume_path='zhengchong/CatVTON'
# Root folder where per-run result grids are archived by inference().
tmp_folder = "/workspace/rs"
|
|
|
|
|
# Bug fix: `repo_path` was referenced here but only assigned much later in
# the file, raising a NameError at import time. Download (or reuse the
# cached copy of) the CatVTON checkpoints before constructing the models;
# snapshot_download is idempotent, so a later re-assignment is harmless.
repo_path = snapshot_download(repo_id=resume_path)

# Automatic agnostic-mask generator built from the repo's DensePose and
# SCHP checkpoints; used when the caller supplies no mask.
automasker = AutoMasker(
    densepose_ckpt=os.path.join(repo_path, "DensePose"),
    schp_ckpt=os.path.join(repo_path, "SCHP"),
    device='cuda',
)

# CatVTON try-on pipeline: SD-inpainting base plus CatVTON attention
# weights ("mix" version).
pipeline = CatVTONPipeline(
    base_ckpt=base_model_path,
    attn_ckpt=repo_path,
    attn_ckpt_version="mix",
    weight_dtype=init_weight_dtype(mixed_precision),
    use_tf32=allow_tf32,
    device='cuda',
)

# Mask post-processor: binarizes and converts to single-channel grayscale,
# without the [-1, 1] normalization used for latents.
mask_processor = VaeImageProcessor(vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
|
|
|
|
|
def image_grid(imgs, rows, cols):
    """Compose *imgs* into a single rows x cols grid image.

    All images are assumed to share the size of the first one; cells are
    filled left-to-right, top-to-bottom.
    """
    assert len(imgs) == rows * cols

    cell_w, cell_h = imgs[0].size
    canvas = Image.new("RGB", size=(cols * cell_w, rows * cell_h))

    for idx, cell in enumerate(imgs):
        row, col = divmod(idx, cols)
        canvas.paste(cell, box=(col * cell_w, row * cell_h))
    return canvas
|
|
|
|
|
def inference(
    person_image,
    mask_image,
    cloth_image,
    cloth_type,
    image_size=(1024, 768),
    num_inference_steps=50,
    guidance_scale=2.5,
    seed=42,
    show_type="result only"
):
    """Run one CatVTON try-on pass and return the composited result.

    Args:
        person_image: PIL image of the person.
        mask_image: PIL mask of the region to inpaint, or a uniform/blank
            image to request automatic masking.
        cloth_image: PIL image of the garment.
        cloth_type: Garment category forwarded to the automasker
            (e.g. "upper") when no usable mask is supplied.
        image_size: (height, width) of the working resolution.
        num_inference_steps: Diffusion denoising steps.
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed; -1 disables seeding (non-deterministic sampling).
        show_type: "result only" returns just the result; "input & result"
            or any other value returns a side-by-side composite.

    Returns:
        A PIL image — the raw try-on result or a composite, per show_type.

    Side effects: saves a 1x4 debug grid (person / masked person / cloth /
    result) under tmp_folder/<YYYYMMDD>/<HHMMSS>.png and prints an FPS line.
    """
    start_time = time.time()
    height, width = image_size

    # A mask with a single unique value carries no region information
    # (e.g. an untouched canvas) -> discard it and use automatic masking.
    if len(np.unique(np.array(mask_image))) == 1:
        mask_image = None
    else:
        mask_image = np.array(mask_image)
        mask_image[mask_image > 0] = 255  # binarize: any marked pixel -> white
        mask_image = Image.fromarray(mask_image)

    # Archive path: tmp_folder/<YYYYMMDD>/<HHMMSS>.png.
    date_str = datetime.now().strftime("%Y%m%d%H%M%S")
    result_save_path = os.path.join(tmp_folder, date_str[:8], date_str[8:] + ".png")
    # exist_ok=True replaces the original check-then-create, which was
    # redundant and racy.
    os.makedirs(os.path.join(tmp_folder, date_str[:8]), exist_ok=True)

    generator = None
    if seed != -1:  # -1 means "do not seed"
        generator = torch.Generator(device='cuda').manual_seed(seed)

    # Bring both inputs to the working resolution: crop the person photo,
    # pad the garment photo (helpers keep aspect ratio).
    person_image = resize_and_crop(person_image, (width, height))
    cloth_image = resize_and_padding(cloth_image, (width, height))

    if mask_image is not None:
        mask_image = resize_and_crop(mask_image, (width, height))
    else:
        # No usable user mask: derive one from the person image.
        mask_image = automasker(
            person_image,
            cloth_type
        )['mask']

    # Soften the mask edges before inpainting.
    mask_image = mask_processor.blur(mask_image, blur_factor=9)

    result_image = pipeline(
        image=person_image,
        condition_image=cloth_image,
        mask=mask_image,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=generator
    )[0]

    print("FPS: ", 1.0 / (time.time() - start_time))

    # Persist a debug grid of all intermediate images for this run.
    masked_person = vis_mask(person_image, mask_image)
    save_result_image = image_grid([person_image, masked_person, cloth_image, result_image], 1, 4)
    save_result_image.save(result_save_path)

    if show_type == "result only":
        return result_image
    else:
        # Build a composite: a shrunken column of conditions on the left,
        # the full-size result on the right, separated by a 5 px gutter.
        width, height = person_image.size
        if show_type == "input & result":
            condition_width = width // 2
            conditions = image_grid([person_image, cloth_image], 2, 1)
        else:
            condition_width = width // 3
            conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
        conditions = conditions.resize((condition_width, height), Image.NEAREST)
        new_result_image = Image.new("RGB", (width + condition_width + 5, height))
        new_result_image.paste(conditions, (0, 0))
        new_result_image.paste(result_image, (condition_width + 5, 0))
        return new_result_image
|
|
|
|
|
# Input directory (or a single file) of person photos.
person_path = '/workspace/data/person'
# Optional directory of pre-drawn masks; None -> automatic masking.
mask_path = None
# Input directory (or a single file) of garment photos.
cloth_path = '/workspace/data/cloth'
# Output directory for the final try-on images.
result_path = '/workspace/data/result'
|
|
|
|
|
|
|
|
# Resolve person images (and their optional masks) into parallel lists.
if os.path.isfile(person_path):
    # A single image file was supplied directly.
    person_files = [person_path]
    mask_files = [mask_path] * len(person_files)
else:
    # A directory: create it if missing, then scan for supported images.
    os.makedirs(person_path, exist_ok=True)
    image_names = get_files(person_path, extensions=['png', 'jpeg', 'jpg', 'webp'])

    if mask_path:
        os.makedirs(mask_path, exist_ok=True)
        # Each person image is paired with a same-named .png mask.
        mask_files = [
            os.path.join(mask_path, os.path.splitext(name)[0] + '.png')
            for name in image_names
        ]
    else:
        # No mask directory: signal automatic masking for every image.
        mask_files = [mask_path] * len(image_names)
    person_files = [os.path.join(person_path, name) for name in image_names]
|
|
|
|
|
|
|
|
# Resolve garment images: either one explicit file or a directory scan.
if os.path.isfile(cloth_path):
    cloth_files = [cloth_path]
else:
    os.makedirs(cloth_path, exist_ok=True)
    cloth_files = [
        os.path.join(cloth_path, name)
        for name in get_files(cloth_path, extensions=['png', 'jpeg', 'jpg', 'webp'])
    ]
|
|
|
|
|
|
|
|
# Ensure the output directory exists. The previous `isdir` guard was
# redundant (and racy): exist_ok=True already makes this a no-op when the
# directory is present.
os.makedirs(result_path, exist_ok=True)
|
|
|
|
|
repo_path = snapshot_download(repo_id=resume_path) |
|
|
|
|
|
# Inference settings shared by every person/cloth pair processed below.
cloth_type = "upper"          # garment category for automatic masking
image_size = (1024, 768)      # (height, width) working resolution
num_inference_steps = 50      # diffusion denoising steps
guidance_scale = 2.5          # classifier-free guidance strength
seed = 42                     # fixed seed for reproducible sampling
show_type = "all"             # return composite of inputs + result
|
|
|
|
|
|
|
|
# Try every garment on every person; masks (when provided) are paired
# positionally with the person images.
for person_file, mask_file in zip(person_files, mask_files):
    for cloth_file in cloth_files:
        person_instance = Image.open(person_file).convert("RGB")
        # mask_file is None when automatic masking should be used.
        mask_instance = Image.open(mask_file).convert("L") if mask_file else None
        cloth_instance = Image.open(cloth_file).convert("RGB")

        vton_img = inference(person_instance,
                             mask_instance,
                             cloth_instance,
                             cloth_type,
                             image_size,
                             num_inference_steps,
                             guidance_scale,
                             seed,
                             show_type)

        # Bug fix: the original pattern "%Y%m%d%M%S" omitted the hour, so
        # runs in different hours could collide on the same filename. Use
        # the full timestamp (matching inference()'s date_str format); note
        # that two saves within the same second would still overwrite.
        vton_img.save(os.path.join(result_path, f'{datetime.now().strftime("%Y%m%d%H%M%S")}.jpg'))