Delete main_code_script.py
main_code_script.py  +0 -200
DELETED
@@ -1,200 +0,0 @@
# Install necessary libraries (in your requirements.txt):
# pillow opencv-python numpy transformers mediapipe diffusers accelerate torch torchvision
# Example install command: pip install pillow opencv-python numpy transformers mediapipe diffusers accelerate torch torchvision
# (detectron2 and the DensePose `apply_net` helper used below are additional dependencies not covered by this command.)
from PIL import Image
import cv2
import mediapipe as mp
import numpy as np
from transformers import pipeline, CLIPImageProcessor, CLIPVisionModelWithProjection, CLIPTextModel, CLIPTextModelWithProjection, AutoTokenizer
from diffusers import StableDiffusionXLInpaintPipeline, DDPMScheduler, AutoencoderKL
import torch
import os
from torchvision import transforms
from typing import List
# from utils_mask import get_mask_location
# from preprocess.humanparsing.run_parsing import Parsing
# from preprocess.openpose.run_openpose import OpenPose
from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation
from torchvision.transforms.functional import to_pil_image
import apply_net
import spaces  # Hugging Face Spaces runtime; provides the @spaces.GPU decorator used below
# TryonPipeline, UNet2DConditionModel and UNet2DConditionModel_ref are referenced below but are
# not part of diffusers; the import paths here assume the IDM-VTON repository layout.
from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
from src.unet_hacked_tryon import UNet2DConditionModel
from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref

def pil_to_binary_mask(pil_image, threshold=0):
    """Convert a PIL image into a single-channel binary (0/255) PIL mask."""
    np_image = np.array(pil_image)
    grayscale_image = Image.fromarray(np_image).convert("L")
    binary_mask = np.array(grayscale_image) > threshold
    mask = np.zeros(binary_mask.shape, dtype=np.uint8)
    mask[binary_mask] = 1  # set every pixel above the threshold to 1
    mask = (mask * 255).astype(np.uint8)
    output_mask = Image.fromarray(mask)
    return output_mask

base_path = 'yisol/IDM-VTON'
# example_path = os.path.join(os.path.dirname(__file__), 'example')

unet = UNet2DConditionModel.from_pretrained(
    base_path,
    subfolder="unet",
    torch_dtype=torch.float16,
)
unet.requires_grad_(False)
tokenizer_one = AutoTokenizer.from_pretrained(
    base_path,
    subfolder="tokenizer",
    revision=None,
    use_fast=False,
)
tokenizer_two = AutoTokenizer.from_pretrained(
    base_path,
    subfolder="tokenizer_2",
    revision=None,
    use_fast=False,
)
noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler")
text_encoder_one = CLIPTextModel.from_pretrained(
    base_path,
    subfolder="text_encoder",
    torch_dtype=torch.float16,
)
text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
    base_path,
    subfolder="text_encoder_2",
    torch_dtype=torch.float16,
)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    base_path,
    subfolder="image_encoder",
    torch_dtype=torch.float16,
)
vae = AutoencoderKL.from_pretrained(
    base_path,
    subfolder="vae",
    torch_dtype=torch.float16,
)
# "stabilityai/stable-diffusion-xl-base-1.0",
UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
    base_path,
    subfolder="unet_encoder",
    torch_dtype=torch.float16,
)
# parsing_model = Parsing(0)
# openpose_model = OpenPose(0)

# Freeze all model weights; this script only runs inference.
UNet_Encoder.requires_grad_(False)
image_encoder.requires_grad_(False)
vae.requires_grad_(False)
unet.requires_grad_(False)
text_encoder_one.requires_grad_(False)
text_encoder_two.requires_grad_(False)

tensor_transform = transforms.Compose(
    [
        transforms.ToTensor(),
        # Normalization values assumed to be 0.5/0.5, mapping inputs to [-1, 1].
        transforms.Normalize([0.5], [0.5]),
    ]
)

pipe = TryonPipeline.from_pretrained(
    base_path,
    unet=unet,
    vae=vae,
    feature_extractor=CLIPImageProcessor(),
    text_encoder=text_encoder_one,
    text_encoder_2=text_encoder_two,
    tokenizer=tokenizer_one,
    tokenizer_2=tokenizer_two,
    scheduler=noise_scheduler,
    image_encoder=image_encoder,
    torch_dtype=torch.float16,
)
pipe.unet_encoder = UNet_Encoder
@spaces.GPU
def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed):
    # `dict` is the editor output holding the "background" person photo and drawn "layers";
    # note the parameter name shadows the built-in dict.
    device = "cuda"
    # openpose_model.preprocessor.body_estimation.model.to(device)
    pipe.to(device)
    pipe.unet_encoder.to(device)

    garm_img = garm_img.convert("RGB").resize((768, 1024))
    human_img_orig = dict["background"].convert("RGB")

    if is_checked_crop:
        # Center-crop the person photo to a 3:4 aspect ratio before resizing.
        width, height = human_img_orig.size
        target_width = int(min(width, height * (3 / 4)))
        target_height = int(min(height, width * (4 / 3)))
        left = (width - target_width) / 2
        top = (height - target_height) / 2
        right = (width + target_width) / 2
        bottom = (height + target_height) / 2
        cropped_img = human_img_orig.crop((left, top, right, bottom))
        crop_size = cropped_img.size
        human_img = cropped_img.resize((384, 512))  # Reduced size for efficiency
    else:
        human_img = human_img_orig.resize((384, 512))  # Reduced size for efficiency

    if is_checked:
        # keypoints = openpose_model(human_img.resize((384, 512)))
        # model_parse, _ = parsing_model(human_img.resize((384, 512)))
        # mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
        # mask = mask.resize((768, 1024))
        # Placeholder for mask generation (replace with your mask logic;
        # see the MediaPipe sketch after this listing).
        mask = Image.new('L', (768, 1024), color='white')  # Example: a white mask
        mask_gray = Image.new('RGB', (768, 1024), color='gray')  # Example: a gray image
    else:
        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
        mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transform(human_img)
        mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)

    # Run DensePose (detectron2's apply_net) on the person image to obtain a pose map.
    human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
    human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
    args = apply_net.create_argument_parser().parse_args(
        ('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm',
         '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
    # verbosity = getattr(args, "verbosity", None)
    pose_img = args.func(args, human_img_arg)
    pose_img = pose_img[:, :, ::-1]  # reverse the channel order
    pose_img = Image.fromarray(pose_img).resize((768, 1024))

    with torch.no_grad():
        # Extract the images
        with torch.cuda.amp.autocast():
            with torch.no_grad():
                prompt = "model is wearing " + garment_des
                negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
                with torch.inference_mode():
                    (
                        prompt_embeds,
                        negative_prompt_embeds,
                        pooled_prompt_embeds,
                        negative_pooled_prompt_embeds,
                    ) = pipe.encode_prompt(
                        prompt,
                        num_images_per_prompt=1,
                        do_classifier_free_guidance=True,
                        negative_prompt=negative_prompt,
                    )

                    prompt = "a photo of " + garment_des
                    negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
                    if not isinstance(prompt, List):
                        prompt = [prompt] * 1
                    if not isinstance(negative_prompt, List):
                        negative_prompt = [negative_prompt] * 1
                    with torch.inference_mode():
                        (
                            prompt_embeds_c,
                            _,
                            _,
                            _,
                        ) = pipe.encode_prompt(
                            prompt,
                            num_images_per_prompt=1,
                            do_classifier_free_guidance=False,
                            negative_prompt=negative_prompt,
                        )

                    pose_img = tensor_transform(pose_img).unsqueeze(0).to(device, torch.float16)
                    garm_tensor = tensor_transform(garm_img).unsqueeze(0).to(device, torch.float16)
                    generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
                    images = pipe(
                        prompt_embeds=prompt_embeds.to(device, torch.float16),
                        negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
                        pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
                        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device, torch.float16),
                        num_inference_steps=denoise_steps,
                        generator=generator,
                        strength=1.0,
                        pose_img=pose_img.to(device, torch.float16),
                        text_embeds_cloth=prompt_embeds_c.to(device, torch.float16),
                        cloth=garm_tensor.to(device, torch.float16),
                        mask_image=mask,
                        image=human_
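
The automatic-masking branch in `start_tryon` above only stubs out mask generation with a plain white mask. As a minimal sketch of one way to fill that gap with MediaPipe's selfie segmentation (the script already imports `mediapipe as mp` but never uses it), a helper like the one below could replace the placeholder. The helper name and the 0.5 threshold are illustrative choices, not part of the original file, and it masks the whole person rather than just the garment region, so it is only a rough stand-in for the parsing-based mask the commented-out code refers to.

# Hypothetical helper: derive a person mask with MediaPipe selfie segmentation.
def mediapipe_person_mask(human_img, size=(768, 1024), threshold=0.5):
    seg = mp.solutions.selfie_segmentation.SelfieSegmentation(model_selection=1)
    rgb = np.array(human_img.convert("RGB"))
    result = seg.process(rgb)  # segmentation_mask: float32 confidence map in [0, 1]
    person = (result.segmentation_mask > threshold).astype(np.uint8) * 255
    return Image.fromarray(person, mode="L").resize(size)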
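
For reference, the first argument of `start_tryon` is a dict shaped like the output of a Gradio ImageEditor component: a "background" person photo plus a list of drawn "layers", the first of which becomes the inpainting mask when automatic masking is off. A minimal invocation sketch follows; the file names and argument values are illustrative assumptions, and the return value falls in the truncated part of the file above.

# Illustrative call of start_tryon with hypothetical inputs.
person = Image.open("person.jpg")
garment = Image.open("garment.jpg")
editor_dict = {"background": person, "layers": [Image.new("RGB", person.size, "black")]}

result = start_tryon(
    editor_dict,                    # person image plus optional hand-drawn mask layer
    garment,                        # garment image; resized to 768x1024 inside the function
    "a red short-sleeve t-shirt",   # garment description used to build the prompts
    is_checked=True,                # automatic masking (the white-mask placeholder above)
    is_checked_crop=False,          # skip the 3:4 center crop
    denoise_steps=30,
    seed=42,
)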