Hugging Face Spaces (status: Runtime error)
Commit: add lower_body options
Browse files
app.py
CHANGED
|
@@ -122,7 +122,7 @@ pipe = TryonPipeline.from_pretrained(
|
|
| 122 |
pipe.unet_encoder = UNet_Encoder
|
| 123 |
|
| 124 |
@spaces.GPU
|
| 125 |
-
def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
|
| 126 |
device = "cuda"
|
| 127 |
|
| 128 |
openpose_model.preprocessor.body_estimation.model.to(device)
|
|
@@ -154,7 +154,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
|
|
| 154 |
if is_checked:
|
| 155 |
keypoints = openpose_model(human_img.resize((384,512)))
|
| 156 |
model_parse, _ = parsing_model(human_img.resize((384,512)))
|
| 157 |
-
mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
|
| 158 |
mask = mask.resize((768,1024))
|
| 159 |
# else:
|
| 160 |
# mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
|
|
@@ -284,6 +284,8 @@ with image_blocks as demo:
|
|
| 284 |
with gr.Row():
|
| 285 |
denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=40, value=30, step=1)
|
| 286 |
seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42)
|
|
|
|
|
|
|
| 287 |
with gr.Row():
|
| 288 |
is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True,visible=False)
|
| 289 |
with gr.Row():
|
|
@@ -305,7 +307,7 @@ with image_blocks as demo:
|
|
| 305 |
|
| 306 |
|
| 307 |
|
| 308 |
-
try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed], outputs=[image_out], api_name='tryon')
|
| 309 |
|
| 310 |
|
| 311 |
|
|
|
|
| 122 |
pipe.unet_encoder = UNet_Encoder
|
| 123 |
|
| 124 |
@spaces.GPU
|
| 125 |
+
def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed,area):
|
| 126 |
device = "cuda"
|
| 127 |
|
| 128 |
openpose_model.preprocessor.body_estimation.model.to(device)
|
|
|
|
| 154 |
if is_checked:
|
| 155 |
keypoints = openpose_model(human_img.resize((384,512)))
|
| 156 |
model_parse, _ = parsing_model(human_img.resize((384,512)))
|
| 157 |
+
mask, mask_gray = get_mask_location('hd', area, model_parse, keypoints)
|
| 158 |
mask = mask.resize((768,1024))
|
| 159 |
# else:
|
| 160 |
# mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
|
|
|
|
| 284 |
with gr.Row():
|
| 285 |
denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=40, value=30, step=1)
|
| 286 |
seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42)
|
| 287 |
+
with gr.Row():
|
| 288 |
+
area = gr.Dropdown(["upper_body","lower_body"], label="garment zone")
|
| 289 |
with gr.Row():
|
| 290 |
is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True,visible=False)
|
| 291 |
with gr.Row():
|
|
|
|
| 307 |
|
| 308 |
|
| 309 |
|
| 310 |
+
try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed, area], outputs=[image_out], api_name='tryon')
|
| 311 |
|
| 312 |
|
| 313 |
|