nami0342 committed on
Commit
a67ed00
·
1 Parent(s): 104bf05

Set default setting for tryon

Browse files
Files changed (1) hide show
  1. app.py +12 -4
app.py CHANGED
@@ -134,7 +134,14 @@ pipe = TryonPipeline.from_pretrained(
134
  pipe.unet_encoder = UNet_Encoder
135
 
136
  @spaces.GPU
137
- def start_tryon(dict,garm_img,garment_des,is_automaskchecked,is_checked_crop,denoise_steps,seed):
 
 
 
 
 
 
 
138
  device = "cuda"
139
 
140
  openpose_model.preprocessor.body_estimation.model.to(device)
@@ -142,7 +149,7 @@ def start_tryon(dict,garm_img,garment_des,is_automaskchecked,is_checked_crop,den
142
  pipe.unet_encoder.to(device)
143
 
144
  garm_img= garm_img.convert("RGB").resize((768,1024))
145
- human_img_orig = dict["background"].convert("RGB")
146
 
147
  if is_checked_crop:
148
  width, height = human_img_orig.size
@@ -175,7 +182,8 @@ def start_tryon(dict,garm_img,garment_des,is_automaskchecked,is_checked_crop,den
175
  # # mask, mask_gray = get_mask_location('hd', "dresses", model_parse, keypoints)
176
  # # mask = mask.resize((768,1024))
177
  # else:
178
- mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
 
179
  # mask = transforms.ToTensor()(mask)
180
  # mask = mask.unsqueeze(0)
181
  mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
@@ -302,7 +310,7 @@ async def vton_run(
302
  target_human = Image.open(io.BytesIO(await upload_human.read()))
303
  target_cloth = Image.open(io.BytesIO(await upload_cloth.read()))
304
 
305
- results = start_tryon(target_human, target_cloth, input_prompt, is_automaskchecked, is_checked_crop, denoise_steps, seed)
306
  return results[0]
307
 
308
 
 
134
  pipe.unet_encoder = UNet_Encoder
135
 
136
  @spaces.GPU
137
+ # For simple API
138
+ def quick_tryon(humanTarget_img,garm_img,garment_prompt):
139
+ denoise_steps = 30
140
+ seed = 42
141
+ return start_tryon(humanTarget_img, garm_img, garment_prompt, True, True, denoise_steps, seed)
142
+
143
+
144
+ def start_tryon(humanTarget_img,garm_img,garment_des,is_automaskchecked,is_checked_crop,denoise_steps,seed):
145
  device = "cuda"
146
 
147
  openpose_model.preprocessor.body_estimation.model.to(device)
 
149
  pipe.unet_encoder.to(device)
150
 
151
  garm_img= garm_img.convert("RGB").resize((768,1024))
152
+ human_img_orig = humanTarget_img.convert("RGB")
153
 
154
  if is_checked_crop:
155
  width, height = human_img_orig.size
 
182
  # # mask, mask_gray = get_mask_location('hd', "dresses", model_parse, keypoints)
183
  # # mask = mask.resize((768,1024))
184
  # else:
185
+ mask_temp_img = Image()
186
+ mask = pil_to_binary_mask(mask_temp_img.convert("RGB").resize((768, 1024)))
187
  # mask = transforms.ToTensor()(mask)
188
  # mask = mask.unsqueeze(0)
189
  mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
 
310
  target_human = Image.open(io.BytesIO(await upload_human.read()))
311
  target_cloth = Image.open(io.BytesIO(await upload_cloth.read()))
312
 
313
+ results = quick_tryon(target_human, target_cloth, input_prompt)
314
  return results[0]
315
 
316