yamildiego committed on
Commit
5a1d390
·
1 Parent(s): ee4c9fa

size 512, rewrite function

Browse files
handler.py CHANGED
@@ -47,8 +47,8 @@ class EndpointHandler():
47
 
48
  transform = Compose([
49
  Resize(
50
- width=518,
51
- height=518,
52
  resize_target=False,
53
  keep_aspect_ratio=True,
54
  ensure_multiple_of=14,
@@ -146,7 +146,7 @@ class EndpointHandler():
146
  }
147
 
148
  self.app = FaceAnalysis(name="buffalo_l", root="./", providers=["CPUExecutionProvider"])
149
- self.app.prepare(ctx_id=0, det_size=(640, 640))
150
  self.generator = torch.Generator(device=device.type).manual_seed(3)
151
 
152
 
@@ -181,8 +181,14 @@ class EndpointHandler():
181
  num_inference_steps = data.pop("num_inference_steps", 5)
182
  guidance_scale = data.pop("guidance_scale", 1.5)
183
  negative_prompt = data.pop("negative_prompt", default_negative_prompt)
184
- face_image_path = data.pop("face_image_path", "https://i.ibb.co/SKg69dD/kaifu-resize.png")
185
- pose_image_path = data.pop("pose_image_path", "https://i.ibb.co/ZSrQ8ZJ/pose.jpg")
 
 
 
 
 
 
186
 
187
  adapter_strength_ratio = 0.8
188
 
 
47
 
48
  transform = Compose([
49
  Resize(
50
+ width=512,
51
+ height=512,
52
  resize_target=False,
53
  keep_aspect_ratio=True,
54
  ensure_multiple_of=14,
 
146
  }
147
 
148
  self.app = FaceAnalysis(name="buffalo_l", root="./", providers=["CPUExecutionProvider"])
149
+ self.app.prepare(ctx_id=0, det_size=(512, 512))
150
  self.generator = torch.Generator(device=device.type).manual_seed(3)
151
 
152
 
 
181
  num_inference_steps = data.pop("num_inference_steps", 5)
182
  guidance_scale = data.pop("guidance_scale", 1.5)
183
  negative_prompt = data.pop("negative_prompt", default_negative_prompt)
184
+
185
+ # 1024px
186
+ # face_image_path = data.pop("face_image_path", "https://i.ibb.co/SKg69dD/kaifu-resize.png")
187
+ # pose_image_path = data.pop("pose_image_path", "https://i.ibb.co/ZSrQ8ZJ/pose.jpg")
188
+ # 512px
189
+ face_image_path = "https://i.ibb.co/5Rsrd2d/kaifu-resize-1.png"
190
+ pose_image_path = "https://i.ibb.co/9bP9tMb/pose-2-1.jpg"
191
+
192
 
193
  adapter_strength_ratio = 0.8
194
 
pipeline_stable_diffusion_xl_instantid_full.py CHANGED
@@ -469,34 +469,33 @@ class LongPromptWeight(object):
469
  prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
470
  return prompt_embeds
471
 
472
- def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
473
-
474
- stickwidth = 4
475
- limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
476
- kps = np.array(kps)
477
-
478
- w, h = image_pil.size
479
- out_img = np.zeros([h, w, 3])
480
-
481
- for i in range(len(limbSeq)):
482
- index = limbSeq[i]
483
- color = color_list[index[0]]
484
-
485
- x = kps[index][:, 0]
486
- y = kps[index][:, 1]
487
- length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
488
- angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
489
- polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
490
- out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
491
- out_img = (out_img * 0.6).astype(np.uint8)
492
-
493
- for idx_kp, kp in enumerate(kps):
494
- color = color_list[idx_kp]
495
- x, y = kp
496
- out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
497
-
498
- out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
499
- return out_img_pil
500
 
501
  class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
502
 
@@ -527,7 +526,7 @@ class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
527
  def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
528
 
529
  image_proj_model = Resampler(
530
- dim=1280,
531
  depth=4,
532
  dim_head=64,
533
  heads=20,
 
469
  prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
470
  return prompt_embeds
471
 
472
+ def draw_kps_optimized(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
473
+ stickwidth = 4
474
+ limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
475
+ kps = np.array(kps)
476
+
477
+ w, h = image_pil.size
478
+ out_img = np.zeros([h, w, 3], dtype=np.uint8)
479
+
480
+ for i, (start, end) in enumerate(limbSeq):
481
+ color = color_list[i % len(color_list)]
482
+
483
+ x = kps[[start, end], 0]
484
+ y = kps[[start, end], 1]
485
+
486
+ center = tuple(np.round(np.mean([x, y], axis=1)).astype(int))
487
+ length = int(np.hypot(x[0] - x[1], y[0] - y[1]) / 2)
488
+ angle = int(np.degrees(np.arctan2(y[0] - y[1], x[0] - x[1])))
489
+
490
+ polygon = cv2.ellipse2Poly(center, (length, stickwidth), angle, 0, 360, 1)
491
+ cv2.fillConvexPoly(out_img, polygon, color)
492
+
493
+ for idx_kp, (x, y) in enumerate(kps):
494
+ color = color_list[idx_kp % len(color_list)]
495
+ cv2.circle(out_img, (int(x), int(y)), 10, color, thickness=-1)
496
+
497
+ out_img_pil = PIL.Image.fromarray(out_img)
498
+ return out_img_pil
 
499
 
500
  class StableDiffusionXLInstantIDPipeline(StableDiffusionXLControlNetPipeline):
501
 
 
526
  def set_image_proj_model(self, model_ckpt, image_emb_dim=512, num_tokens=16):
527
 
528
  image_proj_model = Resampler(
529
+ dim=512,
530
  depth=4,
531
  dim_head=64,
532
  heads=20,