ssoxye commited on
Commit
09dba9b
·
1 Parent(s): a6778fb

update make_depth

Browse files
Files changed (1) hide show
  1. app.py +44 -6
app.py CHANGED
@@ -1013,18 +1013,55 @@ def preprocess_mask(mask_img: Image.Image) -> Image.Image:
1013
  return Image.fromarray(m, mode="L").convert("RGB")
1014
 
1015
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1016
  def make_depth(depth_path: str) -> Image.Image:
1017
  global H, W
1018
  if H is None or W is None:
1019
  raise RuntimeError("Global H/W not set. Call run_one() first.")
1020
 
1021
- depth_img = _imread_or_raise(depth_path, 0)
1022
- contours, _ = cv2.findContours(depth_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1023
 
1024
- filled_depth = depth_img.copy()
1025
- cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1026
 
1027
- filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
1028
  filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1029
 
1030
  inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
@@ -1038,6 +1075,7 @@ def make_depth(depth_path: str) -> Image.Image:
1038
  return image_depth
1039
 
1040
 
 
1041
  def _edges_from_parsing(parsing_img: Image.Image) -> np.ndarray:
1042
  m = np.array(parsing_img.convert("L"), dtype=np.uint8)
1043
  _, m_bin = cv2.threshold(m, 127, 255, cv2.THRESH_BINARY)
@@ -1362,7 +1400,7 @@ def infer_web(person_fp, sketch_fp, style_fp, prompt, steps, seed, category):
1362
 
1363
 
1364
  with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
1365
- gr.Markdown("## VISTA Demo\nperson 필수, style/sketch(guide)는 선택입니다.")
1366
 
1367
  category_toggle = gr.Radio(
1368
  choices=["Dress", "Upper-body", "Lower-body"],
 
1013
  return Image.fromarray(m, mode="L").convert("RGB")
1014
 
1015
 
1016
+ # def make_depth(depth_path: str) -> Image.Image:
1017
+ # global H, W
1018
+ # if H is None or W is None:
1019
+ # raise RuntimeError("Global H/W not set. Call run_one() first.")
1020
+
1021
+ # depth_img = _imread_or_raise(depth_path, 0)
1022
+ # contours, _ = cv2.findContours(depth_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1023
+
1024
+ # filled_depth = depth_img.copy()
1025
+ # cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED)
1026
+
1027
+ # filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
1028
+ # filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1029
+
1030
+ # inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
1031
+
1032
+ # with torch.inference_mode():
1033
+ # image_depth = depth_estimator(inverted_image)["depth"]
1034
+
1035
+ # if DEBUG_SAVE:
1036
+ # image_depth.save("depth.png")
1037
+
1038
+ # return image_depth
1039
+
1040
  def make_depth(depth_path: str) -> Image.Image:
1041
  global H, W
1042
  if H is None or W is None:
1043
  raise RuntimeError("Global H/W not set. Call run_one() first.")
1044
 
1045
+ depth_img = _imread_or_raise(depth_path, 0) # grayscale
 
1046
 
1047
+ # (선택) 입력이 완전한 0/255가 아니라면 이진화로 고정
1048
+ _, depth_bin = cv2.threshold(depth_img, 127, 255, cv2.THRESH_BINARY)
1049
+
1050
+ # 컨투어 채우기가 "두꺼워 보임"의 원인일 수도 있어, 유지/제거 선택 가능
1051
+ # 1) 채우기 유지 (holes 메우는 목적이라면)
1052
+ contours, _ = cv2.findContours(depth_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1053
+ filled_depth = np.zeros_like(depth_bin)
1054
+ cv2.drawContours(filled_depth, contours, -1, 255, thickness=cv2.FILLED)
1055
+
1056
+ # 2) 채우기 제거하고 싶으면 위 3줄 대신 이걸 사용:
1057
+ # filled_depth = depth_bin
1058
+
1059
+ # ✅ 마스크 리사이즈는 NEAREST (경계 번짐/팽창 느낌 방지)
1060
+ filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_NEAREST)
1061
+
1062
+ # (선택) 리사이즈 후에도 0/255 강제
1063
+ _, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
1064
 
 
1065
  filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1066
 
1067
  inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
 
1075
  return image_depth
1076
 
1077
 
1078
+
1079
  def _edges_from_parsing(parsing_img: Image.Image) -> np.ndarray:
1080
  m = np.array(parsing_img.convert("L"), dtype=np.uint8)
1081
  _, m_bin = cv2.threshold(m, 127, 255, cv2.THRESH_BINARY)
 
1400
 
1401
 
1402
  with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
1403
+ gr.Markdown("## 첫 inference는 모델 로딩 때문에 오래 걸릴 수 있습니다.")
1404
 
1405
  category_toggle = gr.Radio(
1406
  choices=["Dress", "Upper-body", "Lower-body"],