ssoxye commited on
Commit
9ffd8e3
·
1 Parent(s): 09dba9b

update make_depth

Browse files
Files changed (1) hide show
  1. app.py +38 -15
app.py CHANGED
@@ -1018,13 +1018,26 @@ def preprocess_mask(mask_img: Image.Image) -> Image.Image:
1018
  # if H is None or W is None:
1019
  # raise RuntimeError("Global H/W not set. Call run_one() first.")
1020
 
1021
- # depth_img = _imread_or_raise(depth_path, 0)
1022
- # contours, _ = cv2.findContours(depth_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1023
 
1024
- # filled_depth = depth_img.copy()
1025
- # cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1026
 
1027
- # filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
1028
  # filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1029
 
1030
  # inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
@@ -1037,6 +1050,7 @@ def preprocess_mask(mask_img: Image.Image) -> Image.Image:
1037
 
1038
  # return image_depth
1039
 
 
1040
  def make_depth(depth_path: str) -> Image.Image:
1041
  global H, W
1042
  if H is None or W is None:
@@ -1047,30 +1061,39 @@ def make_depth(depth_path: str) -> Image.Image:
1047
  # (์„ ํƒ) ์ž…๋ ฅ์ด ์™„์ „ํ•œ 0/255๊ฐ€ ์•„๋‹ˆ๋ผ๋ฉด ์ด์ง„ํ™”๋กœ ๊ณ ์ •
1048
  _, depth_bin = cv2.threshold(depth_img, 127, 255, cv2.THRESH_BINARY)
1049
 
1050
- # 컨투어 채우기가 "두꺼워 보임"의 원인일 수도 있어, 유지/제거 선택 가능
1051
- # 1) 채우기 유지 (holes 메우는 목적이라면)
1052
  contours, _ = cv2.findContours(depth_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1053
  filled_depth = np.zeros_like(depth_bin)
1054
  cv2.drawContours(filled_depth, contours, -1, 255, thickness=cv2.FILLED)
1055
 
1056
- # 2) 채우기 제거하고 싶으면 위 3줄 대신 이걸 사용:
1057
- # filled_depth = depth_bin
1058
-
1059
- # โœ… ๋งˆ์Šคํฌ ๋ฆฌ์‚ฌ์ด์ฆˆ๋Š” NEAREST (๊ฒฝ๊ณ„ ๋ฒˆ์ง/ํŒฝ์ฐฝ ๋А๋‚Œ ๋ฐฉ์ง€)
1060
  filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_NEAREST)
1061
 
1062
  # (์„ ํƒ) ๋ฆฌ์‚ฌ์ด์ฆˆ ํ›„์—๋„ 0/255 ๊ฐ•์ œ
1063
  _, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
1064
 
1065
  filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1066
 
1067
  inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
 
1068
 
1069
  with torch.inference_mode():
1070
  image_depth = depth_estimator(inverted_image)["depth"]
1071
-
1072
- if DEBUG_SAVE:
1073
- image_depth.save("depth.png")
1074
 
1075
  return image_depth
1076
 
@@ -1400,7 +1423,7 @@ def infer_web(person_fp, sketch_fp, style_fp, prompt, steps, seed, category):
1400
 
1401
 
1402
  with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
1403
- gr.Markdown("## 첫 inference는 모델 로딩 때문에 오래 걸릴 수 있습니다.")
1404
 
1405
  category_toggle = gr.Radio(
1406
  choices=["Dress", "Upper-body", "Lower-body"],
 
1018
  # if H is None or W is None:
1019
  # raise RuntimeError("Global H/W not set. Call run_one() first.")
1020
 
1021
+ # depth_img = _imread_or_raise(depth_path, 0) # grayscale
 
1022
 
1023
+ # # (์„ ํƒ) ์ž…๋ ฅ์ด ์™„์ „ํ•œ 0/255๊ฐ€ ์•„๋‹ˆ๋ผ๋ฉด ์ด์ง„ํ™”๋กœ ๊ณ ์ •
1024
+ # _, depth_bin = cv2.threshold(depth_img, 127, 255, cv2.THRESH_BINARY)
1025
+
1026
+ # # 컨투어 채우기가 "두꺼워 보임"의 원인일 수도 있어, 유지/제거 선택 가능
1027
+ # # 1) 채우기 유지 (holes 메우는 목적이라면)
1028
+ # contours, _ = cv2.findContours(depth_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1029
+ # filled_depth = np.zeros_like(depth_bin)
1030
+ # cv2.drawContours(filled_depth, contours, -1, 255, thickness=cv2.FILLED)
1031
+
1032
+ # # 2) 채우기 제거하고 싶으면 위 3줄 대신 이걸 사용:
1033
+ # # filled_depth = depth_bin
1034
+
1035
+ # # โœ… ๋งˆ์Šคํฌ ๋ฆฌ์‚ฌ์ด์ฆˆ๋Š” NEAREST (๊ฒฝ๊ณ„ ๋ฒˆ์ง/ํŒฝ์ฐฝ ๋А๋‚Œ ๋ฐฉ์ง€)
1036
+ # filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_NEAREST)
1037
+
1038
+ # # (์„ ํƒ) ๋ฆฌ์‚ฌ์ด์ฆˆ ํ›„์—๋„ 0/255 ๊ฐ•์ œ
1039
+ # _, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
1040
 
 
1041
  # filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1042
 
1043
  # inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
 
1050
 
1051
  # return image_depth
1052
 
1053
+
1054
  def make_depth(depth_path: str) -> Image.Image:
1055
  global H, W
1056
  if H is None or W is None:
 
1061
  # (์„ ํƒ) ์ž…๋ ฅ์ด ์™„์ „ํ•œ 0/255๊ฐ€ ์•„๋‹ˆ๋ผ๋ฉด ์ด์ง„ํ™”๋กœ ๊ณ ์ •
1062
  _, depth_bin = cv2.threshold(depth_img, 127, 255, cv2.THRESH_BINARY)
1063
 
1064
+ # 컨투어 채우기 (holes 메우는 목적)
 
1065
  contours, _ = cv2.findContours(depth_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
1066
  filled_depth = np.zeros_like(depth_bin)
1067
  cv2.drawContours(filled_depth, contours, -1, 255, thickness=cv2.FILLED)
1068
 
1069
+ # โœ… ๋งˆ์Šคํฌ ๋ฆฌ์‚ฌ์ด์ฆˆ๋Š” NEAREST
 
 
 
1070
  filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_NEAREST)
1071
 
1072
  # (์„ ํƒ) ๋ฆฌ์‚ฌ์ด์ฆˆ ํ›„์—๋„ 0/255 ๊ฐ•์ œ
1073
  _, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
1074
 
1075
  filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
1076
+
1077
+
1078
+
1079
+ # ✅ 여기서 침식(팽창의 반대): 흰색 영역을 조금 줄임
1080
+ erode_ksize = 5 # 3/5/7... (클수록 더 많이 줄어듦)
1081
+ erode_iters = 1 # 1~2 추천
1082
+ if erode_ksize is not None and erode_ksize > 1 and erode_iters > 0:
1083
+ if erode_ksize % 2 == 0:
1084
+ erode_ksize += 1
1085
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erode_ksize, erode_ksize))
1086
+ filled_depth = cv2.erode(filled_depth, kernel, iterations=erode_iters)
1087
+ # 안전하게 다시 이진화
1088
+ _, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
1089
+
1090
 
1091
  inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
1092
+
1093
 
1094
  with torch.inference_mode():
1095
  image_depth = depth_estimator(inverted_image)["depth"]
1096
+
 
 
1097
 
1098
  return image_depth
1099
 
 
1423
 
1424
 
1425
  with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
1426
+ gr.Markdown("첫 inference는 모델 로딩 때문에 오래 걸릴 수 있습니다.")
1427
 
1428
  category_toggle = gr.Radio(
1429
  choices=["Dress", "Upper-body", "Lower-body"],