update make_depth
Browse files
app.py
CHANGED
|
@@ -1013,18 +1013,55 @@ def preprocess_mask(mask_img: Image.Image) -> Image.Image:
|
|
| 1013 |
return Image.fromarray(m, mode="L").convert("RGB")
|
| 1014 |
|
| 1015 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1016 |
def make_depth(depth_path: str) -> Image.Image:
|
| 1017 |
global H, W
|
| 1018 |
if H is None or W is None:
|
| 1019 |
raise RuntimeError("Global H/W not set. Call run_one() first.")
|
| 1020 |
|
| 1021 |
-
depth_img = _imread_or_raise(depth_path, 0)
|
| 1022 |
-
contours, _ = cv2.findContours(depth_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 1023 |
|
| 1024 |
-
|
| 1025 |
-
cv2.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1026 |
|
| 1027 |
-
filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
|
| 1028 |
filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
|
| 1029 |
|
| 1030 |
inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
|
|
@@ -1038,6 +1075,7 @@ def make_depth(depth_path: str) -> Image.Image:
|
|
| 1038 |
return image_depth
|
| 1039 |
|
| 1040 |
|
|
|
|
| 1041 |
def _edges_from_parsing(parsing_img: Image.Image) -> np.ndarray:
|
| 1042 |
m = np.array(parsing_img.convert("L"), dtype=np.uint8)
|
| 1043 |
_, m_bin = cv2.threshold(m, 127, 255, cv2.THRESH_BINARY)
|
|
@@ -1362,7 +1400,7 @@ def infer_web(person_fp, sketch_fp, style_fp, prompt, steps, seed, category):
|
|
| 1362 |
|
| 1363 |
|
| 1364 |
with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
|
| 1365 |
-
gr.Markdown("##
|
| 1366 |
|
| 1367 |
category_toggle = gr.Radio(
|
| 1368 |
choices=["Dress", "Upper-body", "Lower-body"],
|
|
|
|
| 1013 |
return Image.fromarray(m, mode="L").convert("RGB")
|
| 1014 |
|
| 1015 |
|
| 1016 |
+
# def make_depth(depth_path: str) -> Image.Image:
|
| 1017 |
+
# global H, W
|
| 1018 |
+
# if H is None or W is None:
|
| 1019 |
+
# raise RuntimeError("Global H/W not set. Call run_one() first.")
|
| 1020 |
+
|
| 1021 |
+
# depth_img = _imread_or_raise(depth_path, 0)
|
| 1022 |
+
# contours, _ = cv2.findContours(depth_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 1023 |
+
|
| 1024 |
+
# filled_depth = depth_img.copy()
|
| 1025 |
+
# cv2.drawContours(filled_depth, contours, -1, (255), thickness=cv2.FILLED)
|
| 1026 |
+
|
| 1027 |
+
# filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_AREA)
|
| 1028 |
+
# filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
|
| 1029 |
+
|
| 1030 |
+
# inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
|
| 1031 |
+
|
| 1032 |
+
# with torch.inference_mode():
|
| 1033 |
+
# image_depth = depth_estimator(inverted_image)["depth"]
|
| 1034 |
+
|
| 1035 |
+
# if DEBUG_SAVE:
|
| 1036 |
+
# image_depth.save("depth.png")
|
| 1037 |
+
|
| 1038 |
+
# return image_depth
|
| 1039 |
+
|
| 1040 |
def make_depth(depth_path: str) -> Image.Image:
|
| 1041 |
global H, W
|
| 1042 |
if H is None or W is None:
|
| 1043 |
raise RuntimeError("Global H/W not set. Call run_one() first.")
|
| 1044 |
|
| 1045 |
+
depth_img = _imread_or_raise(depth_path, 0) # grayscale
|
|
|
|
| 1046 |
|
| 1047 |
+
# (optional) if the input is not strictly 0/255, force it binary
|
| 1048 |
+
_, depth_bin = cv2.threshold(depth_img, 127, 255, cv2.THRESH_BINARY)
|
| 1049 |
+
|
| 1050 |
+
# NOTE: the contour fill may be why the mask "looks thick"; keeping or removing it is selectable
|
| 1051 |
+
# 1) keep the fill (when the goal is to close holes)
|
| 1052 |
+
contours, _ = cv2.findContours(depth_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 1053 |
+
filled_depth = np.zeros_like(depth_bin)
|
| 1054 |
+
cv2.drawContours(filled_depth, contours, -1, 255, thickness=cv2.FILLED)
|
| 1055 |
+
|
| 1056 |
+
# 2) to skip the fill, use this line instead of the 3 lines above:
|
| 1057 |
+
# filled_depth = depth_bin
|
| 1058 |
+
|
| 1059 |
+
# Resize the mask with NEAREST (prevents boundary blur / apparent dilation)
|
| 1060 |
+
filled_depth = cv2.resize(filled_depth, (W, H), interpolation=cv2.INTER_NEAREST)
|
| 1061 |
+
|
| 1062 |
+
# (optional) force 0/255 again after the resize
|
| 1063 |
+
_, filled_depth = cv2.threshold(filled_depth, 127, 255, cv2.THRESH_BINARY)
|
| 1064 |
|
|
|
|
| 1065 |
filled_depth = _pad_or_crop_to_width_np(filled_depth, 1024, pad_value=0)
|
| 1066 |
|
| 1067 |
inverted_image = ImageOps.invert(Image.fromarray(filled_depth))
|
|
|
|
| 1075 |
return image_depth
|
| 1076 |
|
| 1077 |
|
| 1078 |
+
|
| 1079 |
def _edges_from_parsing(parsing_img: Image.Image) -> np.ndarray:
|
| 1080 |
m = np.array(parsing_img.convert("L"), dtype=np.uint8)
|
| 1081 |
_, m_bin = cv2.threshold(m, 127, 255, cv2.THRESH_BINARY)
|
|
|
|
| 1400 |
|
| 1401 |
|
| 1402 |
with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
|
| 1403 |
+
gr.Markdown("## 첫 inference는 모델 로딩 때문에 오래 걸릴 수 있습니다.")
|
| 1404 |
|
| 1405 |
category_toggle = gr.Radio(
|
| 1406 |
choices=["Dress", "Upper-body", "Lower-body"],
|