remove small points
app.py
CHANGED
@@ -180,55 +180,47 @@ def apply_parsing_white_mask_to_person_cv2(
     return result_bgr
 
 
-def
+def remove_small_white_components(
     parsing_img: Image.Image,
     *,
     white_threshold: int = 128,
-    min_white_area: int =
-
+    min_white_area: int = 50,
+    use_open: bool = False,
     open_ksize: int = 3,
     morph_iters: int = 1,
-    blur_ksize: int = 0,
 ) -> Image.Image:
+    """
+    - Binarize to white (= foreground)
+    - Remove only the small white blobs via connected components
+    - (Optional) apply a very light OPEN to remove small dots/spurs (CLOSE, which grows the white area, is not used)
+    """
     if not isinstance(parsing_img, Image.Image):
         raise TypeError("parsing_img must be a PIL.Image.Image")
 
     arr = np.array(parsing_img.convert("L"), dtype=np.uint8)
-    mask = np.where(arr >= white_threshold, 255, 0).astype(np.uint8)
+    mask = np.where(arr >= int(white_threshold), 255, 0).astype(np.uint8)
 
+    # 1) Remove small white connected components
     num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
     keep = np.zeros_like(mask)
     for lab in range(1, num_labels):
         area = int(stats[lab, cv2.CC_STAT_AREA])
-        if area >= min_white_area:
+        if area >= int(min_white_area):
             keep[labels == lab] = 255
     mask = keep
 
-
-
-
-
-
-
-
-    open_k = _odd_or_one(open_ksize)
-
-    if close_k > 1:
-        k_close = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (close_k, close_k))
-        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, k_close, iterations=int(morph_iters))
-
-    if open_k > 1:
-        k_open = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (open_k, open_k))
-        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, k_open, iterations=int(morph_iters))
-
-    if blur_ksize and int(blur_ksize) > 1:
-        b = _odd_or_one(int(blur_ksize))
-        mask_blur = cv2.GaussianBlur(mask, (b, b), 0)
-        mask = np.where(mask_blur >= 128, 255, 0).astype(np.uint8)
+    # 2) (Optional) OPEN: removes small white dots/spurs and slightly tidies the boundary (never grows the white area)
+    if use_open and int(open_ksize) > 1:
+        k = int(open_ksize)
+        if k % 2 == 0:
+            k += 1
+        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (k, k))
+        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=int(morph_iters))
 
     return Image.fromarray(mask, mode="L")
 
 
+
 def compute_hw_from_person(person_path: str):
     img = _imread_or_raise(person_path)
     orig_h, orig_w = img.shape[:2]
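
For reference, a minimal standalone sketch of the filtering step the new helper performs, run on a synthetic mask (illustration only, not part of app.py; assumes only numpy, OpenCV, and Pillow):

import numpy as np
import cv2
from PIL import Image

# Synthetic parsing mask: one large white region plus two tiny specks.
arr = np.zeros((128, 128), dtype=np.uint8)
arr[32:96, 32:96] = 255      # 4096-px blob: survives the filter
arr[5:8, 5:8] = 255          # 9-px speck: removed
arr[120:122, 10:12] = 255    # 4-px speck: removed

mask = np.where(arr >= 128, 255, 0).astype(np.uint8)                # white_threshold
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
keep = np.zeros_like(mask)
for lab in range(1, num_labels):                                     # label 0 is background
    if int(stats[lab, cv2.CC_STAT_AREA]) >= 50:                      # min_white_area
        keep[labels == lab] = 255

print(int(mask.sum() // 255), "->", int(keep.sum() // 255))          # 4109 -> 4096
out = Image.fromarray(keep, mode="L")                                # same type the helper returns
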
@@ -446,16 +438,17 @@ def run_one(paths: Paths, prompt: str, steps: int = DEFAULT_STEPS, category: str
     parsing_img = res["images"][0] if res.get("images") else None
     if parsing_img is None:
         raise RuntimeError("run_simple_extractor returned no parsing images.")
+
 
-
+    parsing_img = remove_small_white_components(
         parsing_img,
-
-
-
-        morph_iters=1,
-        blur_ksize=7,
+        white_threshold=128,
+        min_white_area=150,  # tune between 30 and 200 to fit the data
+        use_open=False,
     )
 
+
     use_depth_path = (
         paths.depth_path is not None
         and isinstance(paths.depth_path, str)
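
Note on min_white_area: it is an absolute pixel count, so the 30-200 range suggested above is tied to the parsing resolution. A hedged sketch of scaling it with the mask size instead (hypothetical min_area_for helper, not in app.py; the commit itself keeps the fixed value 150):

from PIL import Image

def min_area_for(parsing_img: Image.Image, frac: float = 0.0002) -> int:
    # Hypothetical: treat the area threshold as a fraction of the mask area.
    w, h = parsing_img.size
    return max(1, int(w * h * frac))   # e.g. a 768x1024 mask -> 157 px
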
@@ -533,8 +526,6 @@ def run_one(paths: Paths, prompt: str, steps: int = DEFAULT_STEPS, category: str
         pass
 
     style_img = Image.open(paths.style_path).convert("RGB")
-
-    prompt = extractor_category + " with " + prompt
 
     if prompt != "":
         prompt = extractor_category + " with " + prompt
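
Why the unconditional prepend was dropped: together with the conditional prepend kept below it, the extractor category ended up in the prompt twice. A small illustration with made-up values:

extractor_category = "upper body cloth"   # made-up value
prompt = "lace"                           # made-up value
prompt = extractor_category + " with " + prompt        # the removed line
if prompt != "":
    prompt = extractor_category + " with " + prompt    # the kept line
print(prompt)   # "upper body cloth with upper body cloth with lace"
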
@@ -659,7 +650,7 @@ with gr.Blocks(title="VISTA Demo (HF Spaces)") as demo:
         prompt_in = gr.Textbox(
             label="Prompt",
             value="",
-            placeholder="
+            placeholder="ex) lace, button, …",
             lines=2,
         )
         steps_in = gr.Slider(1, 80, value=DEFAULT_STEPS, step=1, label="Steps")