Spaces:
Sleeping
Sleeping
iljung1106
committed on
Commit
·
b1b0bc5
1
Parent(s):
93d1be8
Use single-eye
Browse files
- app/visualization.py +1 -1
- webui_gradio.py +8 -8
app/visualization.py
CHANGED
|
@@ -274,7 +274,7 @@ def format_view_weights_html(analysis: ViewAnalysis) -> str:
|
|
| 274 |
view_info = {
|
| 275 |
"whole": ("Whole Image", "#4CAF50"), # green
|
| 276 |
"face": ("Face", "#2196F3"), # blue
|
| 277 |
-
"eyes": ("
|
| 278 |
}
|
| 279 |
|
| 280 |
html_parts = ['<div style="font-family: sans-serif; padding: 10px;">']
|
|
|
|
| 274 |
view_info = {
|
| 275 |
"whole": ("Whole Image", "#4CAF50"), # green
|
| 276 |
"face": ("Face", "#2196F3"), # blue
|
| 277 |
+
"eyes": ("Eye", "#FF9800"), # orange (single-eye crop)
|
| 278 |
}
|
| 279 |
|
| 280 |
html_parts = ['<div style="font-family: sans-serif; padding: 10px;">']
|
webui_gradio.py
CHANGED
|
@@ -253,7 +253,7 @@ def load_all(ckpt_path: str, proto_path: str, device: str) -> str:
|
|
| 253 |
APP_STATE.db = db
|
| 254 |
APP_STATE.proto_path = proto_path
|
| 255 |
|
| 256 |
-
# initialize view extractor (whole -> face/eyes) with defaults
|
| 257 |
try:
|
| 258 |
cfg = ExtractorCfg(
|
| 259 |
yolo_dir=ROOT / "yolov5_anime",
|
|
@@ -298,7 +298,7 @@ def classify_and_analyze(
|
|
| 298 |
return ("❌ Provide a whole image.",) + empty_result[1:]
|
| 299 |
|
| 300 |
try:
|
| 301 |
-
# Extract face and eyes
|
| 302 |
face_pil = None
|
| 303 |
eye_pil = None
|
| 304 |
if ex is not None:
|
|
@@ -434,7 +434,7 @@ def add_prototype(
|
|
| 434 |
k_prototypes = max(1, int(k_prototypes))
|
| 435 |
n_triplets = max(1, int(n_triplets))
|
| 436 |
|
| 437 |
-
# Step 1: Extract whole/face/eyes from all uploaded images
|
| 438 |
wholes: List[Image.Image] = []
|
| 439 |
faces: List[Image.Image] = []
|
| 440 |
eyes_list: List[Image.Image] = []
|
|
@@ -449,7 +449,7 @@ def add_prototype(
|
|
| 449 |
|
| 450 |
wholes.append(im)
|
| 451 |
|
| 452 |
-
# Extract face and eyes
|
| 453 |
if ex is not None:
|
| 454 |
rgb = np.array(im.convert("RGB"))
|
| 455 |
face_rgb, eyes_rgb = ex.extract(rgb)
|
|
@@ -552,13 +552,13 @@ def build_ui() -> gr.Blocks:
|
|
| 552 |
with gr.Row():
|
| 553 |
gcam_whole = gr.Image(label="Whole Image", type="pil")
|
| 554 |
gcam_face = gr.Image(label="Face", type="pil")
|
| 555 |
-
gcam_eye = gr.Image(label="
|
| 556 |
|
| 557 |
# Extracted views
|
| 558 |
gr.Markdown("### 👁️ Auto-Extracted Views")
|
| 559 |
with gr.Row():
|
| 560 |
face_prev = gr.Image(label="Detected Face", type="pil")
|
| 561 |
-
eye_prev = gr.Image(label="Detected
|
| 562 |
|
| 563 |
run_btn.click(
|
| 564 |
classify_and_analyze,
|
|
@@ -571,8 +571,8 @@ def build_ui() -> gr.Blocks:
|
|
| 571 |
"### ⚠️ Temporary Prototypes Only\n"
|
| 572 |
"Add prototypes using random triplet combinations and K-means clustering (same as eval process).\n"
|
| 573 |
"1. Upload multiple whole images\n"
|
| 574 |
-
"2. Face and
|
| 575 |
-
"3. Random triplets (whole + face +
|
| 576 |
"4. K-means clustering creates K prototype centers\n\n"
|
| 577 |
"**These prototypes are session-only** — lost when the Space restarts."
|
| 578 |
)
|
|
|
|
| 253 |
APP_STATE.db = db
|
| 254 |
APP_STATE.proto_path = proto_path
|
| 255 |
|
| 256 |
+
# initialize view extractor (whole -> face/eye) with defaults
|
| 257 |
try:
|
| 258 |
cfg = ExtractorCfg(
|
| 259 |
yolo_dir=ROOT / "yolov5_anime",
|
|
|
|
| 298 |
return ("❌ Provide a whole image.",) + empty_result[1:]
|
| 299 |
|
| 300 |
try:
|
| 301 |
+
# Extract face and eye
|
| 302 |
face_pil = None
|
| 303 |
eye_pil = None
|
| 304 |
if ex is not None:
|
|
|
|
| 434 |
k_prototypes = max(1, int(k_prototypes))
|
| 435 |
n_triplets = max(1, int(n_triplets))
|
| 436 |
|
| 437 |
+
# Step 1: Extract whole/face/eye from all uploaded images
|
| 438 |
wholes: List[Image.Image] = []
|
| 439 |
faces: List[Image.Image] = []
|
| 440 |
eyes_list: List[Image.Image] = []
|
|
|
|
| 449 |
|
| 450 |
wholes.append(im)
|
| 451 |
|
| 452 |
+
# Extract face and eye
|
| 453 |
if ex is not None:
|
| 454 |
rgb = np.array(im.convert("RGB"))
|
| 455 |
face_rgb, eyes_rgb = ex.extract(rgb)
|
|
|
|
| 552 |
with gr.Row():
|
| 553 |
gcam_whole = gr.Image(label="Whole Image", type="pil")
|
| 554 |
gcam_face = gr.Image(label="Face", type="pil")
|
| 555 |
+
gcam_eye = gr.Image(label="Eye", type="pil")
|
| 556 |
|
| 557 |
# Extracted views
|
| 558 |
gr.Markdown("### 👁️ Auto-Extracted Views")
|
| 559 |
with gr.Row():
|
| 560 |
face_prev = gr.Image(label="Detected Face", type="pil")
|
| 561 |
+
eye_prev = gr.Image(label="Detected Eye", type="pil")
|
| 562 |
|
| 563 |
run_btn.click(
|
| 564 |
classify_and_analyze,
|
|
|
|
| 571 |
"### ⚠️ Temporary Prototypes Only\n"
|
| 572 |
"Add prototypes using random triplet combinations and K-means clustering (same as eval process).\n"
|
| 573 |
"1. Upload multiple whole images\n"
|
| 574 |
+
"2. Face and eye are auto-extracted from each\n"
|
| 575 |
+
"3. Random triplets (whole + face + eye) are created\n"
|
| 576 |
"4. K-means clustering creates K prototype centers\n\n"
|
| 577 |
"**These prototypes are session-only** — lost when the Space restarts."
|
| 578 |
)
|