tjrlgns09 commited on
Commit
02a7bf9
ยท
1 Parent(s): 5b98238
.dockerignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.log
6
+ .git
7
+ .gitignore
8
+ .venv/
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.index filter=lfs diff=lfs merge=lfs -text
37
+ /*.index filter=lfs diff=lfs merge=lfs -text
38
+ /*.pkl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.log
6
+ .venv/
Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dockerfile
FROM python:3.10-slim

# Install required packages, build tools and git (as root).
RUN apt-get update && \
    apt-get install -y tzdata build-essential libsm6 libxext6 libgl1 libglib2.0-0 git curl wget && \
    ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime && \
    echo "Asia/Seoul" > /etc/timezone && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

# ✅ Important: clone the AdaFace repo and pre-download the model weights
# at build time (optimises server start-up speed).
RUN git clone https://github.com/mk-minchul/AdaFace.git /app/AdaFace
RUN python -c "from huggingface_hub import hf_hub_download; import shutil; cache_path = hf_hub_download(repo_id='VishalMishraTss/AdaFace', filename='adaface_ir101_webface12m.ckpt'); shutil.copy(cache_path, '/app/adaface_ir101_webface12m.ckpt')"

# Copy the final application code.
COPY --chown=user . /app

# Server start command (Hugging Face Spaces default port 7860).
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from insightface.app import FaceAnalysis
import numpy as np
import cv2
import faiss
import pickle
import os
import uvicorn
import tempfile

from routers import embed_v2, predict

# ✅ Create the FastAPI application and mount the feature routers.
app = FastAPI()

app.include_router(embed_v2.router, prefix="/embed")
app.include_router(predict.router, prefix="/predict")


@app.get("/")
def hello():
    """Trivial health-check endpoint."""
    return {"msg": "Hello FastAPI!"}


# ✅ Local development entry point.
if __name__ == "__main__":
    # BUG FIX: this module is app.py, so the import string must be
    # "app:app" — the original "main:app" made `python app.py` fail
    # with "Could not import module 'main'".
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)
binaryfile_force_push.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ git lfs install
2
+ git lfs migrate import --everything --include "*.index"
3
+ git lfs migrate import --everything --include "*.pkl"
4
+ git push origin main --force
embed.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import cv2
3
+ import faiss
4
+ import pickle
5
+ import numpy as np
6
+ import pandas as pd
7
+ from pathlib import Path
8
+ import insightface
9
+ import albumentations as A
10
+
11
+ # ๐Ÿ”ง ์ฆ๊ฐ• ์„ค์ •
12
+ augment = A.Compose([
13
+ A.HorizontalFlip(p=0.5),
14
+ A.RandomBrightnessContrast(p=0.3),
15
+ A.Rotate(limit=15, p=0.3),
16
+ ])
17
+
18
+ # ๐Ÿš€ ๋ชจ๋ธ ์ดˆ๊ธฐํ™” ํ•จ์ˆ˜
19
+ def load_face_model(device: str = "cpu"):
20
+ providers = ["CPUExecutionProvider"] if device == "cpu" else ["CUDAExecutionProvider"]
21
+ model = insightface.app.FaceAnalysis(name='buffalo_l', providers=providers)
22
+ model.prepare(ctx_id=0 if device != "cpu" else -1)
23
+ return model
24
+
25
+ # ๐Ÿš€ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ ํ•จ์ˆ˜
26
+ def get_face_embedding(image_path: str, model, n_augment: int = 5):
27
+ img = cv2.imread(str(image_path))
28
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
29
+ embeddings = []
30
+
31
+ # ์›๋ณธ
32
+ faces = model.get(img)
33
+ if faces:
34
+ embeddings.append(faces[0].embedding)
35
+ else:
36
+ print(f"โŒ ์–ผ๊ตด ์ธ์‹ ์‹คํŒจ (์›๋ณธ): {image_path}")
37
+
38
+ # ์ฆ๊ฐ•
39
+ for i in range(n_augment):
40
+ augmented = augment(image=img)
41
+ img_aug = augmented['image']
42
+ faces = model.get(img_aug)
43
+ if faces:
44
+ embeddings.append(faces[0].embedding)
45
+ else:
46
+ print(f"โŒ ์–ผ๊ตด ์ธ์‹ ์‹คํŒจ (์ฆ๊ฐ• {i+1}): {image_path}")
47
+
48
+ if embeddings:
49
+ return np.mean(embeddings, axis=0)
50
+ else:
51
+ print(f"โŒ ๋ชจ๋“  ์‹œ๋„ ์‹คํŒจ: {image_path}")
52
+ return None
53
+
54
+ # ๐Ÿš€ ํด๋” ์Šค์บ” ๋ฐ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ
55
+ def process_folder(data_folder: str, model) -> pd.DataFrame:
56
+ data = []
57
+ data_path = Path(data_folder)
58
+ for person_dir in data_path.iterdir():
59
+ if not person_dir.is_dir():
60
+ continue
61
+ label = person_dir.name
62
+ print(f"โ–ถ ํด๋”: {label}")
63
+ count = 0
64
+ for image_path in person_dir.glob("*"):
65
+ if image_path.suffix.lower() not in [".jpg", ".jpeg", ".png"]:
66
+ continue
67
+ emb = get_face_embedding(image_path, model)
68
+ if emb is not None:
69
+ data.append({
70
+ "label": label,
71
+ "image_path": str(image_path),
72
+ "embedding": emb
73
+ })
74
+ count += 1
75
+ print(f"โœ… ์–ผ๊ตด ์ธ์‹ ์„ฑ๊ณต ์ˆ˜: {count}")
76
+ return pd.DataFrame(data)
77
+
78
+ # ๐Ÿš€ FAISS ์ธ๋ฑ์Šค ์ƒ์„ฑ ๋ฐ ์ €์žฅ
79
+ def build_and_save_faiss(train_df: pd.DataFrame, save_path: str):
80
+ embeddings = np.stack(train_df['embedding'].values).astype('float32')
81
+ embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)
82
+
83
+ index = faiss.IndexFlatIP(embeddings.shape[1])
84
+ index.add(embeddings)
85
+ faiss.write_index(index, os.path.join(save_path, "faiss_index.index"))
86
+
87
+ labels = train_df['label'].tolist()
88
+ with open(os.path.join(save_path, "faiss_labels.pkl"), "wb") as f:
89
+ pickle.dump(labels, f)
90
+
91
+ # ์ „์ฒด ๋ฐ์ดํ„ฐํ”„๋ ˆ์ž„ ์ €์žฅ (์„ ํƒ)
92
+ train_df.to_pickle(os.path.join(save_path, "train_df.pkl"))
93
+
94
+ print("โœ… FAISS ์ธ๋ฑ์Šค & ๋ผ๋ฒจ ์ €์žฅ ์™„๋ฃŒ")
95
+ return index, labels, train_df
96
+
97
+ # ๐Ÿš€ ์ „์ฒด ์‹คํ–‰ ํ•จ์ˆ˜
98
+ def run_pipeline(data_folder: str, save_path: str, device: str = "cpu"):
99
+ os.makedirs(save_path, exist_ok=True)
100
+ print("๐Ÿš€ ์–ผ๊ตด ๋ชจ๋ธ ๋ถˆ๋Ÿฌ์˜ค๋Š” ์ค‘...")
101
+ model = load_face_model(device)
102
+
103
+ print("๐Ÿš€ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ ์‹œ์ž‘...")
104
+ train_df = process_folder(data_folder, model)
105
+
106
+ print("๐Ÿš€ FAISS ์ธ๋ฑ์Šค ์ƒ์„ฑ ๋ฐ ์ €์žฅ ์ค‘...")
107
+ index, labels, df = build_and_save_faiss(train_df, save_path)
108
+
109
+ return index, labels, df
110
+
111
+
112
if __name__ == "__main__":
    # BUG FIX: this ran unconditionally at module level, so merely
    # importing embed.py kicked off the full (slow) embedding pipeline.
    # Guard it so the module can be imported safely.
    data_folder = "./person"
    save_path = "./embedding/person"
    index, labels, df = run_pipeline(data_folder, save_path, device="cpu")
embedding/person/face_faiss_index_v2.index ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:60e32bc0841fc0502971525daa6ca6ead0475d9f13155e6383e5c1d8648c0e94
3
+ size 8818733
embedding/person/face_faiss_labels_v2.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f890a916860025325ff32ca0cf98c325e549d0c3a1da1b35d0ec6aa87eb5a0b3
3
+ size 8700
requirements.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ insightface
2
+ faiss-cpu
3
+ opencv-python
4
+ numpy
5
+ pandas
6
+ albumentations
7
+ torch
8
+ torchvision
9
+ scikit-learn
10
+ fastapi
11
+ uvicorn[standard]
12
+ onnxruntime
13
+ python-multipart
14
+ sqlalchemy
15
+ psycopg2-binary
16
+ huggingface_hub
routers/embed_v2.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from fastapi import APIRouter
3
+ from fastapi.responses import JSONResponse
4
+ import os
5
+ import cv2
6
+ import faiss
7
+ import pickle
8
+ import numpy as np
9
+ import pandas as pd
10
+ from pathlib import Path
11
+ import insightface
12
+ import albumentations as A
13
+
14
+ # ๐Ÿ”ง ๊ฒฝ๋กœ ์„ค์ • (ํ•˜๋“œ์ฝ”๋”ฉ)
15
+ data_folder = os.path.abspath("person")
16
+ save_path = os.path.abspath("embedding/person")
17
+ faiss_index_name="face_faiss_index_v2.index"
18
+ faiss_label_name="face_faiss_labels_v2.pkl"
19
+ traindf_name="train_df.pkl"
20
+
21
+
22
+ # โœ… FastAPI ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ
23
+ router = APIRouter()
24
+
25
+ # ๐Ÿ”ง ์ฆ๊ฐ• ์„ค์ •
26
+ augment = A.Compose([
27
+ A.HorizontalFlip(p=0.5),
28
+ A.RandomBrightnessContrast(p=0.3),
29
+ A.Rotate(limit=15, p=0.3),
30
+ ])
31
+
32
+ # ๐Ÿš€ ๋ชจ๋ธ ์ดˆ๊ธฐํ™” ํ•จ์ˆ˜
33
+ def load_face_model(device: str = "cpu"):
34
+ providers = ["CPUExecutionProvider"] if device == "cpu" else ["CUDAExecutionProvider"]
35
+ model = insightface.app.FaceAnalysis(name='buffalo_l', providers=providers)
36
+ model.prepare(ctx_id=0 if device != "cpu" else -1)
37
+ return model
38
+
39
+ # ๐Ÿš€ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ ํ•จ์ˆ˜
40
+ def get_face_embedding(image_path: str, model, n_augment: int = 5):
41
+ img = cv2.imread(str(image_path))
42
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
43
+ embeddings = []
44
+
45
+ # ์›๋ณธ
46
+ faces = model.get(img)
47
+ if faces:
48
+ embeddings.append(faces[0].embedding)
49
+ else:
50
+ print(f"โŒ ์–ผ๊ตด ์ธ์‹ ์‹คํŒจ (์›๋ณธ): {image_path}")
51
+
52
+ # ์ฆ๊ฐ•
53
+ for i in range(n_augment):
54
+ augmented = augment(image=img)
55
+ img_aug = augmented['image']
56
+ faces = model.get(img_aug)
57
+ if faces:
58
+ embeddings.append(faces[0].embedding)
59
+ else:
60
+ print(f"โŒ ์–ผ๊ตด ์ธ์‹ ์‹คํŒจ (์ฆ๊ฐ• {i+1}): {image_path}")
61
+
62
+ if embeddings:
63
+ return np.mean(embeddings, axis=0)
64
+ else:
65
+ print(f"โŒ ๋ชจ๋“  ์‹œ๋„ ์‹คํŒจ: {image_path}")
66
+ return None
67
+
68
+ # ๐Ÿš€ ํด๋” ์Šค์บ” ๋ฐ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ
69
+ def process_folder(data_folder: str, model) -> pd.DataFrame:
70
+ data = []
71
+ data_path = Path(data_folder)
72
+ for person_dir in data_path.iterdir():
73
+ if not person_dir.is_dir():
74
+ continue
75
+ label = person_dir.name
76
+ print(f"โ–ถ ํด๋”: {label}")
77
+ count = 0
78
+ for image_path in person_dir.glob("*"):
79
+ if image_path.suffix.lower() not in [".jpg", ".jpeg", ".png"]:
80
+ continue
81
+ emb = get_face_embedding(image_path, model)
82
+ if emb is not None:
83
+ data.append({
84
+ "label": label,
85
+ "image_path": str(image_path),
86
+ "embedding": emb
87
+ })
88
+ count += 1
89
+ print(f"โœ… ์–ผ๊ตด ์ธ์‹ ์„ฑ๊ณต ์ˆ˜: {count}")
90
+ return pd.DataFrame(data)
91
+
92
+ # ๐Ÿš€ FAISS ์ธ๋ฑ์Šค ์ƒ์„ฑ ๋ฐ ์ €์žฅ
93
+ def build_and_save_faiss(train_df: pd.DataFrame, save_path: str):
94
+ embeddings = np.stack(train_df['embedding'].values).astype('float32')
95
+ embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)
96
+
97
+ index = faiss.IndexFlatIP(embeddings.shape[1])
98
+ index.add(embeddings)
99
+ faiss.write_index(index, os.path.join(save_path, faiss_index_name))
100
+
101
+ labels = train_df['label'].tolist()
102
+ with open(os.path.join(save_path, faiss_label_name), "wb") as f:
103
+ pickle.dump(labels, f)
104
+
105
+ train_df.to_pickle(os.path.join(save_path, traindf_name))
106
+
107
+ print("โœ… FAISS ์ธ๋ฑ์Šค & ๋ผ๋ฒจ ์ €์žฅ ์™„๋ฃŒ")
108
+ return index, labels, train_df
109
+
110
+ # ๐Ÿš€ ์ „์ฒด ์‹คํ–‰ ํ•จ์ˆ˜
111
+ def run_pipeline(data_folder: str, save_path: str, device: str = "cpu"):
112
+ os.makedirs(save_path, exist_ok=True)
113
+ print("๐Ÿš€ ์–ผ๊ตด ๋ชจ๋ธ ๋ถˆ๋Ÿฌ์˜ค๋Š” ์ค‘...")
114
+ model = load_face_model(device)
115
+
116
+ print("๐Ÿš€ ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ ์‹œ์ž‘...")
117
+ train_df = process_folder(data_folder, model)
118
+
119
+ print("๐Ÿš€ FAISS ์ธ๋ฑ์Šค ์ƒ์„ฑ ๋ฐ ์ €์žฅ ์ค‘...")
120
+ index, labels, df = build_and_save_faiss(train_df, save_path)
121
+
122
+ return len(df)
123
+
124
+ # โœ… API ์—”๋“œํฌ์ธํŠธ
125
+ @router.post("/train")
126
+ def train_faces():
127
+ try:
128
+ count = run_pipeline(data_folder, save_path)
129
+ return {"status": "success", "count": count}
130
+ except Exception as e:
131
+ return JSONResponse(status_code=500, content={"status": "error", "message": str(e)})
132
+
133
+ """
134
+ # โœ… ๋กœ์ปฌ ์‹คํ–‰
135
+ if __name__ == "__main__":
136
+ import uvicorn
137
+ uvicorn.run("embed_v2:app", host="0.0.0.0", port=8000, reload=True)
138
+ """
routers/predict.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import APIRouter, File, UploadFile
import numpy as np
import cv2
import faiss
import pickle
import os
import torch
import insightface
from insightface.utils import face_align
import sys

# ✅ Add the AdaFace repo cloned in the Dockerfile to the import path
sys.path.append('/app/AdaFace')
import net

router = APIRouter()

# --- Configuration and paths ---
faiss_index_name = "face_faiss_index_v2.index"
faiss_label_name = "face_faiss_labels_v2.pkl"
load_path = os.path.abspath("embedding/person")  # change to the actual FAISS file location if needed
threshold = 45.0  # score (percent) below which a match is reported as "Unknown"

# ✅ Hugging Face free tier: force CPU
device = torch.device('cpu')

# --- 1. Load InsightFace (detection only) ---
detector = insightface.app.FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'], allowed_modules=['detection'])
detector.prepare(ctx_id=0, det_size=(640, 640))

# --- 2. Load AdaFace (recogniser) ---
model_path = "/app/adaface_ir101_webface12m.ckpt"
adaface_model = net.build_model('ir_101')
statedict = torch.load(model_path, map_location=device)["state_dict"]
# Checkpoint keys are prefixed with "model."; strip the prefix for load_state_dict.
model_statedict = {key[6:]: val for key, val in statedict.items() if key.startswith("model.")}
adaface_model.load_state_dict(model_statedict)
adaface_model.to(device)
adaface_model.eval()

# --- 3. Load the FAISS index and labels (produced by the /embed/train pipeline) ---
index = faiss.read_index(os.path.join(load_path, faiss_index_name))
with open(os.path.join(load_path, faiss_label_name), "rb") as f:
    labels = pickle.load(f)
44
+
45
+ # โœ… AdaFace ์ž„๋ฒ ๋”ฉ ์ถ”์ถœ ํ•จ์ˆ˜
46
+ def extract_adaface_embedding(img_bgr, face_kps):
47
+ aligned_face = face_align.norm_crop(img_bgr, landmark=face_kps, image_size=112)
48
+ img_norm = (aligned_face / 255.0 - 0.5) / 0.5
49
+ img_tensor = torch.tensor(img_norm.transpose(2, 0, 1)).float().unsqueeze(0).to(device)
50
+ with torch.no_grad():
51
+ embedding, _ = adaface_model(img_tensor)
52
+ return embedding.cpu().numpy()[0]
53
+
54
@router.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Detect all faces in the uploaded image and identify each one
    against the FAISS index; returns label/score/bbox per face."""
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    if img is None:
        return {"success": False, "message": "❌ 이미지를 읽을 수 없습니다."}

    faces = detector.get(img)
    if not faces:
        return {"success": False, "message": "❌ 얼굴을 찾을 수 없습니다."}

    results = []
    for face in faces:
        # Extract and L2-normalise the AdaFace embedding so the
        # inner-product search returns cosine similarity.
        emb = extract_adaface_embedding(img, face.kps)
        emb = np.array([emb], dtype='float32')
        emb /= np.linalg.norm(emb, axis=1, keepdims=True)

        # FAISS nearest-neighbour search
        distances, indices = index.search(emb, k=1)
        best_match_idx = int(indices[0][0])
        similarity_score = distances[0][0]

        # Similarity → percent, thresholded to "Unknown".
        # BUG FIX: faiss returns -1 when the index has no neighbour to
        # report; labels[-1] would silently pick the LAST label, so an
        # invalid index is now treated as Unknown.
        score_percent = max(0, similarity_score) * 100
        if best_match_idx >= 0 and score_percent >= threshold:
            predicted_name = labels[best_match_idx]
        else:
            predicted_name = "Unknown"

        box = face.bbox.astype(int).tolist()

        results.append({
            "label": predicted_name,
            "score": float(score_percent),
            "bbox": box  # coordinates so the frontend can draw the box
        })

    return {"success": True, "results": results, "message": f"✅ 총 {len(faces)}명의 얼굴을 처리했습니다."}
+ return {"success": True, "results": results, "message": f"โœ… ์ด {len(faces)}๋ช…์˜ ์–ผ๊ตด์„ ์ฒ˜๋ฆฌํ–ˆ์Šต๋‹ˆ๋‹ค."}