HSB3119-22080292-daothivananh committed on
Commit
cf20e58
·
1 Parent(s): 18c8947
Files changed (1) hide show
  1. service/face_service.py +354 -178
service/face_service.py CHANGED
@@ -1,267 +1,438 @@
1
- # import cv2
2
- # import numpy as np
3
- # import io
4
- # import os
5
- # import threading
6
- # import logging
7
- # import urllib.request
8
- # from dataclasses import dataclass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  # from typing import Optional
10
  # from PIL import Image
 
11
  # logger = logging.getLogger(__name__)
12
- # MODEL_DIR = "models"
13
- # UPLOAD_DIR = "uploads"
14
- # YUNET_PATH = os.path.join(MODEL_DIR, "face_detection_yunet_2023mar.onnx")
15
- # SFACE_PATH = os.path.join(MODEL_DIR, "face_recognition_sface_2021dec.onnx")
16
- # YUNET_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx"
 
17
  # SFACE_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx"
18
  # COSINE_THRESHOLD = 0.40
 
19
  # os.makedirs(MODEL_DIR, exist_ok=True)
20
  # os.makedirs(UPLOAD_DIR, exist_ok=True)
21
- # def _download_model(url: str, path: str, name: str) -> None:
22
- # """Tải model nếu chưa có, hiển thị tiến trình."""
23
- # if os.path.exists(path):
24
- # return
25
- # logger.info(f"[Model] Đang tải {name}... (~{url.split('/')[-1]})")
26
 
27
- # def _progress(count, block_size, total_size):
28
- # pct = int(count * block_size * 100 / total_size) if total_size > 0 else 0
29
- # print(f"\r [{name}] {min(pct, 100)}%", end="", flush=True)
30
 
31
- # urllib.request.urlretrieve(url, path, _progress)
32
- # print()
33
- # logger.info(f" {name} đã tải xong → {path}")
 
 
34
 
35
 
36
- # # ═════════════════════════════════════════════════════════════════════════════
37
- # # FaceMemoryStore — In-Memory RAM, thread-safe
38
- # # ═════════════════════════════════════════════════════════════════════════════
39
  # @dataclass
40
  # class CachedFace:
41
- # person_id: str
42
- # name: str
43
- # role: str
44
- # img_path: str
45
- # encoding: np.ndarray # 128-dim SFace feature (L2-normalized)
 
46
 
47
 
48
  # class FaceMemoryStore:
49
- # """Toàn bộ encoding lưu trên RAM. Nhận diện không cần đụng DB."""
50
-
51
  # def __init__(self):
52
  # self._faces: list[CachedFace] = []
53
  # self._lock = threading.RLock()
54
  # self._loaded = False
55
 
56
  # @property
57
- # def is_loaded(self) -> bool:
58
- # return self._loaded
59
 
60
  # @property
61
- # def count(self) -> int:
62
- # with self._lock:
63
- # return len(self._faces)
64
 
65
- # # ── Startup: nạp từ DB ────────────────────────────────────────────────
66
- # def load_all(self, rows: list[dict]) -> None:
67
  # with self._lock:
68
  # self._faces = []
69
  # for row in rows:
70
  # try:
71
- # enc = np.array(row["embedding_vector"], dtype=np.float32)
72
- # enc = self._normalize(enc)
73
  # self._faces.append(CachedFace(
74
  # person_id=row["person_id"],
75
  # name=row["name"],
76
- # role=row.get("role", ""),
77
- # img_path=row.get("img_path", ""),
78
  # encoding=enc,
 
79
  # ))
80
  # except Exception as e:
81
- # logger.warning(f"[RAM] Bỏ qua {row.get('name')}: {e}")
82
  # self._loaded = True
83
- # logger.info(f" {len(self._faces)} khuôn mặt")
84
 
85
- # # ── CRUD real-time ────────────────────────────────────────────────────
86
- # def add(self, person_id: str, name: str, role: str, img_path: str, encoding: list[float]) -> None:
87
- # enc = self._normalize(np.array(encoding, dtype=np.float32))
88
  # with self._lock:
89
- # self._faces.append(CachedFace(person_id, name, role, img_path, enc))
90
- # logger.info(f" {name} | Tổng: {self.count}")
91
 
92
- # def remove_by_person(self, person_id: str) -> int:
93
  # with self._lock:
94
  # before = len(self._faces)
95
  # self._faces = [f for f in self._faces if f.person_id != person_id]
96
  # return before - len(self._faces)
97
 
98
- # def update_info(self, person_id: str, name: str, role: str) -> None:
99
  # with self._lock:
100
  # for f in self._faces:
101
  # if f.person_id == person_id:
102
- # f.name = name
103
- # f.role = role
104
-
105
- # # ── Nhận diện vectorized cosine ───────────────────────────────────────
106
- # def find_best_match(
107
- # self,
108
- # query_enc: np.ndarray,
109
- # threshold: float = COSINE_THRESHOLD,
110
- # ) -> Optional[dict]:
111
- # """
112
- # Cosine similarity = dot product (đã normalize).
113
- # Numpy matrix multiply → tính tất cả N embedding cùng lúc.
114
- # """
115
- # with self._lock:
116
- # if not self._faces:
117
- # return None
118
 
119
- # q = self._normalize(query_enc)
120
- # matrix = np.stack([f.encoding for f in self._faces]) # (N, 128)
121
- # scores = matrix @ q # (N,) cosine sim
122
-
123
- # idx = int(np.argmax(scores))
124
- # score = float(scores[idx])
125
-
126
- # if score >= threshold:
 
127
  # best = self._faces[idx]
128
  # return {
129
- # "person_id": best.person_id,
130
- # "name": best.name,
131
- # "role": best.role,
132
- # "img_path": best.img_path,
133
- # "score": score,
134
- # "distance": 1.0 - score,
 
135
  # }
136
  # return None
137
 
138
  # @staticmethod
139
- # def _normalize(v: np.ndarray) -> np.ndarray:
140
  # n = np.linalg.norm(v)
141
  # return v / n if n > 0 else v
142
 
143
 
144
- # # ═════════════════════════════════════════════════════════════════════════════
145
- # # FaceAiService — OpenCV YuNet detector + SFace recognizer
146
- # # ═════════════════════════════════════════════════════════════════════════════
147
  # class FaceAiService:
148
-
149
  # def __init__(self):
150
- # # Tải model nếu chưa có
151
- # _download_model(YUNET_URL, YUNET_PATH, "YuNet (detection)")
152
- # _download_model(SFACE_URL, SFACE_PATH, "SFace (recognition)")
153
-
154
- # logger.info("[AI] Đang khởi tạo YuNet + SFace...")
155
-
156
- # # YuNet: phát hiện khuôn mặt, det_size sẽ update khi gọi
157
- # self._detector = cv2.FaceDetectorYN.create(
158
- # YUNET_PATH,
159
- # "",
160
- # (320, 240), # input size mặc định (sẽ update theo ảnh thực)
161
- # score_threshold=0.6,
162
- # nms_threshold=0.3,
163
- # top_k=5,
164
- # )
165
-
166
- # # SFace: trích xuất embedding 128-dim
167
- # self._recognizer = cv2.FaceRecognizerSF.create(
168
- # SFACE_PATH, ""
169
- # )
170
-
171
- # logger.info("[AI] YuNet + SFace sẵn sàng")
172
-
173
- # # ── Decode ảnh bytes → BGR numpy ──────────────────────────────────────
174
  # @staticmethod
175
- # def _decode(file_bytes: bytes) -> Optional[np.ndarray]:
176
  # try:
177
- # nparr = np.frombuffer(file_bytes, np.uint8)
178
- # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
179
- # if img is not None:
180
- # return img
181
- # except Exception:
182
- # pass
183
  # try:
184
  # pil = Image.open(io.BytesIO(file_bytes)).convert("RGB")
185
  # return cv2.cvtColor(np.array(pil), cv2.COLOR_RGB2BGR)
186
  # except Exception as e:
187
- # logger.error(f"[AI] Không đọc ảnh: {e}")
188
- # return None
189
 
190
  # def extract_faces(self, file_bytes: bytes) -> list[dict]:
191
- # """
192
- # Nhận bytes ảnh → list { box, descriptor }.
193
- # Tổng thời gian: ~25–50ms trên CPU.
194
- # """
195
  # img = self._decode(file_bytes)
196
- # if img is None:
197
- # return []
198
-
199
  # h, w = img.shape[:2]
200
-
201
- # # Update input size cho YuNet theo kích thước ảnh thực
202
  # self._detector.setInputSize((w, h))
203
-
204
- # # ── Detect ────────────────────────────────────────────────────────
205
  # _, faces_raw = self._detector.detect(img)
206
-
207
- # if faces_raw is None or len(faces_raw) == 0:
208
- # logger.info("[AI] Không phát hiện khuôn mặt")
209
- # return []
210
-
211
- # logger.info(f"[AI] Phát hiện {len(faces_raw)} khuôn mặt")
212
-
213
  # results = []
214
- # for face_data in faces_raw:
215
- # # face_data: [x, y, w, h, lm_x0, lm_y0, ..., score]
216
- # x, y, fw, fh = [int(v) for v in face_data[:4]]
217
- # det_score = float(face_data[-1])
218
-
219
- # # Đảm bảo bbox nằm trong ảnh
220
- # x = max(0, x)
221
- # y = max(0, y)
222
- # fw = min(fw, w - x)
223
- # fh = min(fh, h - y)
224
-
225
- # # ── Encode ────────────────────────────────────────────────────
226
- # # alignCrop: crop + align theo landmark → embedding chuẩn hơn
227
- # aligned = self._recognizer.alignCrop(img, face_data)
228
- # feature = self._recognizer.feature(aligned) # shape (1, 128)
229
- # encoding = feature.flatten().tolist() # list[float] 128 phần tử
230
-
231
  # results.append({
232
- # "box": {
233
- # "x": x,
234
- # "y": y,
235
- # "width": fw,
236
- # "height": fh,
237
- # },
238
- # "descriptor": encoding,
239
- # "det_score": det_score,
240
  # })
241
-
242
  # return results
243
 
244
  # @staticmethod
245
  # def save_image(file_bytes: bytes, person_id: str, index: int = 0) -> str:
246
- # """Lưu ảnh vào uploads/, trả về đường dẫn."""
247
  # filename = f"{person_id}_{index}.jpg"
248
  # filepath = os.path.join(UPLOAD_DIR, filename)
249
- # nparr = np.frombuffer(file_bytes, np.uint8)
250
- # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
251
  # if img is not None:
252
  # cv2.imwrite(filepath, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
253
  # else:
254
- # with open(filepath, "wb") as f:
255
- # f.write(file_bytes)
256
  # return filepath
257
 
258
 
259
- # # ─── Singleton ────────────────────────────────────────────────────────────────
260
  # face_ai_service = FaceAiService()
261
  # face_memory_store = FaceMemoryStore()
262
-
263
-
264
- import cv2, numpy as np, io, os, threading, logging, urllib.request
265
  from dataclasses import dataclass, field
266
  from typing import Optional
267
  from PIL import Image
@@ -417,17 +588,22 @@ class FaceAiService:
417
  logger.info(f"[AI] {len(results)} khuôn mặt")
418
  return results
419
 
 
420
  @staticmethod
421
- def save_image(file_bytes: bytes, person_id: str, index: int = 0) -> str:
422
  filename = f"{person_id}_{index}.jpg"
423
  filepath = os.path.join(UPLOAD_DIR, filename)
424
  arr = np.frombuffer(file_bytes, np.uint8)
425
  img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
426
  if img is not None:
427
  cv2.imwrite(filepath, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
 
 
428
  else:
429
- with open(filepath, "wb") as f: f.write(file_bytes)
430
- return filepath
 
 
431
 
432
 
433
  face_ai_service = FaceAiService()
 
1
+ # # import cv2
2
+ # # import numpy as np
3
+ # # import io
4
+ # # import os
5
+ # # import threading
6
+ # # import logging
7
+ # # import urllib.request
8
+ # # from dataclasses import dataclass
9
+ # # from typing import Optional
10
+ # # from PIL import Image
11
+ # # logger = logging.getLogger(__name__)
12
+ # # MODEL_DIR = "models"
13
+ # # UPLOAD_DIR = "uploads"
14
+ # # YUNET_PATH = os.path.join(MODEL_DIR, "face_detection_yunet_2023mar.onnx")
15
+ # # SFACE_PATH = os.path.join(MODEL_DIR, "face_recognition_sface_2021dec.onnx")
16
+ # # YUNET_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx"
17
+ # # SFACE_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx"
18
+ # # COSINE_THRESHOLD = 0.40
19
+ # # os.makedirs(MODEL_DIR, exist_ok=True)
20
+ # # os.makedirs(UPLOAD_DIR, exist_ok=True)
21
+ # # def _download_model(url: str, path: str, name: str) -> None:
22
+ # # """Tải model nếu chưa có, hiển thị tiến trình."""
23
+ # # if os.path.exists(path):
24
+ # # return
25
+ # # logger.info(f"[Model] Đang tải {name}... (~{url.split('/')[-1]})")
26
+
27
+ # # def _progress(count, block_size, total_size):
28
+ # # pct = int(count * block_size * 100 / total_size) if total_size > 0 else 0
29
+ # # print(f"\r [{name}] {min(pct, 100)}%", end="", flush=True)
30
+
31
+ # # urllib.request.urlretrieve(url, path, _progress)
32
+ # # print()
33
+ # # logger.info(f" {name} đã tải xong → {path}")
34
+
35
+
36
+ # # # ═════════════════════════════════════════════════════════════════════════════
37
+ # # # FaceMemoryStore — In-Memory RAM, thread-safe
38
+ # # # ═════════════════════════════════════════════════════════════════════════════
39
+ # # @dataclass
40
+ # # class CachedFace:
41
+ # # person_id: str
42
+ # # name: str
43
+ # # role: str
44
+ # # img_path: str
45
+ # # encoding: np.ndarray # 128-dim SFace feature (L2-normalized)
46
+
47
+
48
+ # # class FaceMemoryStore:
49
+ # # """Toàn bộ encoding lưu trên RAM. Nhận diện không cần đụng DB."""
50
+
51
+ # # def __init__(self):
52
+ # # self._faces: list[CachedFace] = []
53
+ # # self._lock = threading.RLock()
54
+ # # self._loaded = False
55
+
56
+ # # @property
57
+ # # def is_loaded(self) -> bool:
58
+ # # return self._loaded
59
+
60
+ # # @property
61
+ # # def count(self) -> int:
62
+ # # with self._lock:
63
+ # # return len(self._faces)
64
+
65
+ # # # ── Startup: nạp từ DB ────────────────────────────────────────────────
66
+ # # def load_all(self, rows: list[dict]) -> None:
67
+ # # with self._lock:
68
+ # # self._faces = []
69
+ # # for row in rows:
70
+ # # try:
71
+ # # enc = np.array(row["embedding_vector"], dtype=np.float32)
72
+ # # enc = self._normalize(enc)
73
+ # # self._faces.append(CachedFace(
74
+ # # person_id=row["person_id"],
75
+ # # name=row["name"],
76
+ # # role=row.get("role", ""),
77
+ # # img_path=row.get("img_path", ""),
78
+ # # encoding=enc,
79
+ # # ))
80
+ # # except Exception as e:
81
+ # # logger.warning(f"[RAM] Bỏ qua {row.get('name')}: {e}")
82
+ # # self._loaded = True
83
+ # # logger.info(f" {len(self._faces)} khuôn mặt")
84
+
85
+ # # # ── CRUD real-time ────────────────────────────────────────────────────
86
+ # # def add(self, person_id: str, name: str, role: str, img_path: str, encoding: list[float]) -> None:
87
+ # # enc = self._normalize(np.array(encoding, dtype=np.float32))
88
+ # # with self._lock:
89
+ # # self._faces.append(CachedFace(person_id, name, role, img_path, enc))
90
+ # # logger.info(f" {name} | Tổng: {self.count}")
91
+
92
+ # # def remove_by_person(self, person_id: str) -> int:
93
+ # # with self._lock:
94
+ # # before = len(self._faces)
95
+ # # self._faces = [f for f in self._faces if f.person_id != person_id]
96
+ # # return before - len(self._faces)
97
+
98
+ # # def update_info(self, person_id: str, name: str, role: str) -> None:
99
+ # # with self._lock:
100
+ # # for f in self._faces:
101
+ # # if f.person_id == person_id:
102
+ # # f.name = name
103
+ # # f.role = role
104
+
105
+ # # # ── Nhận diện vectorized cosine ───────────────────────────────────────
106
+ # # def find_best_match(
107
+ # # self,
108
+ # # query_enc: np.ndarray,
109
+ # # threshold: float = COSINE_THRESHOLD,
110
+ # # ) -> Optional[dict]:
111
+ # # """
112
+ # # Cosine similarity = dot product (đã normalize).
113
+ # # Numpy matrix multiply → tính tất cả N embedding cùng lúc.
114
+ # # """
115
+ # # with self._lock:
116
+ # # if not self._faces:
117
+ # # return None
118
+
119
+ # # q = self._normalize(query_enc)
120
+ # # matrix = np.stack([f.encoding for f in self._faces]) # (N, 128)
121
+ # # scores = matrix @ q # (N,) cosine sim
122
+
123
+ # # idx = int(np.argmax(scores))
124
+ # # score = float(scores[idx])
125
+
126
+ # # if score >= threshold:
127
+ # # best = self._faces[idx]
128
+ # # return {
129
+ # # "person_id": best.person_id,
130
+ # # "name": best.name,
131
+ # # "role": best.role,
132
+ # # "img_path": best.img_path,
133
+ # # "score": score,
134
+ # # "distance": 1.0 - score,
135
+ # # }
136
+ # # return None
137
+
138
+ # # @staticmethod
139
+ # # def _normalize(v: np.ndarray) -> np.ndarray:
140
+ # # n = np.linalg.norm(v)
141
+ # # return v / n if n > 0 else v
142
+
143
+
144
+ # # # ═════════════════════════════════════════════════════════════════════════════
145
+ # # # FaceAiService — OpenCV YuNet detector + SFace recognizer
146
+ # # # ═════════════════════════════════════════════════════════════════════════════
147
+ # # class FaceAiService:
148
+
149
+ # # def __init__(self):
150
+ # # # Tải model nếu chưa có
151
+ # # _download_model(YUNET_URL, YUNET_PATH, "YuNet (detection)")
152
+ # # _download_model(SFACE_URL, SFACE_PATH, "SFace (recognition)")
153
+
154
+ # # logger.info("[AI] Đang khởi tạo YuNet + SFace...")
155
+
156
+ # # # YuNet: phát hiện khuôn mặt, det_size sẽ update khi gọi
157
+ # # self._detector = cv2.FaceDetectorYN.create(
158
+ # # YUNET_PATH,
159
+ # # "",
160
+ # # (320, 240), # input size mặc định (sẽ update theo ảnh thực)
161
+ # # score_threshold=0.6,
162
+ # # nms_threshold=0.3,
163
+ # # top_k=5,
164
+ # # )
165
+
166
+ # # # SFace: trích xuất embedding 128-dim
167
+ # # self._recognizer = cv2.FaceRecognizerSF.create(
168
+ # # SFACE_PATH, ""
169
+ # # )
170
+
171
+ # # logger.info("[AI] YuNet + SFace sẵn sàng")
172
+
173
+ # # # ── Decode ảnh bytes → BGR numpy ──────────────────────────────────────
174
+ # # @staticmethod
175
+ # # def _decode(file_bytes: bytes) -> Optional[np.ndarray]:
176
+ # # try:
177
+ # # nparr = np.frombuffer(file_bytes, np.uint8)
178
+ # # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
179
+ # # if img is not None:
180
+ # # return img
181
+ # # except Exception:
182
+ # # pass
183
+ # # try:
184
+ # # pil = Image.open(io.BytesIO(file_bytes)).convert("RGB")
185
+ # # return cv2.cvtColor(np.array(pil), cv2.COLOR_RGB2BGR)
186
+ # # except Exception as e:
187
+ # # logger.error(f"[AI] Không đọc ảnh: {e}")
188
+ # # return None
189
+
190
+ # # def extract_faces(self, file_bytes: bytes) -> list[dict]:
191
+ # # """
192
+ # # Nhận bytes ảnh → list { box, descriptor }.
193
+ # # Tổng thời gian: ~25–50ms trên CPU.
194
+ # # """
195
+ # # img = self._decode(file_bytes)
196
+ # # if img is None:
197
+ # # return []
198
+
199
+ # # h, w = img.shape[:2]
200
+
201
+ # # # Update input size cho YuNet theo kích thước ảnh thực
202
+ # # self._detector.setInputSize((w, h))
203
+
204
+ # # # ── Detect ────────────────────────────────────────────────────────
205
+ # # _, faces_raw = self._detector.detect(img)
206
+
207
+ # # if faces_raw is None or len(faces_raw) == 0:
208
+ # # logger.info("[AI] Không phát hiện khuôn mặt")
209
+ # # return []
210
+
211
+ # # logger.info(f"[AI] Phát hiện {len(faces_raw)} khuôn mặt")
212
+
213
+ # # results = []
214
+ # # for face_data in faces_raw:
215
+ # # # face_data: [x, y, w, h, lm_x0, lm_y0, ..., score]
216
+ # # x, y, fw, fh = [int(v) for v in face_data[:4]]
217
+ # # det_score = float(face_data[-1])
218
+
219
+ # # # Đảm bảo bbox nằm trong ảnh
220
+ # # x = max(0, x)
221
+ # # y = max(0, y)
222
+ # # fw = min(fw, w - x)
223
+ # # fh = min(fh, h - y)
224
+
225
+ # # # ── Encode ────────────────────────────────────────────────────
226
+ # # # alignCrop: crop + align theo landmark → embedding chuẩn hơn
227
+ # # aligned = self._recognizer.alignCrop(img, face_data)
228
+ # # feature = self._recognizer.feature(aligned) # shape (1, 128)
229
+ # # encoding = feature.flatten().tolist() # list[float] 128 phần tử
230
+
231
+ # # results.append({
232
+ # # "box": {
233
+ # # "x": x,
234
+ # # "y": y,
235
+ # # "width": fw,
236
+ # # "height": fh,
237
+ # # },
238
+ # # "descriptor": encoding,
239
+ # # "det_score": det_score,
240
+ # # })
241
+
242
+ # # return results
243
+
244
+ # # @staticmethod
245
+ # # def save_image(file_bytes: bytes, person_id: str, index: int = 0) -> str:
246
+ # # """Lưu ảnh vào uploads/, trả về đường dẫn."""
247
+ # # filename = f"{person_id}_{index}.jpg"
248
+ # # filepath = os.path.join(UPLOAD_DIR, filename)
249
+ # # nparr = np.frombuffer(file_bytes, np.uint8)
250
+ # # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
251
+ # # if img is not None:
252
+ # # cv2.imwrite(filepath, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
253
+ # # else:
254
+ # # with open(filepath, "wb") as f:
255
+ # # f.write(file_bytes)
256
+ # # return filepath
257
+
258
+
259
+ # # # ─── Singleton ────────────────────────────────────────────────────────────────
260
+ # # face_ai_service = FaceAiService()
261
+ # # face_memory_store = FaceMemoryStore()
262
+
263
+
264
+ # import cv2, numpy as np, io, os, threading, logging, urllib.request
265
+ # from dataclasses import dataclass, field
266
  # from typing import Optional
267
  # from PIL import Image
268
+
269
  # logger = logging.getLogger(__name__)
270
+
271
+ # MODEL_DIR = "models"
272
+ # UPLOAD_DIR = "uploads"
273
+ # YUNET_PATH = os.path.join(MODEL_DIR, "face_detection_yunet_2023mar.onnx")
274
+ # SFACE_PATH = os.path.join(MODEL_DIR, "face_recognition_sface_2021dec.onnx")
275
+ # YUNET_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx"
276
  # SFACE_URL = "https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx"
277
  # COSINE_THRESHOLD = 0.40
278
+
279
  # os.makedirs(MODEL_DIR, exist_ok=True)
280
  # os.makedirs(UPLOAD_DIR, exist_ok=True)
 
 
 
 
 
281
 
 
 
 
282
 
283
+ # def _download_model(url, path, name):
284
+ # if os.path.exists(path): return
285
+ # logger.info(f"[Model] Tải {name}...")
286
+ # urllib.request.urlretrieve(url, path)
287
+ # logger.info(f"[Model] {name} → {path}")
288
 
289
 
 
 
 
290
  # @dataclass
291
  # class CachedFace:
292
+ # person_id: str
293
+ # name: str
294
+ # role: str
295
+ # img_path: str
296
+ # encoding: np.ndarray
297
+ # work_expiry_date: Optional[str] = None # "YYYY-MM-DD" hoặc None
298
 
299
 
300
  # class FaceMemoryStore:
 
 
301
  # def __init__(self):
302
  # self._faces: list[CachedFace] = []
303
  # self._lock = threading.RLock()
304
  # self._loaded = False
305
 
306
  # @property
307
+ # def is_loaded(self): return self._loaded
 
308
 
309
  # @property
310
+ # def count(self):
311
+ # with self._lock: return len(self._faces)
 
312
 
313
+ # def load_all(self, rows: list[dict]):
 
314
  # with self._lock:
315
  # self._faces = []
316
  # for row in rows:
317
  # try:
318
+ # enc = self._norm(np.array(row["embedding_vector"], dtype=np.float32))
 
319
  # self._faces.append(CachedFace(
320
  # person_id=row["person_id"],
321
  # name=row["name"],
322
+ # role=row.get("role",""),
323
+ # img_path=row.get("img_path",""),
324
  # encoding=enc,
325
+ # work_expiry_date=row.get("work_expiry_date"),
326
  # ))
327
  # except Exception as e:
328
+ # logger.warning(f"[RAM] Skip {row.get('name')}: {e}")
329
  # self._loaded = True
330
+ # logger.info(f"[RAM] {len(self._faces)} khuôn mặt")
331
 
332
+ # def add(self, person_id, name, role, img_path, encoding, work_expiry_date=None):
333
+ # enc = self._norm(np.array(encoding, dtype=np.float32))
 
334
  # with self._lock:
335
+ # self._faces.append(CachedFace(person_id, name, role, img_path, enc, work_expiry_date))
336
+ # logger.info(f"[RAM] {name} | Tổng: {self.count}")
337
 
338
+ # def remove_by_person(self, person_id):
339
  # with self._lock:
340
  # before = len(self._faces)
341
  # self._faces = [f for f in self._faces if f.person_id != person_id]
342
  # return before - len(self._faces)
343
 
344
+ # def update_info(self, person_id, name, role):
345
  # with self._lock:
346
  # for f in self._faces:
347
  # if f.person_id == person_id:
348
+ # f.name = name; f.role = role
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
349
 
350
+ # def find_best_match(self, query_enc: np.ndarray, threshold=COSINE_THRESHOLD) -> Optional[dict]:
351
+ # with self._lock:
352
+ # if not self._faces: return None
353
+ # q = self._norm(query_enc)
354
+ # matrix = np.stack([f.encoding for f in self._faces])
355
+ # scores = matrix @ q
356
+ # idx = int(np.argmax(scores))
357
+ # score = float(scores[idx])
358
+ # if 1.0 - score < threshold:
359
  # best = self._faces[idx]
360
  # return {
361
+ # "person_id": best.person_id,
362
+ # "name": best.name,
363
+ # "role": best.role,
364
+ # "img_path": best.img_path,
365
+ # "score": score,
366
+ # "distance": 1.0 - score,
367
+ # "work_expiry_date": best.work_expiry_date,
368
  # }
369
  # return None
370
 
371
  # @staticmethod
372
+ # def _norm(v: np.ndarray) -> np.ndarray:
373
  # n = np.linalg.norm(v)
374
  # return v / n if n > 0 else v
375
 
376
 
 
 
 
377
  # class FaceAiService:
 
378
  # def __init__(self):
379
+ # _download_model(YUNET_URL, YUNET_PATH, "YuNet")
380
+ # _download_model(SFACE_URL, SFACE_PATH, "SFace")
381
+ # logger.info("[AI] Khởi tạo YuNet + SFace...")
382
+ # self._detector = cv2.FaceDetectorYN.create(YUNET_PATH, "", (320,240), score_threshold=0.6, nms_threshold=0.3, top_k=5)
383
+ # self._recognizer = cv2.FaceRecognizerSF.create(SFACE_PATH, "")
384
+ # logger.info("[AI] Sẵn sàng")
385
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
386
  # @staticmethod
387
+ # def _decode(file_bytes: bytes):
388
  # try:
389
+ # arr = np.frombuffer(file_bytes, np.uint8)
390
+ # img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
391
+ # if img is not None: return img
392
+ # except Exception: pass
 
 
393
  # try:
394
  # pil = Image.open(io.BytesIO(file_bytes)).convert("RGB")
395
  # return cv2.cvtColor(np.array(pil), cv2.COLOR_RGB2BGR)
396
  # except Exception as e:
397
+ # logger.error(f"[AI] Không đọc ảnh: {e}"); return None
 
398
 
399
  # def extract_faces(self, file_bytes: bytes) -> list[dict]:
 
 
 
 
400
  # img = self._decode(file_bytes)
401
+ # if img is None: return []
 
 
402
  # h, w = img.shape[:2]
 
 
403
  # self._detector.setInputSize((w, h))
 
 
404
  # _, faces_raw = self._detector.detect(img)
405
+ # if faces_raw is None or len(faces_raw) == 0: return []
 
 
 
 
 
 
406
  # results = []
407
+ # for fd in faces_raw:
408
+ # x,y,fw,fh = [int(v) for v in fd[:4]]
409
+ # x=max(0,x); y=max(0,y); fw=min(fw,w-x); fh=min(fh,h-y)
410
+ # aligned = self._recognizer.alignCrop(img, fd)
411
+ # feature = self._recognizer.feature(aligned)
 
 
 
 
 
 
 
 
 
 
 
 
412
  # results.append({
413
+ # "box": {"x":x,"y":y,"width":fw,"height":fh},
414
+ # "descriptor": feature.flatten().tolist(),
415
+ # "det_score": float(fd[-1]),
 
 
 
 
 
416
  # })
417
+ # logger.info(f"[AI] {len(results)} khuôn mặt")
418
  # return results
419
 
420
  # @staticmethod
421
  # def save_image(file_bytes: bytes, person_id: str, index: int = 0) -> str:
 
422
  # filename = f"{person_id}_{index}.jpg"
423
  # filepath = os.path.join(UPLOAD_DIR, filename)
424
+ # arr = np.frombuffer(file_bytes, np.uint8)
425
+ # img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
426
  # if img is not None:
427
  # cv2.imwrite(filepath, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
428
  # else:
429
+ # with open(filepath, "wb") as f: f.write(file_bytes)
 
430
  # return filepath
431
 
432
 
 
433
  # face_ai_service = FaceAiService()
434
  # face_memory_store = FaceMemoryStore()
435
+ import cv2, numpy as np, io, os, threading, logging, urllib.request, base64
 
 
436
  from dataclasses import dataclass, field
437
  from typing import Optional
438
  from PIL import Image
 
588
  logger.info(f"[AI] {len(results)} khuôn mặt")
589
  return results
590
 
591
+ # ── THÊM: return cả (filepath, data_url), logic lưu file giữ nguyên ──
592
  @staticmethod
593
+ def save_image(file_bytes: bytes, person_id: str, index: int = 0) -> tuple[str, str]:
594
  filename = f"{person_id}_{index}.jpg"
595
  filepath = os.path.join(UPLOAD_DIR, filename)
596
  arr = np.frombuffer(file_bytes, np.uint8)
597
  img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
598
  if img is not None:
599
  cv2.imwrite(filepath, img, [cv2.IMWRITE_JPEG_QUALITY, 90])
600
+ _, buffer = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 90])
601
+ data_url = f"data:image/jpeg;base64,{base64.b64encode(buffer).decode('utf-8')}"
602
  else:
603
+ with open(filepath, "wb") as f:
604
+ f.write(file_bytes)
605
+ data_url = f"data:image/jpeg;base64,{base64.b64encode(file_bytes).decode('utf-8')}"
606
+ return filepath, data_url # ← (path cũ, base64 mới)
607
 
608
 
609
  face_ai_service = FaceAiService()