drixo committed
Commit f071a9b · Parent: 274c00a

Add face recognition and motion detection

.gitignore ADDED
@@ -0,0 +1,11 @@
+# Snapshots and caches
+snapshots/
+known_faces/encodings.pkl
+__pycache__/
+*.pyc
+.pytest_cache/
+
+# Optional: uncomment to avoid committing face photos
+# known_faces/*.jpg
+# known_faces/*.jpeg
+# known_faces/*.png
README.md CHANGED
@@ -9,4 +9,63 @@ app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Smartdoor: detection, face recognition & motion
+
+Object detection with **YOLOv8n** (person, dog, cat, etc.), optional **face recognition** (e.g. "Danny is at the door"), and a **motion-only** mode to save CPU.
+
+## Run locally
+
+```bash
+pip install -r requirements.txt
+# Camera
+python main.py
+# With face recognition (add photos in known_faces/ as name.jpg)
+python main.py --known-faces known_faces
+# Only run detection when motion is detected (saves CPU)
+python main.py --motion-only
+# Video file + snapshots
+python main.py --source video.mp4 --snapshots .
+```
+
+## Face recognition
+
+Put one image per person in `known_faces/`, named by the label you want (e.g. `danny.jpg`). Use a clear, front-facing photo. Encodings are cached in `known_faces/encodings.pkl`.
+
+## Structure
+
+- `detector/` — YOLOv8n object detection (boxes + labels)
+- `recognizer/` — face recognition (match known people)
+- `notifier/` — log, print, snapshot
+- `motion.py` — motion detection (optional trigger)
+- `main.py` — camera/video pipeline
+- `app.py` — Gradio demo for Hugging Face Spaces
+
+## Push to Hugging Face
+
+1. Create a Space at [huggingface.co/spaces](https://huggingface.co/spaces) (e.g. with the **Gradio** SDK).
+2. Clone your Space and add this repo’s files, or push from an existing clone:
+
+```bash
+cd smartdoor
+git remote add space https://huggingface.co/spaces/YOUR_USERNAME/smartdoor  # if not already added
+git add .
+git commit -m "Add face recognition and motion detection"
+git push space main
+```
+
+3. If the Space repo was created empty, you can also use the HF CLI:
+
+```bash
+pip install huggingface_hub
+huggingface-cli login
+git clone https://huggingface.co/spaces/YOUR_USERNAME/smartdoor smartdoor-space  # clone into a separate directory
+cp -r smartdoor/. smartdoor-space/  # copy app.py, README.md, requirements.txt, etc. into the clone
+cd smartdoor-space
+git add .
+git commit -m "Add face recognition and motion detection"
+git push origin main
+```
+
+**Note:** The Gradio Space runs `app.py`; face recognition works if you add images under `known_faces/` in the Space (e.g. via the Files UI or at build time).
+
+Configuration reference: https://huggingface.co/docs/hub/spaces-config-reference
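The README's Structure section maps one module per stage. For reference, a minimal sketch of that pipeline on a single frame, using only the APIs added in this commit (`door.jpg` is a hypothetical test image):

```python
# One-frame pass through the pipeline: detect -> recognize -> notify -> annotate.
import cv2
from detector import Detector
from notifier import Notifier
from recognizer import FaceRecognizer

detector = Detector("yolov8n.pt")
recognizer = FaceRecognizer("known_faces")    # optional; needs face_recognition installed
notifier = Notifier(log_to_console=True)

frame = cv2.imread("door.jpg")                # hypothetical test image (BGR)
detections = detector.detect(frame, conf_threshold=0.25)

person_names = []
if recognizer.is_available:
    person_boxes = [d.xyxy for d in detections if detector.is_person(d.label)]
    faces = recognizer.recognize_faces_in_frame(frame, person_boxes)
    person_names = [(i, name) for i, (_box, name) in enumerate(faces)]

notifier.on_detections(frame, detections, detector, person_names=person_names)
cv2.imwrite("annotated.jpg", detector.annotate_frame(frame, detections))
```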
app.py ADDED
@@ -0,0 +1,80 @@
+"""
+Gradio app for Smartdoor — upload image, run detection + optional face recognition.
+"""
+
+import gradio as gr
+from pathlib import Path
+import sys
+import cv2
+import numpy as np
+
+sys.path.insert(0, str(Path(__file__).resolve().parent))
+from detector import Detector
+
+try:
+    from recognizer import FaceRecognizer
+    _recognizer = None
+    _known = Path(__file__).resolve().parent / "known_faces"
+    if _known.exists():
+        _recognizer = FaceRecognizer(_known)
+        if not _recognizer.is_available:
+            _recognizer = None
+except Exception:
+    _recognizer = None
+
+detector = Detector("yolov8n.pt")
+
+
+def run_detection(image):
+    if image is None:
+        return None, ""
+    if isinstance(image, np.ndarray):
+        frame = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if image.ndim == 3 else image
+    else:
+        frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+    detections = detector.detect(frame, conf_threshold=0.25)
+    annotated = detector.annotate_frame(frame, detections)
+    lines = []
+    person_names = []
+    face_results = []
+    if _recognizer is not None and _recognizer.is_available:
+        person_boxes = [d.xyxy for d in detections if detector.is_person(d.label)]
+        if person_boxes:
+            face_results = _recognizer.recognize_faces_in_frame(frame, person_boxes)
+            person_names = [r[1] for r in face_results]
+    person_idx = 0
+    for d in detections:
+        if detector.is_person(d.label):
+            name = person_names[person_idx] if person_idx < len(person_names) else None
+            lines.append("{} is at the door".format(name) if name else "Person detected")
+            person_idx += 1
+        elif detector.is_animal(d.label):
+            lines.append("{} detected".format(d.label))
+        else:
+            lines.append("{} detected".format(d.label))
+    for (fx1, fy1, fx2, fy2), name in face_results:
+        if name:
+            cv2.rectangle(annotated, (fx1, fy1), (fx2, fy2), (255, 0, 0), 2)
+            cv2.putText(
+                annotated, name, (fx1, fy1 - 8),
+                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA
+            )
+    text = "\n".join(lines) if lines else "No person/animal detected"
+    annotated_rgb = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
+    return annotated_rgb, text
+
+
+with gr.Blocks(title="Smartdoor — Detection & Faces") as app:
+    gr.Markdown(
+        "# Smartdoor — Object detection & face recognition\n"
+        "Upload an image. Add photos in `known_faces/` as `name.jpg` to recognize people."
+    )
+    with gr.Row():
+        inp = gr.Image(label="Upload or paste image", type="numpy")
+        out_img = gr.Image(label="Detections")
+    out_text = gr.Textbox(label="Detected", lines=4)
+    btn = gr.Button("Detect")
+    btn.click(fn=run_detection, inputs=inp, outputs=[out_img, out_text])
+
+if __name__ == "__main__":
+    app.launch()
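Since `app.py` builds the Blocks UI at import time but only launches it under `__main__`, `run_detection` can be smoke-tested headlessly. A minimal sketch, assuming the repo root is the working directory (`test.jpg` is a hypothetical image):

```python
# Call run_detection directly; it expects an RGB ndarray and returns (RGB image, text).
import cv2
from app import run_detection

bgr = cv2.imread("test.jpg")                  # hypothetical image, loaded as BGR
annotated_rgb, text = run_detection(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB))
print(text)
cv2.imwrite("out.jpg", cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))
```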
detector/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .detector import Detector, Detection
+
+__all__ = ["Detector", "Detection"]
detector/detector.py ADDED
@@ -0,0 +1,70 @@
+"""
+Object detection using YOLOv8n (nano). CPU-friendly, no face recognition.
+Returns bounding boxes + labels for person, dog, cat, etc.
+"""
+
+from dataclasses import dataclass
+from typing import List, Tuple
+
+import cv2
+from ultralytics import YOLO
+
+
+# Classes we care about for door / animal use case
+PERSON_LABEL = "person"
+ANIMAL_LABELS = frozenset({"dog", "cat", "bird", "horse", "sheep", "cow", "bear"})
+
+
+@dataclass
+class Detection:
+    """Single detection: label + bbox (xyxy)."""
+    label: str
+    confidence: float
+    xyxy: Tuple[float, float, float, float]  # x1, y1, x2, y2
+
+
+class Detector:
+    """
+    YOLOv8n-based detector. Load once, run on frames.
+    """
+
+    def __init__(self, model_path: str = "yolov8n.pt"):
+        self.model = YOLO(model_path)
+        self.names = self.model.names
+
+    def detect(self, frame, conf_threshold: float = 0.25) -> List[Detection]:
+        """
+        Run detection on a BGR frame (e.g. from cv2).
+        Returns list of Detection with label, confidence, bbox.
+        """
+        results = self.model(frame, conf=conf_threshold, verbose=False)
+        out = []
+        for r in results:
+            if r.boxes is None:
+                continue
+            for box in r.boxes:
+                cls_id = int(box.cls[0])
+                label = self.names[cls_id]
+                conf = float(box.conf[0])
+                xyxy = tuple(map(float, box.xyxy[0]))
+                out.append(Detection(label=label, confidence=conf, xyxy=xyxy))
+        return out
+
+    def is_person(self, label: str) -> bool:
+        return label == PERSON_LABEL
+
+    def is_animal(self, label: str) -> bool:
+        return label in ANIMAL_LABELS
+
+    def annotate_frame(self, frame, detections: List[Detection]):
+        """Draw bounding boxes and labels on a copy of the frame."""
+        annotated = frame.copy()
+        for d in detections:
+            x1, y1, x2, y2 = map(int, d.xyxy)
+            cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)
+            text = "{} {:.2f}".format(d.label, d.confidence)
+            cv2.putText(
+                annotated, text, (x1, y1 - 8),
+                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA
+            )
+        return annotated
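`Detector` is self-contained, so it can be used on its own. A minimal usage sketch (`door.jpg` is a hypothetical image; the YOLOv8n weights download on first use):

```python
# Standalone detection: detect() takes a BGR frame and returns Detection dataclasses.
import cv2
from detector import Detector

det = Detector("yolov8n.pt")
frame = cv2.imread("door.jpg")                # hypothetical image
for d in det.detect(frame, conf_threshold=0.25):
    kind = "person" if det.is_person(d.label) else "animal" if det.is_animal(d.label) else "other"
    print(f"{d.label} ({kind}) conf={d.confidence:.2f} box={tuple(int(v) for v in d.xyxy)}")
```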
known_faces/README.md ADDED
@@ -0,0 +1,8 @@
+# Known faces (face recognition)
+
+Add one image per person, named by the label you want announced:
+
+- `danny.jpg` → "Danny is at the door"
+- `jane.jpg` → "Jane is at the door"
+
+Use a clear, front-facing face photo. On first run, encodings are computed and cached in `encodings.pkl`.
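Because `encodings.pkl` is loaded in preference to re-reading the photos, the cache can go stale after you add or rename images. A sketch of forcing a rebuild, based on the cache logic in `recognizer/recognizer.py`:

```python
# Delete the stale cache; FaceRecognizer then re-encodes known_faces/*.jpg and re-saves it.
from pathlib import Path
from recognizer import FaceRecognizer

Path("known_faces/encodings.pkl").unlink(missing_ok=True)
rec = FaceRecognizer("known_faces")
print("face recognition ready:", rec.is_available)
```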
main.py ADDED
@@ -0,0 +1,147 @@
+"""
+Smartdoor — object detection, optional face recognition and motion trigger.
+Pipeline: (optional motion) → capture frame → detection → face recognition for persons → notify → draw.
+"""
+
+import argparse
+import logging
+import sys
+from pathlib import Path
+
+import cv2
+
+sys.path.insert(0, str(Path(__file__).resolve().parent))
+
+from detector import Detector
+from notifier import Notifier
+from motion import MotionDetector
+
+try:
+    from recognizer import FaceRecognizer
+except ImportError:
+    FaceRecognizer = None
+
+
+def setup_logging(level=logging.INFO):
+    logging.basicConfig(
+        level=level,
+        format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
+        datefmt="%Y-%m-%d %H:%M:%S",
+    )
+
+
+def run(
+    source=0,
+    conf=0.25,
+    snapshot_dir=None,
+    no_show=False,
+    motion_only=False,
+    known_faces_dir=None,
+):
+    setup_logging()
+    detector = Detector("yolov8n.pt")
+    notifier = Notifier(log_to_console=True, snapshot_dir=snapshot_dir)
+
+    recognizer = None
+    if known_faces_dir and Path(known_faces_dir).exists() and FaceRecognizer is not None:
+        recognizer = FaceRecognizer(Path(known_faces_dir))
+        if recognizer.is_available:
+            logging.info("Face recognition enabled for %s", known_faces_dir)
+        else:
+            recognizer = None
+
+    motion = MotionDetector(threshold=25.0, min_area=500) if motion_only else None
+
+    cap = cv2.VideoCapture(source)
+    if not cap.isOpened():
+        logging.error("Could not open video source: %s", source)
+        return 1
+
+    try:
+        while True:
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            if motion is not None and not motion.has_motion(frame):
+                if not no_show:
+                    cv2.imshow("Smartdoor — Detection", frame)
+                    if cv2.waitKey(1) == 27:
+                        break
+                continue
+
+            detections = detector.detect(frame, conf_threshold=conf)
+            person_names = []
+            face_results = []
+            if recognizer is not None and recognizer.is_available:
+                person_boxes = [d.xyxy for d in detections if detector.is_person(d.label)]
+                if person_boxes:
+                    face_results = recognizer.recognize_faces_in_frame(frame, person_boxes)
+                    person_names = [(i, face_results[i][1]) for i in range(len(face_results))]
+
+            notifier.on_detections(frame, detections, detector, person_names=person_names)
+            annotated = detector.annotate_frame(frame, detections)
+
+            if recognizer is not None and face_results:
+                for (fx1, fy1, fx2, fy2), name in face_results:
+                    if name:
+                        cv2.rectangle(annotated, (fx1, fy1), (fx2, fy2), (255, 0, 0), 2)
+                        cv2.putText(
+                            annotated, name, (fx1, fy1 - 8),
+                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA
+                        )
+
+            if not no_show:
+                cv2.imshow("Smartdoor — Detection", annotated)
+                if cv2.waitKey(1) == 27:
+                    break
+    finally:
+        cap.release()
+        cv2.destroyAllWindows()
+
+    return 0
+
+
+def main():
+    p = argparse.ArgumentParser(
+        description="Smartdoor: object detection, face recognition, motion trigger"
+    )
+    p.add_argument(
+        "--source",
+        default=0,
+        help="Camera index (default 0) or path to video file",
+    )
+    p.add_argument("--conf", type=float, default=0.25, help="Confidence threshold")
+    p.add_argument(
+        "--snapshots",
+        type=Path,
+        default=None,
+        help="Directory to save snapshots when person/animal detected",
+    )
+    p.add_argument("--no-show", action="store_true", help="Do not show OpenCV window")
+    p.add_argument(
+        "--motion-only",
+        action="store_true",
+        help="Only run detection when motion is detected (saves CPU)",
+    )
+    p.add_argument(
+        "--known-faces",
+        type=Path,
+        default=None,
+        help="Folder with known face images (name.jpg); enables face recognition",
+    )
+    args = p.parse_args()
+
+    source = int(args.source) if str(args.source).isdigit() else args.source
+    sys.exit(run(
+        source=source,
+        conf=args.conf,
+        snapshot_dir=args.snapshots,
+        no_show=args.no_show,
+        motion_only=args.motion_only,
+        known_faces_dir=args.known_faces,
+    ))
+
+
+if __name__ == "__main__":
+    main()
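`run()` is plain Python, so the same pipeline can be driven programmatically instead of via the CLI. A sketch mirroring `python main.py --source video.mp4 --snapshots . --motion-only --no-show` (`video.mp4` is a hypothetical input file):

```python
# Headless, motion-gated run over a video file; run() returns 0 on success, 1 if the source fails.
from pathlib import Path
from main import run

exit_code = run(
    source="video.mp4",              # hypothetical input file
    conf=0.25,
    snapshot_dir=Path("."),          # snapshots land in ./snapshots/
    no_show=True,
    motion_only=True,
    known_faces_dir=Path("known_faces"),
)
```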
models/.gitkeep ADDED
@@ -0,0 +1,2 @@
+# Custom models later (e.g. fine-tuned animal classifier).
+# YOLOv8n is loaded via ultralytics from cache.
motion.py ADDED
@@ -0,0 +1,40 @@
+"""
+Simple motion detection: frame difference above threshold.
+Use it to run detection only when there's movement (saves CPU).
+"""
+
+import cv2
+import numpy as np
+from typing import Optional
+
+
+class MotionDetector:
+    def __init__(self, threshold: float = 25.0, min_area: int = 500):
+        """
+        threshold: mean absolute difference above this → motion
+        min_area: minimum contour area to count as motion (noise filter)
+        """
+        self.threshold = threshold
+        self.min_area = min_area
+        self._prev_gray: Optional[np.ndarray] = None
+
+    def has_motion(self, frame) -> bool:
+        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        gray = cv2.GaussianBlur(gray, (21, 21), 0)
+        if self._prev_gray is None:
+            self._prev_gray = gray
+            return True  # First frame: assume motion so we run detection
+        diff = cv2.absdiff(self._prev_gray, gray)
+        self._prev_gray = gray
+        mean_diff = np.mean(diff)
+        if mean_diff < self.threshold:
+            return False
+        thresh = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
+        thresh = cv2.dilate(thresh, None, iterations=2)
+        contours, _ = cv2.findContours(
+            thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+        )
+        for c in contours:
+            if cv2.contourArea(c) >= self.min_area:
+                return True
+        return False
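This is the gate `main.py` applies under `--motion-only`. A minimal sketch of the same gating in a capture loop:

```python
# Skip the expensive detector on still frames; run it only when has_motion() is True.
import cv2
from motion import MotionDetector

motion = MotionDetector(threshold=25.0, min_area=500)
cap = cv2.VideoCapture(0)                     # default webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    if motion.has_motion(frame):
        print("motion: detection would run on this frame")
cap.release()
```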
notifier/notifier.py ADDED
@@ -0,0 +1,72 @@
+"""
+Notify on detections: print, log, and save snapshots.
+Later: voice (pyttsx3 / gTTS).
+"""
+
+import logging
+from pathlib import Path
+from datetime import datetime
+from typing import List, Optional
+
+import cv2
+
+# Optional: from detector import Detection
+# We accept generic dict/list to avoid circular imports; main passes Detection-like objects.
+
+
+def _ensure_snapshots_dir(base_dir: Path) -> Path:
+    d = base_dir / "snapshots"
+    d.mkdir(parents=True, exist_ok=True)
+    return d
+
+
+class Notifier:
+    """
+    Log and snapshot on person/animal detections.
+    """
+
+    def __init__(self, log_to_console: bool = True, snapshot_dir: Optional[Path] = None):
+        self.log_to_console = log_to_console
+        self.snapshot_dir = snapshot_dir
+        self._logger = logging.getLogger("smartdoor.notifier")
+        if snapshot_dir is not None:
+            self._snap_dir = _ensure_snapshots_dir(snapshot_dir)
+        else:
+            self._snap_dir = None
+
+    def on_detections(self, frame, detections: List, detector, person_names=None) -> None:
+        """
+        For each detection: print/log label; if person or animal, optionally save snapshot.
+        person_names: optional list of (person_detection_index, name) from face recognizer.
+        """
+        person_names = person_names or []
+        name_by_index = dict(person_names)
+        idx = 0
+        for d in detections:
+            label = d.label
+            if detector.is_person(label):
+                name = name_by_index.get(idx)
+                msg = "{} is at the door".format(name) if name else "Person detected"
+                idx += 1
+            elif detector.is_animal(label):
+                msg = "{} detected".format(label)
+            else:
+                msg = "{} detected".format(label)
+
+            if self.log_to_console:
+                print(msg)
+            self._logger.info(msg)
+
+            if self._snap_dir is not None and (
+                detector.is_person(label) or detector.is_animal(label)
+            ):
+                self._save_snapshot(frame, label)
+
+    def _save_snapshot(self, frame, label: str) -> None:
+        if self._snap_dir is None:
+            return
+        ts = datetime.now().strftime("%Y%m%d_%H%M%S")
+        name = f"{ts}_{label}.jpg"
+        path = self._snap_dir / name
+        cv2.imwrite(str(path), frame)
+        self._logger.info("Snapshot saved: %s", path)
recognizer/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .recognizer import FaceRecognizer, HAS_FACE_RECOGNITION
+
+__all__ = ["FaceRecognizer", "HAS_FACE_RECOGNITION"]
recognizer/recognizer.py ADDED
@@ -0,0 +1,131 @@
+"""
+Face recognition: crop face from person bbox, embed, match known people.
+Known faces: put images in known_faces/ as name.jpg (e.g. danny.jpg).
+Encodings cached in known_faces/encodings.pkl.
+"""
+
+from pathlib import Path
+from typing import List, Optional, Tuple
+import pickle
+import logging
+
+import cv2
+import numpy as np
+
+try:
+    import face_recognition
+    HAS_FACE_RECOGNITION = True
+except ImportError:
+    HAS_FACE_RECOGNITION = False
+
+logger = logging.getLogger("smartdoor.recognizer")
+
+DEFAULT_TOLERANCE = 0.5
+
+
+class FaceRecognizer:
+    def __init__(self, known_faces_dir, tolerance=DEFAULT_TOLERANCE):
+        self.known_faces_dir = Path(known_faces_dir)
+        self.tolerance = tolerance
+        self._encodings_by_name = {}
+        self._loaded = False
+        self._load_known_faces()
+
+    def _load_known_faces(self):
+        if not HAS_FACE_RECOGNITION:
+            logger.warning("face_recognition not installed; face recognition disabled")
+            return
+        cache = self.known_faces_dir / "encodings.pkl"
+        if cache.exists():
+            try:
+                with open(cache, "rb") as f:
+                    self._encodings_by_name = pickle.load(f)
+                self._loaded = True
+                logger.info("Loaded %d known people from cache", len(self._encodings_by_name))
+                return
+            except Exception as e:
+                logger.warning("Could not load encodings cache: %s", e)
+        self._encodings_by_name = {}
+        for path in self.known_faces_dir.glob("*.jpg"):
+            name = path.stem
+            encodings = self._encode_image(path)
+            if encodings:
+                self._encodings_by_name[name] = encodings
+                logger.info("Registered %s from %s", name, path.name)
+        if self._encodings_by_name:
+            self.known_faces_dir.mkdir(parents=True, exist_ok=True)
+            try:
+                with open(cache, "wb") as f:
+                    pickle.dump(self._encodings_by_name, f)
+            except Exception as e:
+                logger.warning("Could not save encodings cache: %s", e)
+        self._loaded = bool(self._encodings_by_name)
+
+    def _encode_image(self, path):
+        if not HAS_FACE_RECOGNITION:
+            return []
+        img = face_recognition.load_image_file(str(path))
+        encodings = face_recognition.face_encodings(img)
+        return list(encodings)
+
+    def _encode_bgr(self, bgr_frame):
+        if not HAS_FACE_RECOGNITION:
+            return []
+        rgb = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
+        encodings = face_recognition.face_encodings(rgb)
+        return list(encodings)
+
+    def recognize_face(self, face_crop_bgr):
+        if not HAS_FACE_RECOGNITION or not self._encodings_by_name:
+            return None
+        encodings = self._encode_bgr(face_crop_bgr)
+        if not encodings:
+            return None
+        query = encodings[0]
+        for name, known_list in self._encodings_by_name.items():
+            matches = face_recognition.compare_faces(
+                known_list, query, tolerance=self.tolerance
+            )
+            if any(matches):
+                return name
+        return None
+
+    def recognize_faces_in_frame(self, frame_bgr, person_boxes):
+        results = []
+        if not HAS_FACE_RECOGNITION or not self._encodings_by_name:
+            return results
+        for (x1, y1, x2, y2) in person_boxes:
+            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+            h, w = frame_bgr.shape[:2]
+            pad = 0.1
+            pw = int((x2 - x1) * pad)
+            ph = int((y2 - y1) * pad)
+            x1 = max(0, x1 - pw)
+            y1 = max(0, y1 - ph)
+            x2 = min(w, x2 + pw)
+            y2 = min(h, y2 + ph)
+            crop = frame_bgr[y1:y2, x1:x2]
+            if crop.size == 0:
+                results.append(((x1, y1, x2, y2), None))
+                continue
+            rgb_crop = cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
+            face_locs = face_recognition.face_locations(rgb_crop, model="hog")
+            if not face_locs:
+                results.append(((x1, y1, x2, y2), None))
+                continue
+            t, r, b, l = face_locs[0]
+            face_crop = crop[t:b, l:r]
+            if face_crop.size == 0:
+                results.append(((x1, y1, x2, y2), None))
+                continue
+            name = self.recognize_face(face_crop)
+            fx1 = x1 + l
+            fy1 = y1 + t
+            fx2 = x1 + r
+            fy2 = y1 + b
+            results.append(((fx1, fy1, fx2, fy2), name))
+        return results
+
+    @property
+    def is_available(self):
+        return HAS_FACE_RECOGNITION and self._loaded
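Putting the recognizer together with the detector, as `main.py` and `app.py` do. A minimal sketch (`door.jpg` is a hypothetical image; requires the `face_recognition` package):

```python
# Detect people first, then match any faces inside their (padded) boxes to known_faces/.
import cv2
from detector import Detector
from recognizer import FaceRecognizer

rec = FaceRecognizer("known_faces")           # encodes and caches known_faces/*.jpg
det = Detector("yolov8n.pt")
frame = cv2.imread("door.jpg")                # hypothetical image
if rec.is_available:
    boxes = [d.xyxy for d in det.detect(frame) if det.is_person(d.label)]
    for (fx1, fy1, fx2, fy2), name in rec.recognize_faces_in_frame(frame, boxes):
        print(name or "unknown face", (fx1, fy1, fx2, fy2))
```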
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ultralytics>=8.0.0
+opencv-python>=4.8.0
+gradio>=4.0.0
+face_recognition>=1.3.0