Spaces:
Sleeping
Sleeping
Commit ·
5cc5b3f
0
Parent(s):
first commit
Browse files- .gitignore +15 -0
- Dockerfile +48 -0
- README.md +220 -0
- __pycache__/helper.cpython-312.pyc +0 -0
- __pycache__/liveness.cpython-312.pyc +0 -0
- __pycache__/main.cpython-310.pyc +0 -0
- __pycache__/main.cpython-312.pyc +0 -0
- __pycache__/main.cpython-313.pyc +0 -0
- helper.py +63 -0
- liveness.py +59 -0
- main.py +449 -0
- packages.txt +3 -0
- requirements.txt +12 -0
.gitignore
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
./face_db
|
| 2 |
+
myenv/
|
| 3 |
+
facerecog/
|
| 4 |
+
test_verify-image.py
|
| 5 |
+
test_verify.py
|
| 6 |
+
test/*
|
| 7 |
+
face_db/*
|
| 8 |
+
*.onnx
|
| 9 |
+
*.onnx.*
|
| 10 |
+
*.bin
|
| 11 |
+
*.jpg
|
| 12 |
+
*.jpeg
|
| 13 |
+
*.png
|
| 14 |
+
*.webp
|
| 15 |
+
*.onnx
|
Dockerfile
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ---------- CPU BASE ----------
# Official slim (Debian-based) Python image; no NVIDIA/CUDA base needed
# because inference runs on CPU only.
FROM python:3.10-slim

ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

WORKDIR /app

# System deps:
# - libglib/libsm6/libxrender/libxext + ffmpeg: OpenCV runtime requirements
#   (ffmpeg/libsm6/libxext6 combo per https://stackoverflow.com/a, CC BY-SA 4.0)
# - build-essential/gcc/g++/make: in case a Python package must be compiled
# BUGFIX: the second `RUN apt-get update && apt-get install ffmpeg ...` layer
# re-fetched the package lists and never removed them, bloating the image;
# everything is now installed in ONE layer that cleans up after itself.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential gcc g++ make \
    ffmpeg libglib2.0-0 libsm6 libxrender1 libxext6 \
    && rm -rf /var/lib/apt/lists/*

# Python deps (copied first so this layer caches across code-only changes)
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r /app/requirements.txt

# App
COPY main.py /app/main.py
COPY liveness.py /app/liveness.py
COPY helper.py /app/helper.py
COPY 2.7_80x80_MiniFASNetV2.onnx /app/2.7_80x80_MiniFASNetV2.onnx

# Runtime env
ENV FACE_DET_SIZE=480,480
# IMPORTANT: restrict ONNX Runtime to the CPU provider
ENV ORT_PROVIDERS=CPUExecutionProvider

# Hugging Face Docker Spaces expect the app on port 7860 by default.
EXPOSE 7860

# Two workers are fine on CPU (no GPU context locking to contend with)
CMD ["uvicorn", "main:app", "--host=0.0.0.0", "--port=7860", "--workers=2"]
|
README.md
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Face Recognition Sss Beta
|
| 3 |
+
emoji: 📊
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: yellow
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Face Verification API (FastAPI + InsightFace)
|
| 11 |
+
|
| 12 |
+
FastAPI service for face enroll & verification using InsightFace (`buffalo_l`).
|
| 13 |
+
Supports:
|
| 14 |
+
- Enroll via **JSON (base64)** or **multipart files**
|
| 15 |
+
- Verify via **single HTTP POST** (multipart or JSON) and **WebSocket stream** (binary or JSON)
|
| 16 |
+
- Clear per-employee embeddings
|
| 17 |
+
- Swagger UI
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
## Endpoints
|
| 22 |
+
|
| 23 |
+
- `POST /face/enroll` — JSON base64 enroll
|
| 24 |
+
- `POST /face/enroll-files` — multipart file enroll
|
| 25 |
+
- `POST /face/verify` — verify one frame (multipart `frame`/`image` or JSON data URL)
|
| 26 |
+
- `WEBSOCKET /face/verify` — stream frames (binary bytes or JSON data URL)
|
| 27 |
+
- `POST /face/verify-image` — JSON base64 verify
|
| 28 |
+
- `POST /face/clear` — clear embeddings
|
| 29 |
+
- `GET /health` — health check
|
| 30 |
+
|
| 31 |
+
Swagger UI: [http://localhost:8000/docs](http://localhost:8000/docs)
|
| 32 |
+
ReDoc: [http://localhost:8000/redoc](http://localhost:8000/redoc)
|
| 33 |
+
|
| 34 |
+
---
|
| 35 |
+
|
| 36 |
+
## GPU Setup
|
| 37 |
+
|
| 38 |
+
Use if you want speed and have an NVIDIA GPU.
|
| 39 |
+
|
| 40 |
+
**Image requirements:**
|
| 41 |
+
- `onnxruntime-gpu` in requirements.txt
|
| 42 |
+
- Base image like `nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04`
|
| 43 |
+
|
| 44 |
+
### 1. Install NVIDIA driver on host
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
nvidia-smi # should print your GPU details
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### 2. Install NVIDIA Container Toolkit (Ubuntu example)
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
| 54 |
+
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
|
| 55 |
+
| sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
| 56 |
+
curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list \
|
| 57 |
+
| sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
|
| 58 |
+
| sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
| 59 |
+
sudo apt-get update
|
| 60 |
+
sudo apt-get install -y nvidia-container-toolkit
|
| 61 |
+
sudo nvidia-ctk runtime configure --runtime=docker
|
| 62 |
+
sudo systemctl restart docker
|
| 63 |
+
```
|
| 64 |
+
|
| 65 |
+
### 3. Test GPU passthrough
|
| 66 |
+
|
| 67 |
+
```bash
|
| 68 |
+
sudo docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
### 4. Build & Run (GPU)
|
| 72 |
+
|
| 73 |
+
```bash
|
| 74 |
+
sudo docker build -t face-api-gpu .
|
| 75 |
+
sudo docker run --rm -it --gpus all -p 8000:8000 \
|
| 76 |
+
-e ORT_PROVIDERS=CUDAExecutionProvider,CPUExecutionProvider \
|
| 77 |
+
-e FACE_SIM_THRESHOLD=0.75 \
|
| 78 |
+
-v $HOME/.insightface:/root/.insightface \
|
| 79 |
+
face-api-gpu
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
Open: [http://localhost:8000/docs](http://localhost:8000/docs)
|
| 83 |
+
|
| 84 |
+
---
|
| 85 |
+
|
| 86 |
+
## Troubleshooting
|
| 87 |
+
|
| 88 |
+
- **Error:** could not select device driver "" with capabilities: [[gpu]]
|
| 89 |
+
- NVIDIA Container Toolkit not set up → follow the GPU Setup steps.
|
| 90 |
+
- **Old Docker (<19.03):**
|
| 91 |
+
```bash
|
| 92 |
+
sudo docker run --rm -it --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all ...
|
| 93 |
+
```
|
| 94 |
+
- **Warning:** The NVIDIA Driver was not detected
|
| 95 |
+
- Install the host driver (`nvidia-smi` must work), then restart Docker.
|
| 96 |
+
- **Container exits during startup**
|
| 97 |
+
- Try CPU first:
|
| 98 |
+
```bash
|
| 99 |
+
sudo docker run --rm -it -p 8000:8000 \
|
| 100 |
+
-e ORT_PROVIDERS=CPUExecutionProvider \
|
| 101 |
+
-v $HOME/.insightface:/root/.insightface \
|
| 102 |
+
face-api-gpu
|
| 103 |
+
```
|
| 104 |
+
- **Permissions error on Docker socket**
|
| 105 |
+
- Add user to docker group:
|
| 106 |
+
```bash
|
| 107 |
+
sudo usermod -aG docker $USER
|
| 108 |
+
# log out/in or reboot
|
| 109 |
+
```
|
| 110 |
+
- Or use `sudo docker ...`
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
---
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
## Open Swagger
|
| 117 |
+
```bash
|
| 118 |
+
http://localhost:8000/docs/
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
---
|
| 122 |
+
|
| 123 |
+
## Curl Examples
|
| 124 |
+
|
| 125 |
+
### Enroll (files)
|
| 126 |
+
|
| 127 |
+
```bash
|
| 128 |
+
curl -X POST "http://localhost:8000/face/enroll-files" \
|
| 129 |
+
-F "employee_id=1" \
|
| 130 |
+
-F "files=@/path/to/3x4.jpg" \
|
| 131 |
+
-F "files=@/path/to/another.jpg"
|
| 132 |
+
```
|
| 133 |
+
|
| 134 |
+
### Enroll (base64 JSON)
|
| 135 |
+
|
| 136 |
+
```bash
|
| 137 |
+
IMG_B64="$(base64 -w0 /path/to/3x4.jpg)"
|
| 138 |
+
curl -X POST "http://localhost:8000/face/enroll" \
|
| 139 |
+
-H "Content-Type: application/json" \
|
| 140 |
+
-d "{\"employee_id\":\"1\",\"images\":[\"data:image/jpeg;base64,$IMG_B64\"]}"
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
### Verify (single, multipart file)
|
| 144 |
+
|
| 145 |
+
```bash
|
| 146 |
+
curl -X POST "http://localhost:8000/face/verify?employee_id=1&threshold=0.75" \
|
| 147 |
+
-F "frame=@/path/to/capture.jpg"
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
### Verify (single, JSON data URL)
|
| 151 |
+
|
| 152 |
+
```bash
|
| 153 |
+
IMG_B64="$(base64 -w0 /path/to/capture.jpg)"
|
| 154 |
+
curl -X POST "http://localhost:8000/face/verify?employee_id=1&threshold=0.75" \
|
| 155 |
+
-H "Content-Type: application/json" \
|
| 156 |
+
-d "{\"frame\":\"data:image/jpeg;base64,$IMG_B64\"}"
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
### Verify (image-only JSON)
|
| 160 |
+
|
| 161 |
+
```bash
|
| 162 |
+
IMG_B64="$(base64 -w0 /path/to/capture.jpg)"
|
| 163 |
+
curl -X POST "http://localhost:8000/face/verify-image" \
|
| 164 |
+
-H "Content-Type: application/json" \
|
| 165 |
+
-d "{\"employee_id\":\"1\",\"image\":\"data:image/jpeg;base64,$IMG_B64\",\"threshold\":0.75}"
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
### Clear embeddings
|
| 169 |
+
|
| 170 |
+
```bash
|
| 171 |
+
curl -X POST "http://localhost:8000/face/clear" \
|
| 172 |
+
-H "Content-Type: application/json" \
|
| 173 |
+
-d '{"employee_id":"1"}'
|
| 174 |
+
```
|
| 175 |
+
---
|
| 176 |
+
|
| 177 |
+
## Dev (local, no Docker)
|
| 178 |
+
|
| 179 |
+
```bash
|
| 180 |
+
pip install -r requirements.txt
|
| 181 |
+
export ORT_PROVIDERS=CPUExecutionProvider
|
| 182 |
+
uvicorn main:app --host 0.0.0.0 --port 8000 --reload
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
---
|
| 186 |
+
|
| 187 |
+
## IF CPU-only
|
| 188 |
+
|
| 189 |
+
### 1. Dockerfile (CPU)
|
| 190 |
+
|
| 191 |
+
```dockerfile
|
| 192 |
+
FROM python:3.11-slim
|
| 193 |
+
ENV DEBIAN_FRONTEND=noninteractive PYTHONUNBUFFERED=1 PYTHONDONTWRITEBYTECODE=1
|
| 194 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 195 |
+
libglib2.0-0 libsm6 libxrender1 libxext6 build-essential gcc g++ make \
|
| 196 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 197 |
+
WORKDIR /app
|
| 198 |
+
COPY requirements.txt /app/requirements.txt
|
| 199 |
+
# requirements.txt should use "onnxruntime" (CPU), not "onnxruntime-gpu"
|
| 200 |
+
RUN pip install --no-cache-dir -r /app/requirements.txt
|
| 201 |
+
COPY main.py /app/main.py
|
| 202 |
+
ENV ORT_PROVIDERS=CPUExecutionProvider FACE_DET_SIZE=480,480
|
| 203 |
+
EXPOSE 8000
|
| 204 |
+
CMD ["uvicorn","main:app","--host=0.0.0.0","--port=8000","--workers=1"]
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
#### Build & Run (CPU)
|
| 208 |
+
|
| 209 |
+
```bash
|
| 210 |
+
sudo docker build -t face-api-cpu .
|
| 211 |
+
sudo docker run --rm -it -p 8000:8000 \
|
| 212 |
+
-e ORT_PROVIDERS=CPUExecutionProvider \
|
| 213 |
+
-v $HOME/.insightface:/root/.insightface \
|
| 214 |
+
face-api-cpu
|
| 215 |
+
```
|
| 216 |
+
|
| 217 |
+
Open: [http://localhost:8000/docs](http://localhost:8000/docs)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
---
|
__pycache__/helper.cpython-312.pyc
ADDED
|
Binary file (4.35 kB). View file
|
|
|
__pycache__/liveness.cpython-312.pyc
ADDED
|
Binary file (3.22 kB). View file
|
|
|
__pycache__/main.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
__pycache__/main.cpython-312.pyc
ADDED
|
Binary file (18.2 kB). View file
|
|
|
__pycache__/main.cpython-313.pyc
ADDED
|
Binary file (19.8 kB). View file
|
|
|
helper.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
import os
|
| 3 |
+
import base64
|
| 4 |
+
import cv2
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
# Root directory of the on-disk face database: one subdirectory per
# enrolled employee id, each holding reference JPEG/PNG images.
FACE_DB_ROOT = "face_db"
os.makedirs(FACE_DB_ROOT, exist_ok=True)

# Scratch area for the most recent live capture per employee; the leading
# underscore keeps it from being mistaken for an employee-id directory.
LIVE_DB_ROOT = os.path.join(FACE_DB_ROOT, "_live")
os.makedirs(LIVE_DB_ROOT, exist_ok=True)
|
| 12 |
+
|
| 13 |
+
def _strip_b64(s: str) -> str:
|
| 14 |
+
if isinstance(s, str) and "," in s and s.lstrip().lower().startswith("data:"):
|
| 15 |
+
return s.split(",", 1)[1]
|
| 16 |
+
return s
|
| 17 |
+
|
| 18 |
+
def _b64_to_bgr(b64: str) -> Optional[np.ndarray]:
    """Decode a base64 string (optionally a data URL) into a BGR image.

    Returns None for anything that is not valid base64-encoded image data.
    """
    try:
        raw_bytes = base64.b64decode(_strip_b64(b64), validate=False)
        pixel_buf = np.frombuffer(raw_bytes, dtype=np.uint8)
        decoded = cv2.imdecode(pixel_buf, cv2.IMREAD_COLOR)
    except Exception:
        # Malformed base64 / non-image payloads all map to None.
        return None
    return decoded
| 25 |
+
|
| 26 |
+
def _bytes_to_bgr(data: bytes) -> Optional[np.ndarray]:
    """Decode raw encoded-image bytes (JPEG/PNG/...) into a BGR image.

    Returns None when the bytes cannot be decoded as an image.
    """
    try:
        pixel_buf = np.frombuffer(data, dtype=np.uint8)
        decoded = cv2.imdecode(pixel_buf, cv2.IMREAD_COLOR)
    except Exception:
        return None
    return decoded
| 32 |
+
|
| 33 |
+
def _frame_path_for(employee_id: str) -> str:
    """Return the live-capture JPEG path for *employee_id* under LIVE_DB_ROOT."""
    base_name = employee_id if employee_id else "live"
    # Slashes would escape the _live directory, so neutralize them.
    safe_name = base_name.replace("/", "_")
    return os.path.join(LIVE_DB_ROOT, safe_name + ".jpg")
| 36 |
+
|
| 37 |
+
class MotionVerifier:
    """Simple motion-based anti-spoofing check.

    Buffers face landmarks over the last N frames; a face whose landmarks
    barely move is likely a printed photo or a static screen replay.
    """

    def __init__(self, required_frames=5, threshold=1.5):
        # How many landmark frames must accumulate before a verdict.
        self.required_frames = required_frames
        # Minimum average landmark standard deviation (pixels) to pass.
        self.threshold = threshold
        self.landmarks_buffer = []

    def add_landmarks(self, landmarks):
        """Append one frame's landmarks, keeping only the newest frames."""
        buf = self.landmarks_buffer
        buf.append(landmarks)
        while len(buf) > self.required_frames:
            del buf[0]

    def is_ready(self):
        """True once enough frames are buffered to judge motion."""
        return len(self.landmarks_buffer) >= self.required_frames

    def check_motion(self):
        """Return ``(passed, reason)`` based on landmark movement.

        Reasons: "buffering" (not enough frames yet),
        "static_spoof_detected" (movement below threshold), "passed".
        """
        if not self.is_ready():
            return False, "buffering"
        stacked = np.array(self.landmarks_buffer)
        avg_movement = np.mean(np.std(stacked, axis=0))
        print(f"[Motion] Avg Movement: {avg_movement:.4f} px")
        if avg_movement >= self.threshold:
            return True, "passed"
        return False, "static_spoof_detected"

    def reset(self):
        """Discard all buffered landmarks."""
        self.landmarks_buffer = []
|
liveness.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import onnxruntime as ort
|
| 4 |
+
|
| 5 |
+
class LivenessDetector:
    """MiniFASNetV2 (Minivision) anti-spoofing classifier via ONNX Runtime.

    ``predict`` crops an enlarged square region around a detected face box,
    resizes it to the model's 80x80 input and returns class probabilities;
    index 1 is the "real face" class (callers check ``argmax == 1``).
    """

    def __init__(self, model_path, device="cpu"):
        # Prefer CUDA (with CPU fallback) only when explicitly requested.
        providers = (
            ['CUDAExecutionProvider', 'CPUExecutionProvider']
            if device == "cuda"
            else ['CPUExecutionProvider']
        )
        self.session = ort.InferenceSession(model_path, providers=providers)
        self.input_name = self.session.get_inputs()[0].name
        print(f"[INFO] Model loaded dari {model_path} pada device {device}")

    def _get_kernel(self, height, width):
        """Side length of the crop window.

        Mean of the box dimensions scaled by 2.7 — the crop scale the
        ``2.7_80x80`` MiniFASNetV2 variant is named after.
        """
        kernel_size = (height + width) / 2
        scale = 2.7
        return int(kernel_size * scale)

    def predict(self, img, bbox):
        """Run liveness classification on one detected face.

        Args:
            img: full frame, BGR.
            bbox: face bounding box [x1, y1, x2, y2] from RetinaFace.

        Returns:
            np.ndarray of softmax class probabilities (index 1 = real).
            BUGFIX: the empty-crop path used to return a ``(False, 0.0)``
            tuple while every other path returned an array; it now returns
            a definite "fake" distribution so the return type is consistent
            (callers still see argmax != 1 and real-probability 0.0).
        """
        h_img, w_img, _ = img.shape
        x1, y1, x2, y2 = map(int, bbox)

        box_h = y2 - y1
        box_w = x2 - x1

        center_x = x1 + box_w // 2
        center_y = y1 + box_h // 2

        side = self._get_kernel(box_h, box_w)

        # Clamp the enlarged square crop to the frame bounds.
        new_x1 = max(0, center_x - side // 2)
        new_y1 = max(0, center_y - side // 2)
        new_x2 = min(w_img, center_x + side // 2)
        new_y2 = min(h_img, center_y + side // 2)

        cropped = img[new_y1:new_y2, new_x1:new_x2]

        if cropped.size == 0:
            # Degenerate/out-of-frame box: report "fake" with zero
            # real-probability instead of the old inconsistent tuple.
            return np.array([1.0, 0.0], dtype=np.float32)

        # Preprocess: resize to the model's 80x80 input.
        resized = cv2.resize(cropped, (80, 80))

        # Float32 and HWC -> NCHW.
        blob = resized.astype(np.float32)
        blob = np.transpose(blob, (2, 0, 1))
        blob = np.expand_dims(blob, 0)

        result = self.session.run(None, {self.input_name: blob})

        probs = self._softmax(result[0][0])

        return probs

    def _softmax(self, x):
        """Numerically stable softmax over a 1-D logit vector."""
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum()
|
main.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import base64
|
| 3 |
+
import asyncio
|
| 4 |
+
import json
|
| 5 |
+
from typing import List, Optional, Iterable
|
| 6 |
+
|
| 7 |
+
# tool buat liveness check
|
| 8 |
+
from liveness import LivenessDetector
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import cv2
|
| 12 |
+
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Query, UploadFile, File, Form, HTTPException, Request
|
| 13 |
+
from pydantic import BaseModel, Field
|
| 14 |
+
from starlette.concurrency import run_in_threadpool
|
| 15 |
+
from starlette.middleware.cors import CORSMiddleware
|
| 16 |
+
from fastapi.responses import JSONResponse
|
| 17 |
+
|
| 18 |
+
from helper import _b64_to_bgr, _bytes_to_bgr, _frame_path_for
|
| 19 |
+
from helper import MotionVerifier
|
| 20 |
+
|
| 21 |
+
# Import Uniface (Single Library)
|
| 22 |
+
from uniface import RetinaFace, ArcFace
|
| 23 |
+
|
| 24 |
+
# Konfigurasi Threshold
|
| 25 |
+
# Stage 1: Detection Confidence (Pengganti Liveness Check sementara)
|
| 26 |
+
# Wajah dengan confidence di bawah ini dianggap tidak valid/buruk
|
| 27 |
+
# Threshold configuration.
# Stage 1: minimum RetinaFace detection confidence; detections below this
# are flagged as invalid/low-quality (stand-in quality gate before liveness).
DETECTION_THRESHOLD = 0.7

# Stage 2: minimum ArcFace cosine similarity for an authorized match.
SIM_THRESHOLD = 0.40

app = FastAPI(
    title="Face Verification API (Uniface Pure)",
    version="2.2.0",
    description="2-Stage Verification: High-Confidence Detection -> Uniface Recognition",
)

# Fully open CORS (any origin/method/header) — acceptable for a demo Space;
# tighten before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# On-disk face database: one subdirectory per employee id.
FACE_DB_ROOT = "face_db"
os.makedirs(FACE_DB_ROOT, exist_ok=True)

# Live-capture scratch area; underscore prefix keeps it out of id scans.
LIVE_DB_ROOT = os.path.join(FACE_DB_ROOT, "_live")
os.makedirs(LIVE_DB_ROOT, exist_ok=True)

# Serializes detector access across concurrent requests (see _run_pipeline).
processing_lock = asyncio.Lock()

# --- INITIALIZATION (UNIFACE and Minivision) ---
# Model registry; entries remain None if initialization below fails,
# and /health reports that state.
models = {
    "detector": None,
    "recognizer": None,
    "liveness": None,
}

try:
    print("[Uniface] Initializing RetinaFace (Stage 1)...")
    models["detector"] = RetinaFace()
    print("[Minivision] Initializing Liveness Detector...")
    models["liveness"] = LivenessDetector("2.7_80x80_MiniFASNetV2.onnx")
    print("[Uniface] Initializing ArcFace (Stage 2)...")
    models["recognizer"] = ArcFace()

except Exception as e:
    # Startup continues with partially-loaded models; endpoints guard on None.
    print(f"[Uniface] Error initializing models: {e}")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class EnrollRequest(BaseModel):
    # Employee to enroll; defaults to "1" for single-user demos.
    employee_id: str = Field(default="1")
    # One or more base64-encoded images (plain base64 or data URLs).
    images: List[str] = Field(..., description="Base64 images")

class VerifyImageRequest(BaseModel):
    # Employee to verify against; "1" by default.
    employee_id: str = Field(default="1")
    # Single base64-encoded probe image.
    image: str = Field(..., description="Base64 image")
    # Optional per-request similarity threshold; falls back to SIM_THRESHOLD.
    threshold: Optional[float] = Field(default=None)

class ClearRequest(BaseModel):
    # Employee whose enrolled embeddings/images should be removed.
    employee_id: str = Field(default="1")
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
async def _decode_image_from_request(request: Request, field_names: Iterable[str] = ("frame", "image")) -> Optional[np.ndarray]:
    """Extract and decode one image from an incoming HTTP request.

    Supports two content types:
      * multipart/form-data — the first field in *field_names* that holds an
        upload (file-like, raw bytes, or a base64 string) wins;
      * application/json (or text/json) — the first field holding a base64
        string / data URL wins.

    Returns the decoded BGR image, or None when nothing decodable is found.
    """
    ct = (request.headers.get("content-type") or "").lower()
    print(f"--- DEBUG START ---")
    print(f"1. Content-Type yang diterima: {ct}")
    if "multipart/form-data" in ct:
        form = await request.form()
        for name in field_names:
            file = form.get(name)
            if not file:
                continue

            # UploadFile-like object: rewind first in case it was read before.
            if hasattr(file, "read"):
                await file.seek(0)
                data = await file.read()
                img = _bytes_to_bgr(data)
                if img is not None:
                    return img

            # Raw bytes submitted directly in the form field.
            elif isinstance(file, bytes):
                img = _bytes_to_bgr(file)
                if img is not None:
                    return img

            # Try to decode a base64 string form field.
            elif isinstance(file, str):
                img = _b64_to_bgr(file)
                if img is not None:
                    return img

        return None
    if "application/json" in ct or "text/json" in ct:
        try:
            obj = await request.json()
        except Exception:
            # Malformed JSON body.
            return None
        for name in field_names:
            val = obj.get(name)
            if isinstance(val, str):
                img = _b64_to_bgr(val)
                if img is not None:
                    return img
        return None
    # Unsupported content type.
    return None
|
| 131 |
+
|
| 132 |
+
def _compute_embedding_uniface(img_bgr, landmarks):
    """Compute a normalized ArcFace embedding for one detected face.

    Returns None when the recognizer is not loaded or extraction fails.
    """
    recognizer = models["recognizer"]
    if recognizer is None:
        return None
    try:
        # get_normalized_embedding needs the full image plus the landmarks.
        return recognizer.get_normalized_embedding(img_bgr, landmarks)
    except Exception:
        return None
|
| 142 |
+
|
| 143 |
+
async def _run_pipeline(img: np.ndarray, target_id: str):
    """Full verification pipeline for one probe frame.

    Stages:
      1. RetinaFace detection + confidence gate (DETECTION_THRESHOLD);
      1.5 MiniFASNetV2 liveness check;
      2. ArcFace embedding + cosine-similarity search over the face_db.

    *target_id* is an employee-id directory name, or "*" to search all
    non-underscore directories. Returns a result dict, or None when no face
    is found / no embedding can be computed.
    """
    detector = models["detector"]
    liveness = models["liveness"]

    if detector is None or liveness is None:
        raise RuntimeError("Models not initialized")
    # --- STAGE 1: DETECTION & QUALITY CHECK ---
    # RetinaFace detection, serialized by the shared lock.
    # NOTE(review): lock scope reconstructed as covering only this detect
    # call — confirm against the original formatting.
    async with processing_lock:
        faces = await run_in_threadpool(detector.detect, img)

    if not faces:
        return None

    # Take the first face (highest confidence / largest area).
    target_face = faces[0]
    bbox = target_face['bbox']  # [x1, y1, x2, y2]
    landmarks = target_face['landmarks']
    confidence = target_face['confidence']

    x1, y1, x2, y2 = map(int, bbox)

    # Stage-1 filter: reject low-confidence detections outright.
    if confidence < DETECTION_THRESHOLD:

        return {
            "bbox": [float(x1), float(y1), float(x2), float(y2)],
            "match_user": None,
            "confidence": 0.0,
            "authorized": False,
            "det_score": float(confidence),
            "fake": True,  # flag as fake
            "model_threshold": DETECTION_THRESHOLD,
            "raw_distance": 0.0,
            "reason": "Low quality/confidence detection"
        }

    # STAGE 1.5 : LIVENESS CHECK (MINIFASNETV2)
    # (liveness is guaranteed non-None by the guard above, so live_score is
    # always bound before the final return below.)
    if liveness is not None:
        PROBS_REAL_IDX = 1
        probs = liveness.predict(img, bbox)
        max_probs = np.argmax(probs)
        live_score = probs[PROBS_REAL_IDX]

        if max_probs != PROBS_REAL_IDX:
            return {
                "bbox": [float(x1), float(y1), float(x2), float(y2)],
                "match_user": None,
                "confidence": 0.0,
                "authorized": False,
                "det_score": float(confidence),
                "fake": True,
                "model_threshold": DETECTION_THRESHOLD,
                "raw_distance": 0.0,
                "reason": "Liveness check failed",
                "live_score": float(live_score)
            }

    # --- STAGE 2: RECOGNITION (ArcFace) ---
    # Only reached after passing detection + liveness.
    probe_emb = await run_in_threadpool(_compute_embedding_uniface, img, landmarks)

    if probe_emb is None:
        return None

    best_score = -1.0
    best_user = None

    # Determine which users to check ("*" = all non-internal directories).
    search_dirs = []
    if target_id == "*":
        for d in os.listdir(FACE_DB_ROOT):
            if not d.startswith("_"):
                search_dirs.append(d)
    else:
        if os.path.exists(os.path.join(FACE_DB_ROOT, target_id)):
            search_dirs.append(target_id)

    # Search in DB: best cosine similarity across every reference image.
    for uid in search_dirs:
        user_dir = os.path.join(FACE_DB_ROOT, uid)
        files = os.listdir(user_dir)

        if not files:
            print(f"[DEBUG] Folder {uid} kosong")

        for fname in os.listdir(user_dir):
            if not fname.lower().endswith(('.jpg', '.png', '.jpeg')):
                continue

            ref_path = os.path.join(user_dir, fname)
            try:
                # Load reference image and re-run detection+embedding on it.
                # Note: In production, embeddings should be cached in
                # memory/database instead of being recomputed per request.
                ref_img = cv2.imread(ref_path)
                if ref_img is None: continue

                ref_faces = detector.detect(ref_img)

                if not ref_faces:
                    continue

                # Get embedding for the reference face.
                ref_emb = _compute_embedding_uniface(ref_img, ref_faces[0]['landmarks'])
                if ref_emb is not None:
                    probe_flat = probe_emb.flatten()
                    ref_flat = ref_emb.flatten()
                    # Cosine similarity (embeddings are already normalized,
                    # so a plain dot product suffices).
                    sim = np.dot(probe_flat, ref_flat)
                    if sim > best_score:
                        best_score = sim
                        best_user = uid
            except Exception:
                # Best-effort scan: a bad reference image must not abort
                # the whole search.
                continue

    authorized = bool(best_score > SIM_THRESHOLD)

    return {
        "bbox": [float(x1), float(y1), float(x2), float(y2)],
        "match_user": best_user if authorized else None,
        "confidence": round(float(best_score), 4),
        "authorized": authorized,
        "det_score": float(confidence),
        "fake": False,  # passed all stages
        "live_score": float(live_score),
        "model_threshold": SIM_THRESHOLD,
        "raw_distance": float(best_score)
    }
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
@app.get("/health")
async def health():
    """Health probe: reports whether all three models finished loading."""
    all_loaded = all(
        models[key] is not None
        for key in ("detector", "recognizer", "liveness")
    )
    status = "ok" if all_loaded else "models_loading_or_failed"
    return {"status": status, "system": "Uniface Pure (Stage1:Detect, Stage2:Recognize)"}
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
@app.post("/face/enroll")
async def face_enroll(payload: EnrollRequest):
    """Enroll base64-encoded reference images for one employee.

    Each decodable image is written to face_db/<employee_id>/face_<i>.jpg;
    undecodable entries are skipped silently.
    """
    if not payload.images:
        raise HTTPException(status_code=400, detail="No images provided")
    employee_id = payload.employee_id.strip() or "1"
    target_dir = os.path.join(FACE_DB_ROOT, employee_id)
    os.makedirs(target_dir, exist_ok=True)
    saved = 0
    for encoded in payload.images:
        frame = _b64_to_bgr(encoded)
        if frame is None:
            continue
        cv2.imwrite(os.path.join(target_dir, f"face_{saved}.jpg"), frame)
        saved += 1
    return {"employee_id": employee_id, "added": saved, "total": len(os.listdir(target_dir))}
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@app.post("/face/enroll-files")
async def face_enroll_files(
    employee_id: str = Form(default="1"),
    files: List[UploadFile] = File(default=[]),
):
    """Add multipart-uploaded reference images for an employee to the face DB.

    Unreadable or undecodable uploads are skipped (best-effort). Raises 400
    when no files were uploaded.

    Returns: {"employee_id", "added" (images written), "total" (files now in folder)}.

    Fix: file names previously restarted at face_0.jpg on every call, so a
    second enroll request OVERWROTE previously enrolled images. Numbering
    now continues past existing files.
    """
    employee_id = employee_id.strip() or "1"
    if not files:
        raise HTTPException(status_code=400, detail="No files uploaded")
    save_dir = os.path.join(FACE_DB_ROOT, employee_id)
    os.makedirs(save_dir, exist_ok=True)
    count = 0
    # Start numbering after whatever is already enrolled for this employee.
    next_idx = len(os.listdir(save_dir))
    for f in files:
        try:
            data = await f.read()
            img = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)
            if img is None:
                continue
            # Index holes can exist (e.g. after manual deletes); skip collisions.
            while os.path.exists(os.path.join(save_dir, f"face_{next_idx}.jpg")):
                next_idx += 1
            cv2.imwrite(os.path.join(save_dir, f"face_{next_idx}.jpg"), img)
            next_idx += 1
            count += 1
        except Exception:
            # Keep the loop best-effort: one bad upload must not abort the batch.
            continue
    return {"employee_id": employee_id, "added": count, "total": len(os.listdir(save_dir))}
|
| 323 |
+
|
| 324 |
+
@app.websocket("/face/verify")
async def face_verify_ws(
    websocket: WebSocket,
    employee_id: str = Query(default="1"),
    threshold: float = Query(default=SIM_THRESHOLD),
):
    """Streaming face verification over WebSocket.

    Per received frame (binary bytes, or JSON text with a base64 "frame"/"image"
    field) the handler runs: detection -> motion-based liveness accumulation
    (MotionVerifier over 5 frames) -> full recognition pipeline (_run_pipeline),
    and sends a JSON status/result message back for each frame.

    NOTE(review): the `threshold` query parameter is echoed in responses but
    _run_pipeline authorizes against the module-level SIM_THRESHOLD — confirm
    whether the client-supplied threshold should be honored.

    Fix: the original wrapped the loop in `try: ... finally: return`. A bare
    `return` inside `finally` swallows EVERY in-flight exception — including
    asyncio.CancelledError on server shutdown and send failures on branches
    not individually guarded — masking real errors. The finally-return is
    removed; only disconnects (and Starlette's post-disconnect RuntimeError
    on send) are absorbed at the outer level.
    """
    await websocket.accept()
    employee_id = employee_id.strip() or "1"

    motion_verifier = MotionVerifier(required_frames=5)

    try:
        while True:
            try:
                msg = await websocket.receive()
            except WebSocketDisconnect:
                break

            # --- Decode the incoming frame (binary preferred, JSON fallback) ---
            img = None
            if "bytes" in msg and msg["bytes"] is not None:
                img = _bytes_to_bgr(msg["bytes"])
            elif "text" in msg and msg["text"] is not None:
                try:
                    obj = json.loads(msg["text"])
                    s = obj.get("frame") or obj.get("image")
                    if isinstance(s, str):
                        img = _b64_to_bgr(s)
                except Exception:
                    img = None

            if img is None:
                try:
                    await websocket.send_json({"error": "no/invalid frame"})
                except (RuntimeError, WebSocketDisconnect):
                    break
                continue

            if models["detector"] is None:
                await websocket.send_json({"error": "Model not loaded"})
                break

            # Detection is CPU-heavy; keep the event loop responsive.
            faces = await run_in_threadpool(models["detector"].detect, img)

            if not faces:
                # Losing the face invalidates the motion history.
                motion_verifier.reset()
                await websocket.send_json({
                    "status": "searching",
                    "message": "Wajah tidak ditemukan, posisikan wajah di tengah..."
                })
                continue

            # --- Motion liveness: accumulate landmarks across frames ---
            current_landmarks = faces[0]['landmarks']
            motion_verifier.add_landmarks(current_landmarks)

            if not motion_verifier.is_ready():
                progress = len(motion_verifier.landmarks_buffer)
                await websocket.send_json({
                    "status": "analyzing_motion",
                    "progress": f"{progress}/{motion_verifier.required_frames}",
                    "message": "Tahan posisi wajah, menganalisa gerakan..."
                })
                continue

            passed_motion, reason = motion_verifier.check_motion()

            if not passed_motion:
                motion_verifier.reset()
                await websocket.send_json({
                    "status": "failed",
                    "authorized": False,
                    "reason": reason,
                    "motion_check": False
                })
                continue

            # --- Full recognition pipeline ---
            try:
                det = await _run_pipeline(img, employee_id)
            except Exception as e:
                try:
                    await websocket.send_json({"error": f"{type(e).__name__}: {e}"})
                except (RuntimeError, WebSocketDisconnect):
                    break
                continue

            if det is None:
                payload = {
                    "employee_id": employee_id,
                    "threshold": threshold,
                    "detections": [],
                    "count": 0,
                    "authorized": False,
                    "reason": "no face found",
                }
            else:
                payload = {
                    "employee_id": employee_id,
                    "threshold": threshold,
                    "detections": [det],
                    "count": 1,
                    "authorized": bool(det["authorized"]),
                }

            try:
                await websocket.send_json(payload)
            except (RuntimeError, WebSocketDisconnect):
                break
    except (WebSocketDisconnect, RuntimeError):
        # Client disconnected between guarded sections (or a send raced a
        # disconnect on an unguarded branch). Nothing left to clean up.
        pass
|
| 432 |
+
|
| 433 |
+
@app.post("/face/clear")
async def face_clear(payload: ClearRequest):
    """Best-effort removal of every stored reference image for an employee.

    Deletes each file in the employee's face-DB folder, then tries to remove
    the (now hopefully empty) folder itself. Individual failures are ignored.

    Returns: {"employee_id", "removed" (number of files deleted)}.
    """
    employee_id = payload.employee_id.strip() or "1"
    folder = os.path.join(FACE_DB_ROOT, employee_id)
    removed = 0
    if os.path.isdir(folder):
        for entry in os.listdir(folder):
            try:
                os.remove(os.path.join(folder, entry))
            except Exception:
                continue
            removed += 1
        # Folder removal fails if anything survived above; that's acceptable.
        try:
            os.rmdir(folder)
        except Exception:
            pass
    return {"employee_id": employee_id, "removed": removed}
|
packages.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
libgl1
|
| 2 |
+
libgl1-mesa-glx
|
| 3 |
+
libglib2.0-0
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi
|
| 2 |
+
uvicorn[standard]
|
| 3 |
+
numpy
|
| 4 |
+
# onnxruntime-gpu  # disabled: Dockerfile uses a CPU-only base image (python:3.10-slim, no CUDA); installing it alongside onnxruntime conflicts
|
| 5 |
+
onnxruntime
|
| 6 |
+
pydantic
|
| 7 |
+
starlette
|
| 8 |
+
python-multipart
|
| 9 |
+
Pillow
|
| 10 |
+
typing_extensions
|
| 11 |
+
uniface
|
| 12 |
+
opencv-python
|