Update app.py
app.py CHANGED
@@ -1,283 +1,295 @@
- import
  import cv2
  import numpy as np
  from datetime import datetime
- from
  import faiss
- import
- import

- # Ensure models directory exists
- os.makedirs(os.path.dirname(model_path), exist_ok=True)
- # Check if model_path is a directory
- if os.path.isdir(model_path):
-     raise IsADirectoryError(
-         f"'{model_path}' is a directory, not a file. Please follow these steps:\n"
-         "1. Remove the directory using 'rm -r models/facenet_keras.h5'.\n"
-         "2. Download the FaceNet model '20180402-114759' from:\n"
-         "   https://drive.google.com/file/d/1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-/view\n"
-         "3. Extract the zip file with 'unzip 20180402-114759.zip -d models/'.\n"
-         "4. Rename or move the .h5 file to 'models/facenet_keras.h5', e.g.,\n"
-         "   'mv models/20180402-114759.h5 models/facenet_keras.h5'.\n"
-         "5. Verify the file with 'ls -l models/facenet_keras.h5' (should show a file ~90 MB).\n"
-         "6. Check file permissions with 'chmod 644 models/facenet_keras.h5'.\n"
-         "7. Ensure a stable internet connection to avoid corrupted downloads.\n"
-         "8. Restart the application with 'python app.py'."
-     )
- # Check if file is readable
- if not os.access(model_path, os.R_OK):
-     raise PermissionError(
-         f"Cannot read '{model_path}'. Fix permissions with 'chmod 644 models/facenet_keras.h5' "
-         "and ensure directory access with 'chmod -R u+rw models'. Then restart with 'python app.py'."
-     )
  try:
  except Exception as e:
-         "2. Download the model '20180402-114759' from:\n"
-         "   https://drive.google.com/file/d/1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-/view\n"
-         "3. Extract the zip file with 'unzip 20180402-114759.zip -d models/'.\n"
-         "4. Rename or move the .h5 file to 'models/facenet_keras.h5', e.g.,\n"
-         "   'mv models/20180402-114759.h5 models/facenet_keras.h5'.\n"
-         "5. Verify the file with 'ls -l models/facenet_keras.h5' (should show a file ~90 MB).\n"
-         "6. Check file permissions with 'chmod 644 models/facenet_keras.h5'.\n"
-         "7. Ensure a stable internet connection to avoid corrupted downloads.\n"
-         "8. If the file is empty or <90 MB, re-download it.\n"
-         "9. Restart the application with 'python app.py'."
-     )
-     return facenet_model
-
- def detect_faces(frame):
-     """Detect faces in a frame using MTCNN and return cropped face images."""
-     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-     faces = detector.detect_faces(rgb_frame)
-     cropped_faces = []
-     boxes = []
-     for face in faces:
-         x, y, w, h = face['box']
-         x, y = max(0, x), max(0, y)  # Ensure coordinates are within bounds
-         cropped = rgb_frame[y:y+h, x:x+w]
-         if cropped.size > 0:
-             cropped = Image.fromarray(cropped).resize((160, 160))  # FaceNet input size
-             cropped_faces.append(np.array(cropped))
-             boxes.append((x, y, w, h))
-     return cropped_faces, boxes
-
- def get_embedding(face_image):
-     """Generate 128D embedding using FaceNet."""
-     if facenet_model is None:
-         raise ValueError("FaceNet model not loaded.")
-     face_array = np.array(face_image, dtype=np.float32)
-     face_array = (face_array - 127.5) / 128.0  # FaceNet preprocessing
-     face_array = np.expand_dims(face_array, axis=0)  # Shape: (1, 160, 160, 3)
-     embedding = facenet_model.predict(face_array, verbose=0)
-     return embedding[0]  # 128D vector
-
- def initialize_worker_db():
-     """Initialize or load known worker embeddings database."""
-     db_path = "worker_embeddings.json"
-     if os.path.exists(db_path):
-         with open(db_path, 'r') as f:
-             data = json.load(f)
-         embeddings = np.array(data.get('embeddings', []), dtype=np.float32)
-         names = data.get('names', [])
-     else:
-         embeddings = np.empty((0, 128), dtype=np.float32)
-         names = []
-     return embeddings, names, db_path
-
- def save_worker_db(embeddings, names, db_path):
-     """Save worker embeddings to JSON."""
-     with open(db_path, 'w') as f:
-         json.dump({'embeddings': embeddings.tolist(), 'names': names}, f)
-
- def match_embedding(new_embedding, known_embeddings, known_names, threshold=0.6):
-     """Match new embedding using Cosine KNN."""
-     if known_embeddings.shape[0] == 0:
-         return "Unknown", 0.0
-     knn = NearestNeighbors(n_neighbors=1, metric="cosine")
-     knn.fit(known_embeddings)
-     distance, index = knn.kneighbors([new_embedding], return_distance=True)
-     similarity = 1 - distance[0][0]  # Convert distance to similarity
-     if similarity > threshold:
-         return known_names[index[0][0]], similarity
-     return "Unknown", similarity

-     queue = json.load(f)
-     queue.append({
-         "worker_id": None,
-         "embedding": embedding.tolist(),
-         "timestamp": timestamp,
-         "camera_id": camera_id,
-         "verification_status": "Pending"
-     })
-     with open(queue_path, 'w') as f:
-         json.dump(queue, f)
-     return queue

-     except (RuntimeError, IsADirectoryError, PermissionError) as e:
-         error_message = (
-             f"{str(e)}\n\n"
-             "To resolve this:\n"
-             "1. If 'models/facenet_keras.h5' is a directory, remove it with 'rm -r models/facenet_keras.h5'.\n"
-             "2. Download the FaceNet model '20180402-114759' from:\n"
-             "   https://drive.google.com/file/d/1EXPBSXwTaqrSC0OhUdXNmKSh9qJUQ55-/view\n"
-             "3. Extract the zip file with 'unzip 20180402-114759.zip -d models/'.\n"
-             "4. Rename or move the .h5 file to 'models/facenet_keras.h5', e.g.,\n"
-             "   'mv models/20180402-114759.h5 models/facenet_keras.h5'.\n"
-             "5. Verify the file with 'ls -l models/facenet_keras.h5' (should show a file ~90 MB).\n"
-             "6. Check file permissions with 'chmod 644 models/facenet_keras.h5' and 'chmod -R u+rw models'.\n"
-             "7. Ensure a stable internet connection to avoid corrupted downloads.\n"
-             "8. If the file is empty or <90 MB, re-download it.\n"
-             "9. Restart the application with 'python app.py'."
-         )
-         gr.Error(error_message)
-         raise

-     # Convert Gradio image (PIL) to OpenCV format
-     frame = np.array(frame)
-     frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-
-     # Detect faces
-     cropped_faces, face_boxes = detect_faces(frame)
-     output_frame = frame.copy()
  else:

- def
-     """
-     global known_embeddings, known_names
-     if not worker_name or face_image is None:
-         return "Please provide a name and image.", None, None, None
-
-     # Process uploaded image
  try:
-         embedding
-         else:
-             known_embeddings = np.vstack([known_embeddings, embedding])
-             known_names.append(worker_name)
-             save_worker_db(known_embeddings, known_names, db_path)
  except Exception as e:

- # Gradio interface
  )

  if __name__ == "__main__":
+ import os
  import cv2
  import numpy as np
+ import pandas as pd
  from datetime import datetime
+ import gradio as gr
+ from twilio.rest import Client
+ from transformers import AutoModel, AutoFeatureExtractor
+ import torch
+ from sklearn.metrics.pairwise import cosine_similarity
  import faiss
+ import logging
+ import pickle
+ import time
+
+ # Setup logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+ # Environment variables for Twilio
+ TWILIO_SID = os.getenv("TWILIO_ACCOUNT_SID")
+ TWILIO_TOKEN = os.getenv("TWILIO_AUTH_TOKEN")
+ TWILIO_PHONE = os.getenv("TWILIO_PHONE_NUMBER")
+ MANAGER_PHONE = os.getenv("MANAGER_PHONE", "+1234567890")  # Default for testing; override in Hugging Face Secrets
+
+ # Initialize Twilio client
+ twilio_client = None
+ if TWILIO_SID and TWILIO_TOKEN and TWILIO_PHONE:
      try:
+         twilio_client = Client(TWILIO_SID, TWILIO_TOKEN)
+         logger.info("Twilio client initialized successfully")
      except Exception as e:
+         logger.error(f"Failed to initialize Twilio client: {e}")
+ else:
+     logger.warning("Twilio credentials not set, notifications disabled")

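For a local smoke test of the notification path, the credentials can be exported before launch; the values below are placeholders, not real credentials (the sandbox sender shown is commonly Twilio's WhatsApp sandbox number, but verify it in your Twilio console):

# Local smoke test only -- placeholder values; in a Space, set these as Secrets.
import os
os.environ["TWILIO_ACCOUNT_SID"] = "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
os.environ["TWILIO_AUTH_TOKEN"] = "your_auth_token"
os.environ["TWILIO_PHONE_NUMBER"] = "+14155238886"  # WhatsApp sandbox sender (assumption)
os.environ["MANAGER_PHONE"] = "+15551234567"        # Hypothetical recipient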
+ # Load the embedding model from Hugging Face (a general-purpose ViT used as a stand-in for FaceNet)
+ try:
+     feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
+     model = AutoModel.from_pretrained("google/vit-base-patch16-224")
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     model.eval()
+     logger.info("FaceNet model loaded successfully")
+ except Exception as e:
+     logger.error(f"Failed to load FaceNet model: {e}")
+     raise e

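Despite the log text, google/vit-base-patch16-224 is an ImageNet classifier, not a face-recognition network, so its embeddings separate identities only coarsely. One possible swap is a purpose-built face embedder; a sketch assuming the third-party facenet-pytorch package (its output is 512-d, so `dimension` below would change from 768 to 512):

# Sketch only: face-specific embeddings via facenet-pytorch (assumed dependency).
import numpy as np
import torch
from facenet_pytorch import InceptionResnetV1

facenet = InceptionResnetV1(pretrained="vggface2").eval()  # 512-d embeddings

def facenet_embedding(face_rgb):
    # face_rgb: HxWx3 uint8 crop; FaceNet expects 160x160, roughly standardized pixels
    face = cv2.resize(face_rgb, (160, 160)).astype(np.float32)
    x = torch.from_numpy((face - 127.5) / 128.0).permute(2, 0, 1).unsqueeze(0)
    with torch.no_grad():
        return facenet(x).numpy()  # shape (1, 512)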
+ # Initialize FAISS index for embedding matching
+ dimension = 768  # ViT-Base hidden size (the CLS-token embedding used below)
+ index = faiss.IndexFlatL2(dimension)
+ worker_db = {}  # Maps worker_id -> {"name": ..., "embedding": ...}
+ db_file = "worker_db.pkl"
+ log_file = "attendance_logs.csv"

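A caveat on the metric: process_frame below converts distance to similarity as 1 - D/2. IndexFlatL2 reports squared L2 distance, and for unit vectors ||x - y||^2 = 2 - 2*cos(x, y), so cos = 1 - D/2 holds exactly only for unit-norm embeddings; the ViT embeddings here are not normalized. A sketch of a cosine-faithful setup, normalizing before both add and search:

# Sketch: unit-normalize so the 1 - D/2 conversion is exact.
def normalized(vec):
    v = vec.astype(np.float32).reshape(1, -1)
    faiss.normalize_L2(v)  # in-place L2 normalization
    return v

# index.add(normalized(embedding))   ...and later:
# D, I = index.search(normalized(query), 1)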
+ # Load existing worker database if available
+ if os.path.exists(db_file):
+     try:
+         with open(db_file, "rb") as f:
+             worker_db = pickle.load(f)
+         for worker_id, data in worker_db.items():
+             embedding = np.array(data["embedding"], dtype=np.float32)
+             index.add(embedding.reshape(1, -1))
+         logger.info(f"Loaded {len(worker_db)} workers from database")
+     except Exception as e:
+         logger.error(f"Failed to load worker database: {e}")

+ # Initialize attendance logs
+ if not os.path.exists(log_file):
+     pd.DataFrame(columns=["worker_id", "name", "timestamp", "camera_id", "verification_status"]).to_csv(log_file, index=False)

+ # Buffer for camera feed loss
+ frame_buffer = []
+ MAX_BUFFER_SIZE = 100

+ def extract_embedding(image):
+     """Extract a face embedding from the ViT model (CLS token)."""
+     try:
+         inputs = feature_extractor(images=image, return_tensors="pt")
+         inputs = {k: v.to(device) for k, v in inputs.items()}
+         with torch.no_grad():
+             outputs = model(**inputs)
+         embedding = outputs.last_hidden_state[:, 0, :].cpu().numpy()  # CLS token, shape (1, 768)
+         return embedding
+     except Exception as e:
+         logger.error(f"Error extracting embedding: {e}")
+         return None

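A quick sanity check of the embedding shape (the image path is hypothetical):

from PIL import Image
emb = extract_embedding(Image.open("sample_face.jpg"))  # hypothetical test image
print(None if emb is None else emb.shape)  # expect (1, 768)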
+ def detect_faces(frame):
+     """Detect faces in a BGR frame using OpenCV's Haar cascade."""
+     try:
+         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+         face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
+         faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
+         return faces
+     except Exception as e:
+         logger.error(f"Error detecting faces: {e}")
+         return []

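Minor efficiency note: the cascade file is re-read from disk on every call; it could instead be loaded once at module level, e.g.:

# Load the cascade once instead of per frame (same file the function uses).
FACE_CASCADE = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")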
+ def process_frame(frame, camera_id="Gate_1"):
+     """Process a single video frame for face recognition."""
+     global frame_buffer
+     try:
+         # Buffer frame for retry mechanism
+         if len(frame_buffer) < MAX_BUFFER_SIZE:
+             frame_buffer.append((frame.copy(), camera_id))
+
+         faces = detect_faces(frame)
+         results = []
+
+         for (x, y, w, h) in faces:
+             face = frame[y:y+h, x:x+w]
+             face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+             embedding = extract_embedding(face_rgb)
+
+             if embedding is None:
+                 continue
+
+             # Search for nearest neighbor
+             embedding_np = embedding.astype(np.float32)
+             D, I = index.search(embedding_np, 1)
+             score = 1 - (D[0][0] / 2)  # Convert L2 distance to similarity (exact only for unit-norm embeddings)
+
+             timestamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
+             if score > 0.6 and I[0][0] >= 0:  # Threshold for known worker
+                 worker_id = list(worker_db.keys())[I[0][0]]
+                 name = worker_db[worker_id]["name"]
+                 verification_status = "Verified"
              else:
+                 worker_id = None
+                 name = "Unknown"
+                 verification_status = "Pending"
+                 # Placeholder for Salesforce verification task
+                 logger.info(f"Unknown face detected, would create Salesforce task for camera {camera_id} at {timestamp}")
+
+             # Check for duplicates (same worker, same day)
+             today = datetime.now().date().strftime("%Y-%m-%d")
+             logs = pd.read_csv(log_file)
+             if worker_id and not logs[(logs["worker_id"] == worker_id) & (logs["timestamp"].str.contains(today))].empty:
+                 logger.info(f"Duplicate entry for worker {worker_id} on {today}, skipping")
+                 continue
+
+             # Log attendance
+             log_entry = {
+                 "worker_id": worker_id,
+                 "name": name,
+                 "timestamp": timestamp,
+                 "camera_id": camera_id,
+                 "verification_status": verification_status
+             }
+             pd.DataFrame([log_entry]).to_csv(log_file, mode="a", header=False, index=False)
+             results.append(f"Detected: {name} at {timestamp} (Status: {verification_status})")
+
+             # Send Twilio notification for unknown faces
+             if verification_status == "Pending" and twilio_client:
+                 try:
+                     message = twilio_client.messages.create(
+                         from_=f"whatsapp:{TWILIO_PHONE}",
+                         body=f"Unknown face detected at {camera_id} on {timestamp}. Please verify.",
+                         to=f"whatsapp:{MANAGER_PHONE}"
+                     )
+                     logger.info(f"Twilio notification sent: {message.sid}")
+                 except Exception as e:
+                     logger.error(f"Failed to send Twilio notification: {e}")
+
+         return "\n".join(results) or "No faces detected"
+     except Exception as e:
+         logger.error(f"Error processing frame: {e}")
+         return f"Error: {str(e)}"

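The lookup list(worker_db.keys())[I[0][0]] works only because Python dicts preserve insertion order and embeddings are appended to the FAISS index in the same order workers enter worker_db. An explicit row-to-id list makes that invariant visible; a sketch (worker_ids and register_embedding are names introduced here, not part of the commit):

worker_ids = []  # worker_ids[i] is the worker whose embedding sits in FAISS row i

def register_embedding(worker_id, embedding):
    index.add(embedding.astype(np.float32).reshape(1, -1))
    worker_ids.append(worker_id)

# after D, I = index.search(...):  matched_id = worker_ids[I[0][0]]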
+ def retry_buffer():
+     """Retry processing buffered frames after camera/internet downtime."""
+     global frame_buffer
+     # Swap the buffer out first: process_frame re-buffers frames it handles,
+     # so popping and reprocessing in place would loop forever.
+     pending, frame_buffer = frame_buffer, []
+     for frame, camera_id in pending:
+         process_frame(frame, camera_id)
+     logger.info("Buffer cleared")

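Nothing in the file actually calls retry_buffer (and the `time` import is otherwise unused); one way to wire it up is a daemon thread that drains the buffer periodically. A sketch, with an arbitrary interval:

import threading

def _retry_loop(interval_s=30):  # hypothetical helper, interval chosen arbitrarily
    while True:
        time.sleep(interval_s)
        if frame_buffer:
            retry_buffer()

threading.Thread(target=_retry_loop, daemon=True).start()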
+ def generate_daily_report():
+     """Generate daily attendance report."""
+     try:
+         logs = pd.read_csv(log_file)
+         today = datetime.now().date().strftime("%Y-%m-%d")
+         today_logs = logs[logs["timestamp"].str.contains(today)]
+
+         total_workers = len(today_logs)
+         new_workers = len(today_logs[today_logs["verification_status"] == "Pending"])
+         verified_workers = len(today_logs[today_logs["verification_status"] == "Verified"])
+
+         report = f"""
+ Daily Attendance Report ({today})
+ Total Workers: {total_workers}
+ Verified Workers: {verified_workers}
+ New/Unverified Workers: {new_workers}
+ """
+
+         # Send report via Twilio
+         if twilio_client:
+             try:
+                 message = twilio_client.messages.create(
+                     from_=f"whatsapp:{TWILIO_PHONE}",
+                     body=report,
+                     to=f"whatsapp:{MANAGER_PHONE}"
+                 )
+                 logger.info(f"Daily report sent via Twilio: {message.sid}")
+             except Exception as e:
+                 logger.error(f"Failed to send daily report: {e}")
+
+         return report
+     except Exception as e:
+         logger.error(f"Error generating report: {e}")
+         return f"Error: {str(e)}"

+ def add_worker(name, image):
+     """Manually add a new worker (for testing)."""
      try:
+         embedding = extract_embedding(image)
+         if embedding is None:
+             return "Failed to extract embedding"
+
+         worker_id = f"W{len(worker_db) + 1:04d}"
+         worker_db[worker_id] = {"name": name, "embedding": embedding}
+
+         # Update FAISS index
+         index.add(embedding.astype(np.float32).reshape(1, -1))
+
+         # Save to database
+         with open(db_file, "wb") as f:
+             pickle.dump(worker_db, f)
+
+         logger.info(f"Added worker {worker_id}: {name}")
+         return f"Worker {name} added successfully"
      except Exception as e:
+         logger.error(f"Error adding worker: {e}")
+         return f"Error: {str(e)}"

+ # Gradio interface for Hugging Face Space
+ def gradio_process_image(image):
+     """Process uploaded image via Gradio interface."""
+     try:
+         image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+         return process_frame(image)
+     except Exception as e:
+         logger.error(f"Error in Gradio processing: {e}")
+         return f"Error: {str(e)}"
+
+ def gradio_video_feed():
+     """Simulate video feed for Gradio (for demo)."""
+     cap = cv2.VideoCapture(0)  # Opens the default webcam (device 0)
+     while True:
+         ret, frame = cap.read()
+         if not ret:
+             break
+         result = process_frame(frame)
+         yield frame, result
+     cap.release()
+
+ iface = gr.Interface(
+     fn=gradio_process_image,
+     inputs=gr.Image(type="pil", label="Upload Image for Face Detection"),
+     outputs="text",
+     title="SLAV System - Face Recognition",
+     description="Upload an image to test face recognition or use the live feed below."
+ )

+ # Add another tab for video feed
+ with gr.Blocks() as video_tab:
+     gr.Markdown("## Live Video Feed (Demo)")
+     video_output = gr.Image(label="Live Feed")
+     text_output = gr.Textbox(label="Detection Results")
+     gr.Interface(
+         fn=gradio_video_feed,
+         inputs=None,
+         outputs=[video_output, text_output],
+         live=True
      )

+ # Add report and worker management tabs
+ with gr.Blocks() as report_tab:
+     gr.Markdown("## Daily Report")
+     report_button = gr.Button("Generate Report")
+     report_output = gr.Textbox(label="Report")
+     report_button.click(generate_daily_report, outputs=report_output)
+
+ with gr.Blocks() as worker_tab:
+     gr.Markdown("## Add New Worker")
+     name_input = gr.Textbox(label="Worker Name")
+     image_input = gr.Image(type="pil", label="Worker Face Image")
+     add_button = gr.Button("Add Worker")
+     add_output = gr.Textbox(label="Result")
+     add_button.click(add_worker, inputs=[name_input, image_input], outputs=add_output)

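As written, only iface is launched below; video_tab, report_tab, and worker_tab are built but never mounted, so those screens are unreachable. One way to expose all four is gr.TabbedInterface; a sketch (tab labels are illustrative):

app = gr.TabbedInterface(
    [iface, video_tab, report_tab, worker_tab],
    ["Image Check", "Live Feed", "Daily Report", "Add Worker"]
)
# then launch `app` instead of `iface` in the __main__ block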
  if __name__ == "__main__":
+     # Start Gradio interface
+     iface.launch(server_name="0.0.0.0", server_port=7860)