ecaaa09 committed on
Commit
da4ae75
·
1 Parent(s): 687fe58

Deploy SmartFace backend to Hugging Face Spaces

Browse files
Files changed (11) hide show
  1. .env.example +17 -0
  2. .gitignore +26 -0
  3. .python-version +1 -0
  4. Dockerfile +39 -0
  5. Procfile +1 -0
  6. README.md +21 -6
  7. app.py +452 -0
  8. railway.json +12 -0
  9. render.yaml +17 -0
  10. requirements.txt +13 -0
  11. runtime.txt +1 -0
.env.example ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Flask Configuration
2
+ FLASK_ENV=production
3
+ PORT=5000
4
+ HOST=0.0.0.0
5
+
6
+ # CORS Configuration - Add your Vercel frontend URL here
7
+ FRONTEND_URL=http://localhost:5173
8
+ # Production: FRONTEND_URL=https://your-app.vercel.app
9
+
10
+ # Model Configuration
11
+ MODEL_PATH=best_gacor.pth
12
+ IMG_SIZE=224
13
+
14
+ # Hugging Face Model (Optional - for deployment without committing .pth file)
15
+ USE_HUGGINGFACE=true
16
+ HF_MODEL_REPO=elsaelisa09/smartface-attendance-model
17
+ # HF_TOKEN=your_token_here (only needed for private models)
.gitignore ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Environment variables
2
+ .env
3
+
4
+ # Python
5
+ __pycache__/
6
+ *.py[cod]
7
+ *$py.class
8
+ *.so
9
+ .Python
10
+ env/
11
+ venv/
12
+ ENV/
13
+
14
+ # Model files (downloaded from Hugging Face)
15
+ *.pth
16
+ *.pt
17
+ model_cache/
18
+
19
+ # Data files
20
+ attendance.json
21
+
22
+ # IDE
23
+ .vscode/
24
+ .idea/
25
+ *.swp
26
+ *.swo
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.12.7
Dockerfile ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use Python 3.12
2
+ FROM python:3.12-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies
8
+ RUN apt-get update && apt-get install -y \
9
+ libglib2.0-0 \
10
+ libsm6 \
11
+ libxext6 \
12
+ libxrender-dev \
13
+ libgomp1 \
14
+ && rm -rf /var/lib/apt/lists/*
15
+
16
+ # Copy requirements first for better caching
17
+ COPY requirements.txt .
18
+
19
+ # Install Python dependencies
20
+ RUN pip install --no-cache-dir -r requirements.txt
21
+
22
+ # Copy application files
23
+ COPY app.py .
24
+ COPY attendance.json .
25
+ COPY .env.example .
26
+
27
+ # Create model cache directory
28
+ RUN mkdir -p model_cache
29
+
30
+ # Expose port
31
+ EXPOSE 7860
32
+
33
+ # Set environment variables
34
+ ENV FLASK_ENV=production
35
+ ENV PORT=7860
36
+ ENV HOST=0.0.0.0
37
+
38
+ # Run the application
39
+ CMD ["gunicorn", "app:app", "--bind", "0.0.0.0:7860", "--workers", "1", "--timeout", "300", "--preload"]
Procfile ADDED
@@ -0,0 +1 @@
 
 
1
+ web: gunicorn app:app --bind 0.0.0.0:$PORT --workers 1 --timeout 120
README.md CHANGED
@@ -1,11 +1,26 @@
1
  ---
2
- title: Smartface
3
- emoji: 📊
4
- colorFrom: gray
5
- colorTo: pink
6
  sdk: docker
7
  pinned: false
8
- license: mit
9
  ---
10
 
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: SmartFace Backend
3
+ emoji: 🤖
4
+ colorFrom: blue
5
+ colorTo: green
6
  sdk: docker
7
  pinned: false
 
8
  ---
9
 
10
+ # SmartFace Attendance Backend
11
+
12
+ Face recognition backend for SmartFace attendance system using ResNet50 with ArcFace.
13
+
14
+ ## Features
15
+ - Face detection using MTCNN
16
+ - Face recognition using ResNet50 + ArcFace
17
+ - Attendance tracking
18
+ - CORS enabled for frontend integration
19
+
20
+ ## API Endpoints
21
+ - `GET /` - API info
22
+ - `GET /health` - Health check
23
+ - `POST /recognize` - Face recognition
24
+ - `POST /mark-attendance` - Mark attendance
25
+ - `GET /attendance` - Get attendance records
26
+ - `DELETE /attendance/<id>` - Delete attendance record
app.py ADDED
@@ -0,0 +1,452 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ from flask_cors import CORS
3
+ import torch
4
+ import torch.nn as nn
5
+ from torchvision import transforms, models
6
+ from PIL import Image
7
+ import numpy as np
8
+ import cv2
9
+ import base64
10
+ import io
11
+ import pickle
12
+ import os
13
+ from datetime import datetime
14
+ import json
15
+ import torch.nn.functional as F
16
+ import subprocess
17
+ import sys
18
+ import time
19
+ from dotenv import load_dotenv
20
+
21
+ # Load environment variables
22
+ load_dotenv()
23
+
24
+ app = Flask(__name__)
25
+
26
+ # Configure CORS for production
27
+ frontend_url = os.getenv('FRONTEND_URL', 'http://localhost:5173')
28
+ CORS(app, resources={
29
+ r"/*": {
30
+ "origins": [frontend_url, "http://localhost:5173", "http://127.0.0.1:5173"],
31
+ "methods": ["GET", "POST", "DELETE", "OPTIONS"],
32
+ "allow_headers": ["Content-Type"]
33
+ }
34
+ })
35
+
36
+ # 1. Load Face Detector
37
+
38
+ try:
39
+ from facenet_pytorch import MTCNN
40
+ mtcnn = MTCNN(keep_all=False, device='cpu', post_process=False)
41
+ print("✓ MTCNN (Facenet-PyTorch) loaded successfully")
42
+ face_detector = 'mtcnn'
43
+ except Exception as e:
44
+ print(f"⚠ MTCNN not available: {e}")
45
+ mtcnn = None
46
+ face_detector = None
47
+
48
+
49
+ # 2. Device
50
+
51
+ device = torch.device('cpu') # kalau nanti mau GPU ganti ke 'cuda'
52
+
53
+
54
+ # 3. Model Definition
55
+ class ResNet50Embedding(nn.Module):
56
+ def __init__(self, embed_dim=512, p_drop=0.5):
57
+ super(ResNet50Embedding, self).__init__()
58
+ resnet = models.resnet50(weights=None) # state_dict ckpt akan override weight ini
59
+
60
+ in_features = resnet.fc.in_features # 2048
61
+ resnet.fc = nn.Identity() # buang fc bawaan
62
+
63
+ self.backbone = resnet
64
+ self.bn = nn.BatchNorm1d(in_features) # 2048
65
+ self.dropout = nn.Dropout(p_drop)
66
+ self.fc = nn.Linear(in_features, embed_dim) # 2048 -> 512
67
+
68
+ def forward(self, x):
69
+ x = self.backbone(x) # [B, 2048]
70
+ x = self.bn(x) # [B, 2048]
71
+ x = self.dropout(x)
72
+ x = self.fc(x) # [B, 512]
73
+ return x
74
+
75
+ # 4. Load ArcFace checkpoint
76
+ MODEL_PATH = os.getenv('MODEL_PATH', 'best_gacor.pth')
77
+ HF_MODEL_REPO = os.getenv('HF_MODEL_REPO', 'elsaelisa09/smartface-attendance-model')
78
+ USE_HUGGINGFACE = os.getenv('USE_HUGGINGFACE', 'true').lower() == 'true'
79
+
80
+ # Global variables for lazy loading
81
+ model = None
82
+ arc_weight = None
83
+ idx_to_class_map = {}
84
+ num_classes = 0
85
+ IMG_SIZE = 224
86
+ model_loading = False
87
+ model_loaded = False
88
+
89
+ def load_model():
90
+ """Lazy load model to avoid timeout on startup"""
91
+ global model, arc_weight, idx_to_class_map, num_classes, IMG_SIZE, MODEL_PATH, model_loading, model_loaded
92
+
93
+ if model_loaded:
94
+ return True
95
+
96
+ if model_loading:
97
+ return False
98
+
99
+ model_loading = True
100
+
101
+ try:
102
+ # Download model from Hugging Face if enabled and not exists locally
103
+ if USE_HUGGINGFACE and not os.path.exists(MODEL_PATH):
104
+ try:
105
+ from huggingface_hub import hf_hub_download
106
+ print(f"📥 Downloading model from Hugging Face: {HF_MODEL_REPO}")
107
+ MODEL_PATH = hf_hub_download(
108
+ repo_id=HF_MODEL_REPO,
109
+ filename="best_gacor.pth",
110
+ cache_dir="./model_cache"
111
+ )
112
+ print(f"✓ Model downloaded to: {MODEL_PATH}")
113
+ except Exception as e:
114
+ print(f"⚠ Failed to download from Hugging Face: {e}")
115
+ print(f" Falling back to local model: {MODEL_PATH}")
116
+
117
+ ckpt = torch.load(MODEL_PATH, map_location=device)
118
+
119
+ # --- Ambil info kelas ---
120
+ class_to_idx = ckpt.get("class_to_idx", {})
121
+ idx_to_class = ckpt.get("idx_to_class", {})
122
+ num_classes = len(class_to_idx) if class_to_idx else 70
123
+
124
+ # Normalisasi idx_to_class → dict idx:int -> label:str
125
+ if isinstance(idx_to_class, list):
126
+ idx_to_class_map = {i: lbl for i, lbl in enumerate(idx_to_class)}
127
+ elif isinstance(idx_to_class, dict) and all(isinstance(k, int) for k in idx_to_class.keys()):
128
+ idx_to_class_map = idx_to_class
129
+ elif isinstance(idx_to_class, dict) and all(isinstance(v, int) for v in idx_to_class.values()):
130
+ # kasus LABEL→INT
131
+ idx_to_class_map = {v: k for k, v in idx_to_class.items()}
132
+ else:
133
+ idx_to_class_map = {i: f"class_{i}" for i in range(num_classes)}
134
+
135
+ # --- ukuran gambar dari ckpt (kalau ada) ---
136
+ IMG_SIZE = ckpt.get("img_size", 224)
137
+
138
+ # --- Bangun model embedding dan load state_dict ---
139
+ model = ResNet50Embedding(embed_dim=512, p_drop=0.5)
140
+ model.load_state_dict(ckpt["model"])
141
+ model.to(device).eval()
142
+
143
+ # --- Ambil weight ArcFace ---
144
+ arc_state = ckpt["arc"]
145
+ if isinstance(arc_state, dict) and "weight" in arc_state:
146
+ arc_weight = arc_state["weight"]
147
+ else:
148
+ arc_weight = arc_state.weight
149
+ arc_weight = arc_weight.to(device)
150
+
151
+ print("✓ ArcFace checkpoint loaded successfully!")
152
+ print(f" Classes: {num_classes}")
153
+ print(f" Sample labels: {[idx_to_class_map[i] for i in list(idx_to_class_map.keys())[:5]]} ...")
154
+
155
+ model_loaded = True
156
+ model_loading = False
157
+ return True
158
+
159
+ except Exception as e:
160
+ print(f"✗ Error loading model: {e}")
161
+ print(f" Make sure {MODEL_PATH} exists in the backend folder")
162
+ model_loading = False
163
+ return False
164
+
165
+
166
+ # Transform (HARUS sama dgn val_tfms)
167
+
168
+ test_transform = transforms.Compose([
169
+ transforms.Resize((IMG_SIZE, IMG_SIZE)),
170
+ transforms.ToTensor(),
171
+ transforms.Normalize(mean=[0.5, 0.5, 0.5],
172
+ std =[0.5, 0.5, 0.5]),
173
+ ])
174
+
175
+ # ==========================
176
+ # 6. Attendance storage
177
+ # ==========================
178
+ ATTENDANCE_FILE = 'attendance.json'
179
+
180
+ def load_attendance():
181
+ if os.path.exists(ATTENDANCE_FILE):
182
+ try:
183
+ with open(ATTENDANCE_FILE, 'r') as f:
184
+ content = f.read().strip()
185
+ if not content:
186
+ return []
187
+ return json.loads(content)
188
+ except Exception:
189
+ return []
190
+ return []
191
+
192
+ def save_attendance(attendance_list):
193
+ with open(ATTENDANCE_FILE, 'w') as f:
194
+ json.dump(attendance_list, f, indent=2)
195
+
196
+
197
+ # Face Detection
198
+
199
+ def detect_and_crop_face(image_array):
200
+ """Detect face using MTCNN (Facenet-PyTorch) and return cropped face"""
201
+ if mtcnn is None:
202
+ # Fallback: return center crop if MTCNN not available
203
+ h, w = image_array.shape[:2]
204
+ size = min(h, w)
205
+ y1 = (h - size) // 2
206
+ x1 = (w - size) // 2
207
+ return image_array[y1:y1+size, x1:x1+size], None
208
+
209
+ # Convert BGR to RGB for MTCNN
210
+ image_rgb = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
211
+ image_pil = Image.fromarray(image_rgb)
212
+
213
+ # Detect faces with MTCNN
214
+ boxes, probs = mtcnn.detect(image_pil)
215
+
216
+ if boxes is None or len(boxes) == 0:
217
+ return None, None
218
+
219
+ # Get first detected face (highest confidence)
220
+ box = boxes[0]
221
+ x1, y1, x2, y2 = box.astype(int)
222
+
223
+ # Safety clamp
224
+ h, w = image_array.shape[:2]
225
+ x1 = max(0, x1); y1 = max(0, y1)
226
+ x2 = min(w, x2); y2 = min(h, y2)
227
+
228
+ face_crop = image_array[y1:y2, x1:x2]
229
+ bbox = {'x1': int(x1), 'y1': int(y1), 'x2': int(x2), 'y2': int(y2)}
230
+
231
+ return face_crop, bbox
232
+
233
+ # Prediction
234
+
235
+ def predict_identity(face_image):
236
+ """Predict identity from face image (ArcFace ResNet50)"""
237
+ if model is None or arc_weight is None or not idx_to_class_map:
238
+ return []
239
+
240
+ # Convert to PIL if ndarray
241
+ if isinstance(face_image, np.ndarray):
242
+ face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
243
+ face_image = Image.fromarray(face_image)
244
+
245
+ img_tensor = test_transform(face_image).unsqueeze(0).to(device)
246
+
247
+ with torch.no_grad():
248
+ SCALE = 20.0 # Scale Mirip Confidence
249
+ emb = model(img_tensor) # [1, 512]
250
+ emb_norm = F.normalize(emb, dim=1)
251
+ w_norm = F.normalize(arc_weight, dim=1) # [C, 512]
252
+ logits = torch.matmul(emb_norm, w_norm.t()) * SCALE
253
+ probabilities = torch.softmax(logits, dim=1)[0]
254
+
255
+ # Top 3 predictions
256
+ top3_prob, top3_idx = torch.topk(probabilities, 3)
257
+
258
+ predictions = []
259
+ for prob, idx in zip(top3_prob, top3_idx):
260
+ idx_int = idx.item()
261
+ label = idx_to_class_map.get(idx_int, f"class_{idx_int}")
262
+ confidence = prob.item() * 100
263
+ predictions.append({
264
+ 'label': label,
265
+ 'confidence': round(confidence, 2)
266
+ })
267
+
268
+ return predictions
269
+
270
+ # API ROUTES
271
+
272
+ @app.route('/health', methods=['GET'])
273
+ def health():
274
+ # Quick response for health check - don't wait for model
275
+ return jsonify({
276
+ 'status': 'ok',
277
+ 'model_loaded': model_loaded,
278
+ 'model_loading': model_loading,
279
+ 'face_detector': face_detector,
280
+ 'mtcnn_loaded': mtcnn is not None
281
+ })
282
+
283
+ @app.route('/recognize', methods=['POST'])
284
+ def recognize():
285
+ # Lazy load model on first request
286
+ if not model_loaded:
287
+ load_model()
288
+
289
+ try:
290
+ data = request.get_json()
291
+
292
+ image_data = data.get('image', '')
293
+ if image_data.startswith('data:image'):
294
+ image_data = image_data.split(',')[1]
295
+
296
+ image_bytes = base64.b64decode(image_data)
297
+ image_array = np.frombuffer(image_bytes, dtype=np.uint8)
298
+ image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
299
+
300
+ if image is None:
301
+ return jsonify({'error': 'Invalid image'}), 400
302
+
303
+ face_crop, bbox = detect_and_crop_face(image)
304
+
305
+ if face_crop is None:
306
+ return jsonify({'error': 'No face detected'}), 400
307
+
308
+ predictions = predict_identity(face_crop)
309
+
310
+ if not predictions:
311
+ return jsonify({'error': 'Model not available'}), 500
312
+
313
+ # Encode cropped face
314
+ _, buffer = cv2.imencode('.jpg', face_crop)
315
+ face_base64 = base64.b64encode(buffer).decode('utf-8')
316
+
317
+ # Draw bbox only (no label text overlay)
318
+ if bbox:
319
+ cv2.rectangle(image, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 255, 0), 4)
320
+
321
+ _, buffer = cv2.imencode('.jpg', image)
322
+ annotated_base64 = base64.b64encode(buffer).decode('utf-8')
323
+
324
+ return jsonify({
325
+ 'success': True,
326
+ 'bbox': bbox,
327
+ 'face_image': f'data:image/jpeg;base64,{face_base64}',
328
+ 'annotated_image': f'data:image/jpeg;base64,{annotated_base64}',
329
+ 'predictions': predictions
330
+ })
331
+
332
+ except Exception as e:
333
+ return jsonify({'error': str(e)}), 500
334
+
335
+ @app.route('/mark-attendance', methods=['POST'])
336
+ def mark_attendance():
337
+ try:
338
+ data = request.get_json()
339
+
340
+ label = data.get('label')
341
+ confidence = data.get('confidence')
342
+ image = data.get('image')
343
+
344
+ if not label or confidence is None:
345
+ return jsonify({'error': 'Missing required fields'}), 400
346
+
347
+ attendance_record = {
348
+ 'id': len(load_attendance()) + 1,
349
+ 'label': label,
350
+ 'confidence': confidence,
351
+ 'timestamp': datetime.now().isoformat(),
352
+ 'date': datetime.now().strftime('%Y-%m-%d'),
353
+ 'time': datetime.now().strftime('%H:%M:%S'),
354
+ 'status': 'present',
355
+ 'image': image
356
+ }
357
+
358
+ attendance_list = load_attendance()
359
+
360
+ today = datetime.now().strftime('%Y-%m-%d')
361
+ already_marked = any(
362
+ record['label'] == label and record['date'] == today
363
+ for record in attendance_list
364
+ )
365
+
366
+ if already_marked:
367
+ return jsonify({
368
+ 'success': False,
369
+ 'message': f'{label} sudah absen hari ini'
370
+ }), 400
371
+
372
+ attendance_list.append(attendance_record)
373
+ save_attendance(attendance_list)
374
+
375
+ return jsonify({
376
+ 'success': True,
377
+ 'message': f'Absensi {label} berhasil dicatat',
378
+ 'record': attendance_record
379
+ })
380
+
381
+ except Exception as e:
382
+ return jsonify({'error': str(e)}), 500
383
+
384
+ @app.route('/attendance', methods=['GET'])
385
+ def get_attendance():
386
+ try:
387
+ attendance_list = load_attendance()
388
+
389
+ date_filter = request.args.get('date')
390
+ if date_filter:
391
+ attendance_list = [
392
+ record for record in attendance_list
393
+ if record['date'] == date_filter
394
+ ]
395
+
396
+ return jsonify({
397
+ 'success': True,
398
+ 'data': attendance_list,
399
+ 'total': len(attendance_list)
400
+ })
401
+
402
+ except Exception as e:
403
+ return jsonify({'error': str(e)}), 500
404
+
405
+ @app.route('/attendance/<int:id>', methods=['DELETE'])
406
+ def delete_attendance(id):
407
+ try:
408
+ attendance_list = load_attendance()
409
+ attendance_list = [record for record in attendance_list if record['id'] != id]
410
+ save_attendance(attendance_list)
411
+
412
+ return jsonify({
413
+ 'success': True,
414
+ 'message': 'Attendance record deleted'
415
+ })
416
+
417
+ except Exception as e:
418
+ return jsonify({'error': str(e)}), 500
419
+
420
+ # Root endpoint for health check
421
+ @app.route('/', methods=['GET'])
422
+ def root():
423
+ # Quick response for render health check
424
+ return jsonify({
425
+ 'message': 'SmartFace Attendance API',
426
+ 'status': 'running',
427
+ 'model_status': 'loaded' if model_loaded else ('loading' if model_loading else 'not_loaded'),
428
+ 'endpoints': {
429
+ 'health': '/health',
430
+ 'recognize': '/recognize [POST]',
431
+ 'mark_attendance': '/mark-attendance [POST]',
432
+ 'get_attendance': '/attendance [GET]',
433
+ 'delete_attendance': '/attendance/<id> [DELETE]'
434
+ }
435
+ })
436
+
437
+ if __name__ == '__main__':
438
+ print("="*80)
439
+ print(" STARTING FACE RECOGNITION ATTENDANCE SYSTEM")
440
+ print("="*80)
441
+ print(f"Model: {'✓ Loaded' if model else '✗ Not loaded'}")
442
+ print(f"Face Detector: {'✓ MTCNN' if mtcnn else '✗ Not loaded'}")
443
+ print(f"Classes: {num_classes}")
444
+ print(f"Frontend URL: {frontend_url}")
445
+ print("="*80)
446
+
447
+ # Get port from environment variable (for production deployment)
448
+ port = int(os.getenv('PORT', 5000))
449
+ host = os.getenv('HOST', '0.0.0.0')
450
+ debug = os.getenv('FLASK_ENV', 'development') == 'development'
451
+
452
+ app.run(debug=debug, host=host, port=port)
railway.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://railway.app/railway.schema.json",
3
+ "build": {
4
+ "builder": "NIXPACKS",
5
+ "buildCommand": "pip install -r requirements.txt"
6
+ },
7
+ "deploy": {
8
+ "startCommand": "gunicorn app:app --bind 0.0.0.0:$PORT --workers 1 --timeout 120",
9
+ "restartPolicyType": "ON_FAILURE",
10
+ "restartPolicyMaxRetries": 10
11
+ }
12
+ }
render.yaml ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ services:
2
+ - type: web
3
+ name: smartface-backend
4
+ env: python
5
+ region: singapore
6
+ plan: starter
7
+ buildCommand: pip install -r requirements.txt
8
+ startCommand: gunicorn app:app --bind 0.0.0.0:$PORT --workers 1 --timeout 300 --preload --max-requests 100 --max-requests-jitter 10
9
+ envVars:
10
+ - key: FLASK_ENV
11
+ value: production
12
+ - key: FRONTEND_URL
13
+ sync: false
14
+ - key: MODEL_PATH
15
+ value: best_gacor.pth
16
+ - key: IMG_SIZE
17
+ value: 224
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ flask==3.1.0
2
+ flask-cors==5.0.0
3
+ torch==2.2.2
4
+ torchvision==0.17.2
5
+ pillow>=10.2.0,<10.3.0
6
+ opencv-python-headless==4.10.0.84
7
+ numpy>=1.24.0,<2.0.0
8
+ onnxruntime==1.21.0
9
+ scikit-learn==1.5.2
10
+ facenet-pytorch==2.6.0
11
+ gunicorn==21.2.0
12
+ python-dotenv==1.0.0
13
+ huggingface-hub==0.26.2
runtime.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ python-3.12