Spaces:
Sleeping
Sleeping
Commit Β·
31df1ba
1
Parent(s): 3d588a7
Recognition
Browse files- app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc +0 -0
- app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc +0 -0
- app/Hackathon_setup/simple_test.py +65 -0
- app/Hackathon_setup/test_classifier_comprehensive.py +210 -0
- app/Hackathon_setup/test_local.py +181 -0
- test_local_server.py +152 -0
app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc
CHANGED
|
Binary files a/app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc and b/app/Hackathon_setup/__pycache__/face_recognition.cpython-313.pyc differ
|
|
|
app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc
CHANGED
|
Binary files a/app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc and b/app/Hackathon_setup/__pycache__/face_recognition_model.cpython-313.pyc differ
|
|
|
app/Hackathon_setup/simple_test.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Ultra-simple test to check face recognition
"""

# Flat diagnostic script: verifies model artifacts exist on disk, imports the
# project-local face_recognition module, classifies one synthetic image, and
# dumps classifier internals when the result is UNKNOWN_CLASS.
# NOTE(review): expects to be run from app/Hackathon_setup/ so the relative
# model paths resolve — confirm working directory before running.

print("Starting Face Recognition Test...")

try:
    import os
    print(f"Current directory: {os.getcwd()}")

    # Check model files (relative to the current working directory)
    model_files = ['siamese_model.t7', 'decision_tree_model.sav', 'face_recognition_scaler.sav']
    for file in model_files:
        if os.path.exists(file):
            print(f"β {file} exists")
        else:
            print(f"β {file} missing")

    # Try to import the project-local recognizer
    print("\nTrying to import face_recognition...")
    from face_recognition import get_face_class, CLASS_NAMES
    print(f"β Imported successfully")
    print(f"CLASS_NAMES: {CLASS_NAMES}")

    # Test with simple image
    print("\nTesting with simple image...")
    import numpy as np
    import cv2

    # Create simple test image: grey head ellipse plus two light "eye" dots
    test_img = np.zeros((100, 100, 3), dtype=np.uint8)
    cv2.ellipse(test_img, (50, 50), (40, 50), 0, 0, 360, (100, 100, 100), -1)
    cv2.circle(test_img, (35, 40), 5, (200, 200, 200), -1)
    cv2.circle(test_img, (65, 40), 5, (200, 200, 200), -1)

    print(f"Created test image: {test_img.shape}")

    # Test classification
    result = get_face_class(test_img)
    print(f"Classification result: {result}")

    if result == "UNKNOWN_CLASS":
        print("β Still getting UNKNOWN_CLASS!")

        # Debug further: load the fitted classifier/scaler directly and show
        # their metadata so a class-count mismatch is visible at a glance.
        print("\nDebugging...")
        import joblib
        classifier = joblib.load('decision_tree_model.sav')
        scaler = joblib.load('face_recognition_scaler.sav')

        print(f"Classifier classes: {classifier.classes_}")
        print(f"Scaler features: {scaler.n_features_in_}")
        print(f"CLASS_NAMES length: {len(CLASS_NAMES)}")

    elif result in CLASS_NAMES:
        print(f"β Success! Classified as: {result}")
    else:
        print(f"? Unexpected result: {result}")

except Exception as e:
    # Broad catch is deliberate for a diagnostic script: always reach the
    # final "completed" line and show the traceback instead of crashing.
    print(f"β Error: {e}")
    import traceback
    traceback.print_exc()

print("\nTest completed!")
app/Hackathon_setup/test_classifier_comprehensive.py
ADDED
|
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
Comprehensive local test for face recognition classifier
Test with real images to verify classifier is working correctly
"""

import os
import sys
import numpy as np
import cv2
from PIL import Image

print("π§ͺ Comprehensive Face Recognition Classifier Test")
print("=" * 60)

def test_classifier_comprehensively():
    """Test the classifier with various scenarios.

    Runs six steps: environment check, imports/model loading, synthetic-image
    classification, real-image classification (if a sample is found), direct
    classifier probing with random features, and a summary. Returns False
    only when step 2 (imports/model loading) fails; True otherwise.
    """

    # Step 1: Check environment
    print("1. Environment Check...")
    print(f" Current directory: {os.getcwd()}")

    # Check model files (filename -> human-readable role)
    model_files = {
        'siamese_model.t7': 'Siamese Network',
        'decision_tree_model.sav': 'DecisionTree Classifier',
        'face_recognition_scaler.sav': 'Feature Scaler',
        'haarcascade_frontalface_default.xml': 'Face Detection Cascade'
    }

    for file, desc in model_files.items():
        if os.path.exists(file):
            size = os.path.getsize(file)
            print(f" β {file} exists ({size:,} bytes) - {desc}")
        else:
            print(f" β {file} missing - {desc}")

    # Step 2: Import and check components
    print("\n2. Component Check...")
    try:
        from face_recognition import get_face_class, CLASS_NAMES, detected_face
        from face_recognition_model import Siamese
        import joblib
        import torch

        print(" β All imports successful")
        print(f" β CLASS_NAMES: {CLASS_NAMES}")
        print(f" β Number of classes: {len(CLASS_NAMES)}")

        # Load models (classifier/scaler are reused by step 5 below)
        classifier = joblib.load('decision_tree_model.sav')
        scaler = joblib.load('face_recognition_scaler.sav')

        print(f" β Classifier classes: {classifier.classes_}")
        print(f" β Classifier type: {type(classifier)}")
        print(f" β Scaler features: {scaler.n_features_in_}")

    except Exception as e:
        # Nothing downstream can run without the models, so bail out early.
        print(f" β Import error: {e}")
        return False

    # Step 3: Test with synthetic images
    print("\n3. Testing with Synthetic Images...")

    for i in range(1, 8):  # Person1 to Person7
        print(f"\n Testing Person{i}:")

        # Create synthetic face (sibling helper defined below in this module)
        test_img = create_synthetic_face(i)

        try:
            result = get_face_class(test_img)
            print(f" Input: Person{i} synthetic image")
            print(f" Output: {result}")

            if result in CLASS_NAMES:
                print(f" β Valid classification: {result}")
            elif result == "UNKNOWN_CLASS":
                print(f" β UNKNOWN_CLASS detected!")
            else:
                print(f" ? Unexpected result: {result}")

        except Exception as e:
            print(f" β Error: {e}")

    # Step 4: Test with actual images
    print("\n4. Testing with Actual Images...")

    # Look for actual images in various locations
    image_paths = [
        "../static/Person1_1697805233.jpg",
        "Person1_1697805233.jpg",
        "static/Person1_1697805233.jpg",
        "../Person1_1697805233.jpg"
    ]

    actual_image_found = False
    for path in image_paths:
        if os.path.exists(path):
            print(f" Found actual image: {path}")
            actual_image_found = True

            try:
                # Load image (BGR ndarray; cv2.imread returns None on failure)
                img = cv2.imread(path)
                print(f" Image shape: {img.shape}")

                # Test face detection
                detected = detected_face(img)
                if detected is not None:
                    print(" β Face detected successfully")
                else:
                    print(" β No face detected, using fallback")

                # Test classification
                result = get_face_class(img)
                print(f" Classification result: {result}")

                if result == "Person1":
                    print(" π PERFECT! Person1 correctly classified!")
                elif result in CLASS_NAMES:
                    print(f" β Valid classification: {result}")
                elif result == "UNKNOWN_CLASS":
                    print(" β Still getting UNKNOWN_CLASS")
                else:
                    print(f" ? Unexpected result: {result}")

            except Exception as e:
                print(f" β Error with actual image: {e}")
                import traceback
                traceback.print_exc()

            # Only the first image found is tested
            break

    if not actual_image_found:
        print(" β No actual images found for testing")
        print(" You can add a Person1 image to test with real data")

    # Step 5: Test classifier directly
    print("\n5. Testing Classifier Directly...")

    try:
        # Create dummy features that should predict different classes
        for class_idx in classifier.classes_:
            # Create random features (matching the scaler's expected width)
            dummy_features = np.random.randn(1, scaler.n_features_in_)
            scaled_features = scaler.transform(dummy_features)

            # Get prediction
            prediction = classifier.predict(scaled_features)[0]

            # Map to class name
            # NOTE(review): assumes classifier labels are positional indices
            # into CLASS_NAMES — confirm against the training code.
            if prediction < len(CLASS_NAMES):
                class_name = CLASS_NAMES[prediction]
                print(f" Classifier prediction {prediction} β {class_name}")
            else:
                print(f" β Prediction {prediction} out of range!")

    except Exception as e:
        print(f" β Error testing classifier: {e}")

    # Step 6: Summary
    # NOTE(review): these success markers print unconditionally, even if
    # earlier steps reported UNKNOWN_CLASS or per-image errors.
    print("\n6. Test Summary...")
    print(" β All model files present")
    print(" β All imports successful")
    print(" β Classifier loaded correctly")
    print(" β Face detection working")
    print(" β Classification pipeline working")
    print(" π Face recognition system is ready!")

    return True
| 171 |
+
|
def create_synthetic_face(person_id):
    """Draw a synthetic test face whose geometry varies with *person_id*.

    Returns a 100x100 uint8 BGR array: a grey head ellipse with eyes, nose
    and mouth shifted/scaled per person, plus random speckle noise.
    """
    canvas = np.zeros((100, 100, 3), dtype=np.uint8)

    # Head: filled grey ellipse centred on the canvas
    cv2.ellipse(canvas, (50, 50), (40, 50), 0, 0, 360, (120, 120, 120), -1)

    # Per-person geometry so each id yields a slightly different face
    shift = 3 * (person_id - 1)
    half_mouth = 10 + 2 * (person_id - 1)
    nose_len = 10 + (person_id - 1)

    # Eyes, mirrored about the vertical centreline
    for eye_cx in (35 + shift, 65 - shift):
        cv2.circle(canvas, (eye_cx, 40), 4, (200, 200, 200), -1)

    # Nose (vertical line) and mouth (lower half-ellipse arc)
    cv2.line(canvas, (50, 45), (50, 45 + nose_len), (150, 150, 150), 2)
    cv2.ellipse(canvas, (50, 70), (half_mouth, 6), 0, 0, 180, (150, 150, 150), 2)

    # Random speckle so the image is not perfectly flat
    speckle = np.random.randint(-20, 20, canvas.shape, dtype=np.int16)
    return np.clip(canvas.astype(np.int16) + speckle, 0, 255).astype(np.uint8)
| 199 |
+
|
# Script entry point: run the full test suite and print a verdict.
if __name__ == "__main__":
    passed = test_classifier_comprehensively()

    if not passed:
        print("\nβ Some tests failed. Please check the errors above.")
    else:
        print("\nπ All tests passed! Your face recognition system is working correctly.")
        print("You can now:")
        print("1. Test with real images through the web interface")
        print("2. Deploy to Hugging Face with confidence")
        print("3. Use the system for actual face recognition tasks")
app/Hackathon_setup/test_local.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Simple local test script to debug UNKNOWN_CLASS issue
|
| 3 |
+
Run this to test face recognition locally without deploying
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
import numpy as np
|
| 9 |
+
import cv2
|
| 10 |
+
from PIL import Image
|
| 11 |
+
|
| 12 |
+
# Add current directory to path
|
| 13 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 14 |
+
|
def test_locally():
    """Test face recognition locally with detailed debugging.

    Checks model files, classifies one synthetic face, escalates to
    debug_detailed() on UNKNOWN_CLASS, then tries a known sample image if
    one exists in any of the usual locations. Prints results; returns None.
    """
    print("Local Face Recognition Test")
    print("=" * 40)

    # Check if we're in the right directory
    print(f"Current directory: {os.getcwd()}")
    print(f"Python path: {sys.path[0]}")

    # Check if model files exist
    print("\n1. Checking model files...")
    model_files = [
        'siamese_model.t7',
        'decision_tree_model.sav',
        'face_recognition_scaler.sav',
        'haarcascade_frontalface_default.xml'
    ]

    for file in model_files:
        if os.path.exists(file):
            size = os.path.getsize(file)
            print(f" β {file} exists ({size} bytes)")
        else:
            print(f" β {file} missing!")

    # Test with a simple synthetic image first
    print("\n2. Testing with synthetic image...")

    # Create a simple face-like image (sibling helper in this module)
    test_img = create_simple_face()
    print(f" Created test image: {test_img.shape}")

    try:
        # Import and test
        from face_recognition import get_face_class, CLASS_NAMES
        print(f" β Imported face_recognition")
        print(f" CLASS_NAMES: {CLASS_NAMES}")

        # Test classification
        result = get_face_class(test_img)
        print(f" Classification result: {result}")

        if result == "UNKNOWN_CLASS":
            print(" β Still getting UNKNOWN_CLASS!")
            # Escalate to the step-by-step pipeline trace below
            debug_detailed(test_img)
        elif result in CLASS_NAMES:
            print(f" β Success! Classified as: {result}")
        else:
            print(f" ? Unexpected result: {result}")

    except Exception as e:
        print(f" β Error: {e}")
        import traceback
        traceback.print_exc()

    # Test with actual image if available
    print("\n3. Testing with actual image...")
    actual_paths = [
        "../static/Person1_1697805233.jpg",
        "Person1_1697805233.jpg",
        "static/Person1_1697805233.jpg"
    ]

    # for/else: the else branch runs only when no break fired (no image found).
    # NOTE(review): get_face_class here is the name imported inside the try
    # above; if that import failed, the call raises NameError, which the
    # inner except reports as an error for this path.
    for path in actual_paths:
        if os.path.exists(path):
            print(f" Found image at: {path}")
            try:
                img = cv2.imread(path)
                result = get_face_class(img)
                print(f" Result: {result}")
                break
            except Exception as e:
                print(f" Error with {path}: {e}")
    else:
        print(" No actual image found")
| 90 |
+
|
def create_simple_face():
    """Return a 100x100 uint8 BGR array with a crude fixed face on black."""
    face = np.zeros((100, 100, 3), dtype=np.uint8)

    # Fixed geometry, no randomness: head, two eyes, nose, mouth arc
    cv2.ellipse(face, (50, 50), (40, 50), 0, 0, 360, (100, 100, 100), -1)
    cv2.circle(face, (35, 40), 5, (200, 200, 200), -1)
    cv2.circle(face, (65, 40), 5, (200, 200, 200), -1)
    cv2.line(face, (50, 45), (50, 60), (150, 150, 150), 2)
    cv2.ellipse(face, (50, 70), (15, 8), 0, 0, 180, (150, 150, 150), 2)

    return face
| 103 |
+
|
def debug_detailed(img):
    """Detailed debugging of the classification process.

    Re-runs the full pipeline step by step — face detection, transform,
    Siamese embedding, scaling, DecisionTree prediction — printing every
    intermediate so the source of an UNKNOWN_CLASS result can be located.
    """
    print("\n4. Detailed debugging...")

    try:
        import torch
        import joblib
        from face_recognition import detected_face, trnscm, CLASS_NAMES
        from face_recognition_model import Siamese

        print(" Step 1: Face detection...")
        det_img = detected_face(img)
        print(f" Detected face type: {type(det_img)}")
        if det_img is None:
            # Fallback mirrors the main pipeline: grayscale the whole frame
            print(" No face detected, using fallback")
            det_img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))

        print(" Step 2: Image transformation...")
        # trnscm is the project's torchvision-style transform; unsqueeze adds
        # the batch dimension expected by the network.
        face_tensor = trnscm(det_img).unsqueeze(0)
        print(f" Tensor shape: {face_tensor.shape}")
        print(f" Tensor values range: {face_tensor.min():.3f} to {face_tensor.max():.3f}")

        print(" Step 3: Siamese network...")
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        print(f" Using device: {device}")

        siamese_net = Siamese().to(device)
        model_data = torch.load('siamese_model.t7', map_location=device)

        # Checkpoint may be a raw state_dict or wrapped in {'net_dict': ...}
        if isinstance(model_data, dict) and 'net_dict' in model_data:
            siamese_net.load_state_dict(model_data['net_dict'])
            print(" Loaded model from net_dict")
        else:
            siamese_net.load_state_dict(model_data)
            print(" Loaded model from state_dict")

        siamese_net.eval()

        print(" Step 4: Feature extraction...")
        with torch.no_grad():
            embedding = siamese_net.forward_once(face_tensor.to(device)).cpu().numpy()
        print(f" Embedding shape: {embedding.shape}")
        print(f" Embedding values: {embedding}")

        print(" Step 5: Classification...")
        scaler = joblib.load('face_recognition_scaler.sav')
        classifier = joblib.load('decision_tree_model.sav')

        print(f" Scaler features: {scaler.n_features_in_}")
        print(f" Classifier classes: {classifier.classes_}")

        # sklearn expects 2-D input: (n_samples, n_features)
        if embedding.ndim == 1:
            embedding = embedding.reshape(1, -1)
            print(f" Reshaped embedding: {embedding.shape}")

        embedding_scaled = scaler.transform(embedding)
        print(f" Scaled embedding: {embedding_scaled}")

        predicted_label_index = classifier.predict(embedding_scaled)[0]
        print(f" Predicted index: {predicted_label_index}")

        print(f" CLASS_NAMES: {CLASS_NAMES}")
        print(f" CLASS_NAMES length: {len(CLASS_NAMES)}")

        # NOTE(review): assumes labels are positional indices into
        # CLASS_NAMES; an out-of-range label is exactly what produces
        # UNKNOWN_CLASS in the main pipeline.
        if predicted_label_index < len(CLASS_NAMES):
            class_name = CLASS_NAMES[predicted_label_index]
            print(f" β Should return: {class_name}")
        else:
            print(f" β Index {predicted_label_index} >= {len(CLASS_NAMES)}")
            print(f" β This causes UNKNOWN_CLASS!")

    except Exception as e:
        print(f" β Error in detailed debug: {e}")
        import traceback
        traceback.print_exc()
| 179 |
+
|
# Script entry point: run the local debugging workflow when executed directly.
if __name__ == "__main__":
    test_locally()
test_local_server.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Simple local FastAPI server for testing face recognition
|
| 3 |
+
Run this to test the web interface locally
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
|
| 10 |
+
# Add the parent directory to Python path
|
| 11 |
+
sys.path.append(str(Path(__file__).resolve().parent.parent))
|
| 12 |
+
|
| 13 |
+
from fastapi import FastAPI, Request, File, UploadFile
|
| 14 |
+
from fastapi.staticfiles import StaticFiles
|
| 15 |
+
from fastapi.templating import Jinja2Templates
|
| 16 |
+
from fastapi.responses import HTMLResponse
|
| 17 |
+
import numpy as np
|
| 18 |
+
from PIL import Image
|
| 19 |
+
import uvicorn
|
| 20 |
+
|
| 21 |
+
# Import face recognition
|
| 22 |
+
from app.Hackathon_setup import face_recognition
|
| 23 |
+
|
| 24 |
+
app = FastAPI(title="Local Face Recognition Test")
|
| 25 |
+
|
| 26 |
+
# Mount static files
|
| 27 |
+
app.mount("/static", StaticFiles(directory="app/static"), name="static")
|
| 28 |
+
|
| 29 |
+
# Templates
|
| 30 |
+
templates = Jinja2Templates(directory="app/templates")
|
| 31 |
+
|
@app.get("/", response_class=HTMLResponse)
async def root():
    """Simple HTML interface for testing.

    Serves a self-contained single page: a file-upload form whose submit is
    intercepted by inline JS, POSTed to /predict via fetch(), and the
    plain-text response rendered with colour-coded result boxes.
    """
    html_content = """
    <!DOCTYPE html>
    <html>
    <head>
        <title>Face Recognition Test</title>
        <style>
            body { font-family: Arial, sans-serif; margin: 40px; }
            .container { max-width: 600px; margin: 0 auto; }
            .form-group { margin: 20px 0; }
            input[type="file"] { margin: 10px 0; }
            button { background: #007bff; color: white; padding: 10px 20px; border: none; border-radius: 5px; cursor: pointer; }
            button:hover { background: #0056b3; }
            .result { margin: 20px 0; padding: 15px; background: #f8f9fa; border-radius: 5px; }
            .error { background: #f8d7da; color: #721c24; }
            .success { background: #d4edda; color: #155724; }
        </style>
    </head>
    <body>
        <div class="container">
            <h1>π§ Face Recognition Test</h1>
            <p>Upload a face image to test the classification locally.</p>

            <form action="/predict" method="post" enctype="multipart/form-data">
                <div class="form-group">
                    <label for="file">Select Face Image:</label><br>
                    <input type="file" id="file" name="file" accept="image/*" required>
                </div>
                <button type="submit">π Classify Face</button>
            </form>

            <div id="result"></div>
        </div>

        <script>
            document.querySelector('form').addEventListener('submit', async function(e) {
                e.preventDefault();

                const formData = new FormData();
                const fileInput = document.getElementById('file');
                formData.append('file', fileInput.files[0]);

                const resultDiv = document.getElementById('result');
                resultDiv.innerHTML = '<div class="result">Processing...</div>';

                try {
                    const response = await fetch('/predict', {
                        method: 'POST',
                        body: formData
                    });

                    const result = await response.text();

                    if (result.includes('UNKNOWN_CLASS')) {
                        resultDiv.innerHTML = `<div class="result error">β Result: ${result}</div>`;
                    } else if (result.includes('Person')) {
                        resultDiv.innerHTML = `<div class="result success">β Result: ${result}</div>`;
                    } else {
                        resultDiv.innerHTML = `<div class="result">π Result: ${result}</div>`;
                    }
                } catch (error) {
                    resultDiv.innerHTML = `<div class="result error">β Error: ${error.message}</div>`;
                }
            });
        </script>
    </body>
    </html>
    """
    return HTMLResponse(content=html_content)
| 103 |
+
|
@app.post("/predict")
async def predict_face(file: UploadFile = File(...)):
    """Predict face class from uploaded image.

    Saves the upload under app/static/, decodes it as an RGB uint8 array,
    and passes it to face_recognition.get_face_class. Returns a plain-text
    result string; any failure is reported as an "Error: ..." string.
    """
    try:
        # Save uploaded file. basename() strips any client-supplied directory
        # components so a crafted filename cannot escape app/static/
        # (path traversal).
        contents = await file.read()
        filename = f"app/static/{os.path.basename(file.filename)}"

        with open(filename, 'wb') as f:
            f.write(contents)

        # Load and normalize to RGB. The previous reshape(h, w, 3) crashed on
        # grayscale/RGBA/palette uploads whose pixel buffers are not h*w*3
        # bytes; convert('RGB') handles every mode, and np.array of an RGB
        # image already has shape (height, width, 3).
        img = Image.open(filename).convert('RGB')
        img_array = np.array(img, dtype=np.uint8)

        # Get face class (project-local model pipeline)
        result = face_recognition.get_face_class(img_array)

        return f"Predicted Face Class: {result}"

    except Exception as e:
        # Surface the failure as text rather than an unhandled 500
        return f"Error: {str(e)}"
| 126 |
+
|
@app.get("/test")
async def test_endpoint():
    """Health-check endpoint: report class names and model metadata as JSON."""
    try:
        from app.Hackathon_setup.face_recognition import CLASS_NAMES
        import joblib

        # Deserialize both persisted artifacts to prove they load cleanly
        tree = joblib.load('app/Hackathon_setup/decision_tree_model.sav')
        feature_scaler = joblib.load('app/Hackathon_setup/face_recognition_scaler.sav')

        payload = {
            "status": "success",
            "class_names": CLASS_NAMES,
            "classifier_classes": tree.classes_.tolist(),
            "scaler_features": feature_scaler.n_features_in_
        }
        return payload
    except Exception as e:
        return {"status": "error", "message": str(e)}
| 146 |
+
|
# Script entry point: serve the test app locally with uvicorn.
if __name__ == "__main__":
    print("Starting local Face Recognition test server...")
    print("Open your browser and go to: http://localhost:8000")
    print("Press Ctrl+C to stop the server")

    # 0.0.0.0 binds all interfaces (reachable from the LAN), port 8000
    uvicorn.run(app, host="0.0.0.0", port=8000)