File size: 11,996 Bytes
c9ee150
 
 
 
3f07cb2
 
 
 
c9ee150
 
 
 
3f07cb2
 
 
c9ee150
2ca4976
 
 
 
c9ee150
 
 
 
 
 
 
 
3f07cb2
c9ee150
 
 
 
 
3f07cb2
 
 
c9ee150
1bf6e79
 
 
 
 
 
 
9678276
1bf6e79
 
 
fe2da33
3863a95
3f07cb2
d029190
c959f67
c9ee150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fe2da33
 
 
 
 
 
 
 
 
 
 
 
c9ee150
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3f07cb2
 
da26386
3f07cb2
 
c9ee150
 
 
 
 
 
3f07cb2
c9ee150
 
3f07cb2
3d588a7
da26386
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3d588a7
da26386
 
 
 
 
 
 
 
 
 
 
 
 
3d588a7
 
da26386
c9ee150
3f07cb2
c9ee150
 
3f07cb2
 
 
3d588a7
 
 
 
c9ee150
da26386
 
3f07cb2
 
 
c9ee150
3f07cb2
 
1bf6e79
c9ee150
 
3d588a7
c9ee150
 
 
 
 
7bf7aa7
c9ee150
 
7bf7aa7
 
 
 
 
1bf6e79
 
c9ee150
 
 
 
7bf7aa7
c9ee150
 
 
7bf7aa7
 
c9ee150
 
 
3f07cb2
34b001a
3f07cb2
 
1bf6e79
34b001a
c9ee150
3d588a7
34b001a
c9ee150
d029190
c9ee150
34b001a
 
 
c9ee150
7bf7aa7
34b001a
7bf7aa7
 
34b001a
7bf7aa7
 
34b001a
 
1bf6e79
34b001a
 
c9ee150
 
34b001a
 
 
 
 
 
 
 
c9ee150
 
fe2da33
34b001a
c9ee150
34b001a
 
 
 
 
 
 
 
 
 
 
d029190
34b001a
 
 
 
 
c9ee150
d029190
34b001a
 
 
 
c959f67
34b001a
 
 
 
 
 
 
d029190
34b001a
 
 
 
 
 
 
 
 
 
 
 
1bf6e79
34b001a
c9ee150
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
"""
Robust face recognition implementation that handles sklearn version compatibility issues
"""

import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
import warnings
import os
import joblib
import pickle
from PIL import Image
import base64
import io

# Suppress sklearn version warnings
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
warnings.filterwarnings('ignore', message='.*InconsistentVersionWarning.*')

# Try different import methods for sklearn
try:
    from sklearn.metrics.pairwise import cosine_similarity
    from sklearn.preprocessing import StandardScaler
    SKLEARN_AVAILABLE = True
except ImportError:
    SKLEARN_AVAILABLE = False
    print("Warning: sklearn not available, using fallback methods")

# Import model
try:
    from .face_recognition_model import *
except ImportError:
    from face_recognition_model import *

# Absolute path of the directory containing this file; used to locate the
# model files and Haar cascades that live alongside it.
current_path = os.path.dirname(os.path.abspath(__file__))

# --- GLOBAL SETUP: must match the transforms used at training time ---
# Inference preprocessing: grayscale -> 100x100 -> float tensor in [0, 1].
trnscm = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((100, 100)),
    transforms.ToTensor()
])
# NOTE(review): the previous comment said "4 persons" but only 3 names are
# listed — confirm this matches the trained classifier's label count.
CLASS_NAMES = ['Person1', 'Person2','Person3']

# --- Model filenames (expected to sit next to this file) ---
SIAMESE_MODEL_PATH = current_path + '/siamese_model.t7'
DECISION_TREE_MODEL_PATH = current_path + '/decision_tree_model.sav'
SCALER_PATH = current_path + '/decision_scaler.sav'



def safe_cosine_similarity(embed1, embed2):
    """Return the cosine similarity of two embedding arrays.

    Prefers sklearn's implementation when available; on failure (or when
    sklearn is absent) falls back to a manual NumPy computation. The result
    is clipped to [-1, 1]; 0.0 is returned if every method fails.
    """
    if SKLEARN_AVAILABLE:
        try:
            # sklearn's cosine_similarity requires 2-D inputs, so promote
            # 1-D vectors to single-row matrices.
            mat1 = embed1.reshape(1, -1) if embed1.ndim == 1 else embed1
            mat2 = embed2.reshape(1, -1) if embed2.ndim == 1 else embed2

            score = cosine_similarity(mat1, mat2)[0][0]
            # Guard against floating-point drift outside the valid range.
            return float(np.clip(score, -1.0, 1.0))
        except Exception as e:
            print(f"Sklearn cosine_similarity failed: {e}, using fallback")

    # Fallback: normalize each embedding (epsilon avoids division by zero)
    # and take the dot product of the flattened unit vectors.
    try:
        unit1 = embed1 / (np.linalg.norm(embed1) + 1e-8)
        unit2 = embed2 / (np.linalg.norm(embed2) + 1e-8)

        score = np.dot(unit1.flatten(), unit2.flatten())
        return float(np.clip(score, -1.0, 1.0))
    except Exception as e:
        print(f"Fallback cosine similarity failed: {e}")
        return 0.0

def safe_load_model(file_path, model_type="joblib"):
    """Safely load a serialized model with fallback loading strategies.

    Args:
        file_path: Path to the serialized model on disk.
        model_type: "joblib" (joblib.load with a raw-pickle fallback) or
            "pickle" (plain pickle.load).

    Returns:
        The deserialized model object.

    Raises:
        FileNotFoundError: If file_path does not exist.
        ValueError: If model_type is not "joblib" or "pickle".
        Exception: Re-raised from the loader when every strategy fails.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Model file not found: {file_path}")
    # Previously an unrecognized model_type fell through the try block and
    # silently returned None; fail loudly instead.
    if model_type not in ("joblib", "pickle"):
        raise ValueError(f"Unknown model_type: {model_type!r}")

    try:
        if model_type == "joblib":
            model = joblib.load(file_path)
            # Smoke-test estimators so a corrupted file fails here, not at
            # prediction time deep inside the pipeline.
            if hasattr(model, 'predict'):
                # Use the estimator's recorded feature count when available;
                # the old hardcoded 5 features broke validation for models
                # trained on any other embedding size.
                n_features = getattr(model, 'n_features_in_', 5)
                dummy_data = np.random.randn(1, n_features)
                try:
                    _ = model.predict(dummy_data)
                    print(f"โœ“ Model {file_path} loaded and validated successfully")
                except Exception as test_error:
                    print(f"โš ๏ธ Model loaded but prediction test failed: {test_error}")
            return model
        else:  # model_type == "pickle"
            with open(file_path, 'rb') as f:
                return pickle.load(f)
    except Exception as e:
        print(f"Failed to load {file_path} with {model_type}: {e}")

        # joblib files are often plain pickles; try raw pickle as a fallback.
        if model_type == "joblib":
            try:
                with open(file_path, 'rb') as f:
                    return pickle.load(f)
            except Exception as e2:
                print(f"Pickle fallback also failed: {e2}")
                raise e
        raise

def detected_face(image):
    """Detect faces in a BGR image and return the largest as a PIL image.

    Tries several Haar-cascade parameter combinations in order, then (if
    nothing is found) retries after histogram equalization and Gaussian
    blur preprocessing.

    Args:
        image: BGR image array (OpenCV convention).

    Returns:
        PIL.Image: grayscale crop of the largest detected face; or the
            whole grayscale frame when the cascade file is missing.
        None: when no face could be detected by any strategy.
    """
    face_haar = current_path + '/haarcascade_frontalface_default.xml'

    # Without the cascade file detection is impossible; hand back the full
    # grayscale frame so callers can still proceed.
    if not os.path.exists(face_haar):
        print(f"Warning: {face_haar} not found, using fallback")
        return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))

    # NOTE: the eye cascade previously loaded here was never used, so it
    # has been removed (it cost a file check + cascade load per call).
    face_cascade = cv2.CascadeClassifier(face_haar)

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # (scaleFactor, minNeighbors) combinations tried in order until a face
    # is found. Smaller scaleFactor = finer image pyramid (slower but more
    # thorough); larger minNeighbors = stricter acceptance.
    detection_params = [
        (1.1, 3),   # OpenCV defaults
        (1.05, 4),  # finest scan, slightly stricter
        (1.2, 2),   # coarser/faster scan, looser acceptance
        (1.3, 5)    # coarsest scan, strictest acceptance
    ]

    faces = []
    for scale_factor, min_neighbors in detection_params:
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=scale_factor,
            minNeighbors=min_neighbors,
            minSize=(30, 30),  # ignore tiny spurious detections
            maxSize=(300, 300) # ignore implausibly large detections
        )
        if len(faces) > 0:
            print(f"โœ“ Faces detected with scaleFactor={scale_factor}, minNeighbors={min_neighbors}")
            break

    # Still nothing: retry with simple image preprocessing.
    if len(faces) == 0:
        print("No faces detected with standard parameters, trying preprocessing...")

        # Histogram equalization boosts contrast in poorly lit images.
        gray_eq = cv2.equalizeHist(gray)
        faces = face_cascade.detectMultiScale(gray_eq, 1.1, 3)

        if len(faces) == 0:
            # A light blur can suppress noise that confuses the cascade.
            gray_blur = cv2.GaussianBlur(gray, (3, 3), 0)
            faces = face_cascade.detectMultiScale(gray_blur, 1.1, 3)

    if len(faces) == 0:
        print("No faces detected after all attempts, using fallback")
        return None

    # Crop every detection and keep the one with the largest area.
    crops = [gray[y:y+h, x:x+w] for (x, y, w, h) in faces]
    areas = [w * h for (_, _, w, h) in faces]

    largest_face_idx = int(np.argmax(areas))
    required_image = Image.fromarray(crops[largest_face_idx])

    print(f"โœ“ Selected face {largest_face_idx + 1} of {len(faces)} detected faces")

    return required_image

def get_similarity(img1, img2):
    """Return the cosine similarity between the faces in two BGR images.

    Detects/crops a face in each image, embeds both crops with the Siamese
    network, and compares the embeddings with safe_cosine_similarity.

    Args:
        img1: First BGR image array (OpenCV convention).
        img2: Second BGR image array.

    Returns:
        float: cosine similarity in [-1, 1], or -1.0 on any failure.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    try:
        det_img1 = detected_face(img1)
        det_img2 = detected_face(img2)
        # If detection fails for EITHER image, BOTH fall back to the full
        # grayscale frame — presumably so the two inputs are compared
        # like-for-like (crop vs crop, or frame vs frame).
        # NOTE(review): confirm this is intended; a successful crop on one
        # side is discarded here.
        if det_img1 is None or det_img2 is None:
            det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
            det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
        
        # Apply the training-time transform and add a batch dimension.
        face1 = trnscm(det_img1).unsqueeze(0)
        face2 = trnscm(det_img2).unsqueeze(0)
        
        # Load Siamese Network.
        # NOTE(review): the model is re-loaded from disk on every call —
        # consider caching it at module level.
        siamese_net = Siamese().to(device)
        model_data = torch.load(SIAMESE_MODEL_PATH, map_location=device)
        # Checkpoints may store weights under a 'net_dict' key or as a bare
        # state_dict; support both formats.
        if isinstance(model_data, dict) and 'net_dict' in model_data:
            siamese_net.load_state_dict(model_data['net_dict'])
        else:
            siamese_net.load_state_dict(model_data)
        siamese_net.eval()
        
        # Get one embedding per image (no gradients needed for inference).
        with torch.no_grad():
            embed1 = siamese_net.forward_once(face1.to(device)).cpu().numpy()
            embed2 = siamese_net.forward_once(face2.to(device)).cpu().numpy()
        
        # Calculate similarity
        similarity = safe_cosine_similarity(embed1, embed2)
        return similarity
        
    except Exception as e:
        # -1.0 is the sentinel for "comparison failed".
        print(f"Error in get_similarity: {e}")
        return -1.0

def get_face_class(img1) -> str:
    """Classify the face in a single BGR image, with verbose debug output.

    Pipeline: detect/crop the face (grayscale-frame fallback), embed it
    with the Siamese network, scale the embedding with the saved scaler,
    then classify with the saved decision tree.

    Args:
        img1: BGR image array (OpenCV convention) expected to contain a face.

    Returns:
        str: the matching name from CLASS_NAMES on success,
        "UNKNOWN_CLASS" when a stage fails validation, or an
        "Error: ..." string on an unexpected exception.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    try:
        # --- Face Detection ---
        det_img1 = detected_face(img1)
        if det_img1 is None:
            # No face found: classify the whole grayscale frame instead.
            print("โš ๏ธ No face detected, using grayscale fallback.")
            det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
            
        # Training-time transform + batch dimension, moved to the device.
        face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)
        print(f"๐Ÿงฉ Face tensor shape: {face1_tensor.shape}")

        # --- Load Siamese Model ---
        # NOTE(review): the model is re-loaded from disk on every call —
        # consider caching it at module level.
        siamese_net = Siamese().to(device)
        model_data = torch.load(SIAMESE_MODEL_PATH, map_location=device)

        # Checkpoints may store weights under a 'net_dict' key or as a bare
        # state_dict; support both formats.
        if isinstance(model_data, dict) and 'net_dict' in model_data:
            siamese_net.load_state_dict(model_data['net_dict'])
            print("โœ“ Siamese model loaded (from net_dict key).")
        else:
            siamese_net.load_state_dict(model_data)
            print("โœ“ Siamese model loaded (direct state_dict).")

        siamese_net.eval()

        # --- Extract Embedding ---
        with torch.no_grad():
            embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()

        print(f"๐Ÿง  Raw embedding shape: {embedding_np.shape}")
        print(f"๐Ÿงฎ Embedding mean={np.mean(embedding_np):.5f}, std={np.std(embedding_np):.5f}")

        # The scaler/classifier expect a 2-D (1, n_features) input.
        if embedding_np.ndim == 1:
            embedding_np = embedding_np.reshape(1, -1)

        # --- Load Scaler and Classifier ---
        try:
            scaler = safe_load_model(SCALER_PATH, "joblib")
            classifier = safe_load_model(DECISION_TREE_MODEL_PATH, "joblib")
            print("โœ“ Loaded scaler and classifier via joblib.")
        except Exception as e:
            # joblib failed for both/either; retry with plain pickle.
            print(f"โš ๏ธ Joblib load failed: {e}, trying pickle...")
            scaler = safe_load_model(SCALER_PATH, "pickle")
            classifier = safe_load_model(DECISION_TREE_MODEL_PATH, "pickle")

        # --- Validate Objects ---
        # Guard against deserialized objects that are not the expected
        # estimator types (e.g. a corrupted or mismatched file).
        if not hasattr(scaler, 'transform'):
            print("โŒ Scaler missing transform() โ€” corrupted file?")
            return "UNKNOWN_CLASS"
        if not hasattr(classifier, 'predict'):
            print("โŒ Classifier missing predict() โ€” corrupted file?")
            return "UNKNOWN_CLASS"

        # --- Transform Embedding ---
        try:
            embedding_scaled = scaler.transform(embedding_np)
        except Exception as e:
            # Typically a feature-count mismatch between model and scaler.
            print(f"โŒ Scaler transform failed: {e}")
            return "UNKNOWN_CLASS"

        print(f"โš™๏ธ Scaled embedding (first 5 vals): {embedding_scaled[0, :5]}")
        print(f"โš™๏ธ Scaled embedding mean={np.mean(embedding_scaled):.5f}, std={np.std(embedding_scaled):.5f}")

        # --- Predict Class ---
        try:
            prediction = classifier.predict(embedding_scaled)
            print(f"๐Ÿงพ Raw classifier prediction: {prediction}")
            # Probabilities are logged for debugging only, when supported.
            if hasattr(classifier, 'predict_proba'):
                proba = classifier.predict_proba(embedding_scaled)
                print(f"๐Ÿ“Š Prediction probabilities: {np.round(proba, 3)}")
        except Exception as e:
            print(f"โŒ Prediction failed: {e}")
            return "UNKNOWN_CLASS"

        predicted_label_index = int(prediction[0])

        # --- Map Index to Class Name ---
        # Reject indices outside CLASS_NAMES rather than raising IndexError.
        if 0 <= predicted_label_index < len(CLASS_NAMES):
            class_name = CLASS_NAMES[predicted_label_index]
            print(f"โœ… Final Prediction: Index {predicted_label_index} -> {class_name}")
            return class_name
        else:
            print(f"โš ๏ธ Invalid class index: {predicted_label_index}")
            return "UNKNOWN_CLASS"

    except Exception as e:
        print(f"๐Ÿ’ฅ Error in get_face_class(): {e}")
        return f"Error: {str(e)}"