# NOTE(review): the three lines below were Hugging Face Spaces page residue
# ("Spaces: / Sleeping / Sleeping"), not source code — commented out so the
# module parses.
# Spaces:
# Sleeping
# Sleeping
"""
Robust face recognition implementation that handles sklearn version compatibility issues
"""
import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
import warnings
import os
import joblib
import pickle
from PIL import Image
import base64
import io

# Suppress sklearn version warnings (persisted models may have been pickled
# by a different sklearn release, which emits InconsistentVersionWarning).
warnings.filterwarnings('ignore', category=UserWarning, module='sklearn')
warnings.filterwarnings('ignore', message='.*InconsistentVersionWarning.*')

# Try different import methods for sklearn; fall back to manual math when absent.
try:
    from sklearn.metrics.pairwise import cosine_similarity
    from sklearn.preprocessing import StandardScaler
    SKLEARN_AVAILABLE = True
except ImportError:
    SKLEARN_AVAILABLE = False
    print("Warning: sklearn not available, using fallback methods")

# Import the model definition (provides Siamese, transforms, ...) whether the
# module runs as part of a package or as a standalone script.
try:
    from .face_recognition_model import *
except ImportError:
    from face_recognition_model import *

# current_path stores the absolute path of the directory containing this file.
current_path = os.path.dirname(os.path.abspath(__file__))

# --- GLOBAL SETUP: Must match your training transforms ---
# Define the transformation pipeline for inference:
# grayscale -> 100x100 resize -> tensor (must mirror training-time transforms).
trnscm = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((100, 100)),
    transforms.ToTensor()
])

# Class index -> person name mapping used by get_face_class().
# NOTE(review): the original comment said "Only 4 persons" but only 3 names
# are listed — confirm the classifier was trained on exactly these 3 classes.
CLASS_NAMES = ['Person1', 'Person2', 'Person3']  # Only 3 persons listed

# --- Model Filenames ---
SIAMESE_MODEL_PATH = current_path + '/siamese_model.t7'
DECISION_TREE_MODEL_PATH = current_path + '/decision_tree_model.sav'
SCALER_PATH = current_path + '/decision_scaler.sav'
def safe_cosine_similarity(embed1, embed2):
    """Calculate cosine similarity with fallback methods.

    Uses sklearn's cosine_similarity when available; if sklearn is missing
    or its call fails, computes the value manually with numpy. The score is
    clipped to [-1, 1]; 0.0 is returned when every method fails.
    """
    if SKLEARN_AVAILABLE:
        try:
            # sklearn expects 2D (n_samples, n_features) arrays.
            a = embed1.reshape(1, -1) if embed1.ndim == 1 else embed1
            b = embed2.reshape(1, -1) if embed2.ndim == 1 else embed2
            score = cosine_similarity(a, b)[0][0]
            # Clamp to the valid cosine range before returning.
            return float(np.clip(score, -1.0, 1.0))
        except Exception as e:
            print(f"Sklearn cosine_similarity failed: {e}, using fallback")

    # Fallback: dot product of L2-normalized vectors (epsilon guards /0).
    try:
        unit1 = embed1 / (np.linalg.norm(embed1) + 1e-8)
        unit2 = embed2 / (np.linalg.norm(embed2) + 1e-8)
        score = np.dot(unit1.flatten(), unit2.flatten())
        return float(np.clip(score, -1.0, 1.0))
    except Exception as e:
        print(f"Fallback cosine similarity failed: {e}")
        return 0.0
def safe_load_model(file_path, model_type="joblib"):
    """Safely load a serialized model/scaler with fallback methods.

    Parameters
    ----------
    file_path : str
        Path to the serialized object.
    model_type : str
        "joblib" (default) or "pickle". For "joblib", a plain pickle load
        is attempted as a fallback if joblib fails.

    Returns
    -------
    The deserialized object.

    Raises
    ------
    FileNotFoundError
        If file_path does not exist.
    ValueError
        If model_type is not "joblib" or "pickle".
    Exception
        The original load error when all loading methods fail.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Model file not found: {file_path}")
    if model_type not in ("joblib", "pickle"):
        # BUG FIX: an unknown model_type previously fell through and
        # silently returned None; fail loudly instead.
        raise ValueError(f"Unknown model_type: {model_type}")
    try:
        if model_type == "joblib":
            model = joblib.load(file_path)
            # Additional validation for estimators: try a dummy prediction.
            if hasattr(model, 'predict'):
                # Use the trained feature count when the estimator exposes
                # it (sklearn >= 0.24); fall back to the historical default
                # of 5 features otherwise.
                n_features = getattr(model, 'n_features_in_', 5)
                dummy_data = np.random.randn(1, n_features)
                try:
                    _ = model.predict(dummy_data)
                    print(f"โ Model {file_path} loaded and validated successfully")
                except Exception as test_error:
                    # Validation is best-effort: keep the model even if the
                    # smoke-test prediction fails.
                    print(f"โ ๏ธ Model loaded but prediction test failed: {test_error}")
            return model
        else:  # model_type == "pickle"
            with open(file_path, 'rb') as f:
                return pickle.load(f)
    except Exception as e:
        print(f"Failed to load {file_path} with {model_type}: {e}")
        # A joblib file is often a plain pickle under the hood — try that.
        if model_type == "joblib":
            try:
                with open(file_path, 'rb') as f:
                    return pickle.load(f)
            except Exception as e2:
                print(f"Pickle fallback also failed: {e2}")
                raise e
        else:
            raise e
def detected_face(image):
    """Detect and crop the largest face in a BGR image.

    Tries several Haar-cascade parameter sets, then histogram equalization
    and Gaussian blur as preprocessing fallbacks.

    Parameters
    ----------
    image : np.ndarray
        BGR image (OpenCV convention).

    Returns
    -------
    PIL.Image.Image or None
        Grayscale crop of the largest detected face; the full grayscale
        image when the cascade file is missing; None when no face is found.
    """
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'

    # Without the cascade file nothing can be detected: return the whole
    # grayscale frame so downstream code still has something to embed.
    if not os.path.exists(face_haar):
        print(f"Warning: {face_haar} not found, using fallback")
        return Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))

    face_cascade = cv2.CascadeClassifier(face_haar)
    # NOTE(review): eye_cascade is loaded but never used in this function.
    eye_cascade = cv2.CascadeClassifier(eye_haar) if os.path.exists(eye_haar) else None

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # (scaleFactor, minNeighbors) candidates, tried in order until a face is
    # found. A smaller scaleFactor gives a finer scale pyramid (more
    # thorough); a higher minNeighbors is a stricter acceptance threshold.
    detection_params = [
        (1.1, 3),   # default
        (1.05, 4),  # finer pyramid, stricter acceptance
        (1.2, 2),   # coarser pyramid, looser acceptance (faster)
        (1.3, 5)    # coarsest pyramid, strictest acceptance (fastest)
    ]

    faces = []
    for scale_factor, min_neighbors in detection_params:
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=scale_factor,
            minNeighbors=min_neighbors,
            minSize=(30, 30),   # Minimum face size
            maxSize=(300, 300)  # Maximum face size
        )
        if len(faces) > 0:
            print(f"โ Faces detected with scaleFactor={scale_factor}, minNeighbors={min_neighbors}")
            break

    # Still nothing: retry after simple image preprocessing.
    if len(faces) == 0:
        print("No faces detected with standard parameters, trying preprocessing...")
        # Histogram equalization boosts contrast in poorly lit images.
        gray_eq = cv2.equalizeHist(gray)
        faces = face_cascade.detectMultiScale(gray_eq, 1.1, 3)
        if len(faces) == 0:
            # Gaussian blur suppresses noise that confuses the cascade.
            gray_blur = cv2.GaussianBlur(gray, (3, 3), 0)
            faces = face_cascade.detectMultiScale(gray_blur, 1.1, 3)

    if len(faces) == 0:
        print("No faces detected after all attempts, using fallback")
        return None

    # Pick the largest detection by bounding-box area; crop only that one
    # (the original cropped every face just to discard all but the largest).
    face_areas = [w * h for (x, y, w, h) in faces]
    largest_face_idx = int(np.argmax(face_areas))
    x, y, w, h = faces[largest_face_idx]
    required_image = Image.fromarray(gray[y:y+h, x:x+w])
    print(f"โ Selected face {largest_face_idx + 1} of {len(faces)} detected faces")
    return required_image
def get_similarity(img1, img2):
    """Get similarity between two face images.

    Detects a face in each BGR image (falling back to the full grayscale
    frame when either detection fails), embeds both through the Siamese
    network, and returns their cosine similarity. Returns -1.0 on any error.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    try:
        det_img1 = detected_face(img1)
        det_img2 = detected_face(img2)
        # Fall back to the whole grayscale frames if either detection failed.
        if det_img1 is None or det_img2 is None:
            det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
            det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))

        face1 = trnscm(det_img1).unsqueeze(0)
        face2 = trnscm(det_img2).unsqueeze(0)

        # Load the Siamese network; some checkpoints wrap the weights
        # under a 'net_dict' key, others are a bare state dict.
        siamese_net = Siamese().to(device)
        model_data = torch.load(SIAMESE_MODEL_PATH, map_location=device)
        if isinstance(model_data, dict) and 'net_dict' in model_data:
            state_dict = model_data['net_dict']
        else:
            state_dict = model_data
        siamese_net.load_state_dict(state_dict)
        siamese_net.eval()

        # Embed both faces without tracking gradients, then compare.
        with torch.no_grad():
            embed1 = siamese_net.forward_once(face1.to(device)).cpu().numpy()
            embed2 = siamese_net.forward_once(face2.to(device)).cpu().numpy()
        return safe_cosine_similarity(embed1, embed2)
    except Exception as e:
        print(f"Error in get_similarity: {e}")
        return -1.0
def get_face_class(img1):
    """Get face class for a single image with detailed debugging.

    Pipeline: face detection -> Siamese embedding -> feature scaling ->
    decision-tree classification -> CLASS_NAMES lookup. Progress and
    intermediate statistics are printed at every stage.

    Parameters
    ----------
    img1 : np.ndarray
        BGR image (OpenCV convention), as produced by cv2.imread/capture.

    Returns
    -------
    str
        A name from CLASS_NAMES on success, "UNKNOWN_CLASS" when a stage
        fails or the predicted index is out of range, or "Error: ..." when
        an unexpected exception escapes the pipeline.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    try:
        # --- Face Detection ---
        det_img1 = detected_face(img1)
        if det_img1 is None:
            # No face found: embed the whole grayscale frame instead.
            print("โ ๏ธ No face detected, using grayscale fallback.")
            det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        face1_tensor = trnscm(det_img1).unsqueeze(0).to(device)
        print(f"๐งฉ Face tensor shape: {face1_tensor.shape}")

        # --- Load Siamese Model ---
        # Some checkpoints wrap the weights under a 'net_dict' key,
        # others are a bare state dict.
        siamese_net = Siamese().to(device)
        model_data = torch.load(SIAMESE_MODEL_PATH, map_location=device)
        if isinstance(model_data, dict) and 'net_dict' in model_data:
            siamese_net.load_state_dict(model_data['net_dict'])
            print("โ Siamese model loaded (from net_dict key).")
        else:
            siamese_net.load_state_dict(model_data)
            print("โ Siamese model loaded (direct state_dict).")
        siamese_net.eval()

        # --- Extract Embedding ---
        with torch.no_grad():
            embedding_np = siamese_net.forward_once(face1_tensor).cpu().numpy()
        print(f"๐ง Raw embedding shape: {embedding_np.shape}")
        print(f"๐งฎ Embedding mean={np.mean(embedding_np):.5f}, std={np.std(embedding_np):.5f}")
        # Scaler/classifier expect a 2D (1, n_features) array.
        if embedding_np.ndim == 1:
            embedding_np = embedding_np.reshape(1, -1)

        # --- Load Scaler and Classifier ---
        # Prefer joblib; fall back to raw pickle on any failure.
        try:
            scaler = safe_load_model(SCALER_PATH, "joblib")
            classifier = safe_load_model(DECISION_TREE_MODEL_PATH, "joblib")
            print("โ Loaded scaler and classifier via joblib.")
        except Exception as e:
            print(f"โ ๏ธ Joblib load failed: {e}, trying pickle...")
            scaler = safe_load_model(SCALER_PATH, "pickle")
            classifier = safe_load_model(DECISION_TREE_MODEL_PATH, "pickle")

        # --- Validate Objects ---
        # Guard against files that unpickled into the wrong kind of object.
        if not hasattr(scaler, 'transform'):
            print("โ Scaler missing transform() โ corrupted file?")
            return "UNKNOWN_CLASS"
        if not hasattr(classifier, 'predict'):
            print("โ Classifier missing predict() โ corrupted file?")
            return "UNKNOWN_CLASS"

        # --- Transform Embedding ---
        try:
            embedding_scaled = scaler.transform(embedding_np)
        except Exception as e:
            # Typically a feature-count mismatch between embedding and scaler.
            print(f"โ Scaler transform failed: {e}")
            return "UNKNOWN_CLASS"
        print(f"โ๏ธ Scaled embedding (first 5 vals): {embedding_scaled[0, :5]}")
        print(f"โ๏ธ Scaled embedding mean={np.mean(embedding_scaled):.5f}, std={np.std(embedding_scaled):.5f}")

        # --- Predict Class ---
        try:
            prediction = classifier.predict(embedding_scaled)
            print(f"๐งพ Raw classifier prediction: {prediction}")
            if hasattr(classifier, 'predict_proba'):
                # Probabilities are logged for debugging only.
                proba = classifier.predict_proba(embedding_scaled)
                print(f"๐ Prediction probabilities: {np.round(proba, 3)}")
        except Exception as e:
            print(f"โ Prediction failed: {e}")
            return "UNKNOWN_CLASS"
        predicted_label_index = int(prediction[0])

        # --- Map Index to Class Name ---
        if 0 <= predicted_label_index < len(CLASS_NAMES):
            class_name = CLASS_NAMES[predicted_label_index]
            print(f"โ Final Prediction: Index {predicted_label_index} -> {class_name}")
            return class_name
        else:
            # Classifier predicted a label outside the known class list.
            print(f"โ ๏ธ Invalid class index: {predicted_label_index}")
            return "UNKNOWN_CLASS"
    except Exception as e:
        print(f"๐ฅ Error in get_face_class(): {e}")
        return f"Error: {str(e)}"