""" Face Shape Detection - Hugging Face Space App Uses MediaPipe for face mesh extraction and a trained ML model for classification. """ import cv2 import mediapipe as mp from mediapipe.tasks import python from mediapipe.tasks.python import vision import numpy as np import pickle import gradio as gr from pathlib import Path from PIL import Image # Paths to model files PROJECT_DIR = Path(__file__).parent MODEL_FILE = PROJECT_DIR / 'face_shape_model.pkl' LABEL_ENCODER_FILE = PROJECT_DIR / 'label_encoder.pkl' # Face shape descriptions for user-friendly output FACE_SHAPE_INFO = { "oval": { "emoji": "🥚", "description": "Balanced proportions with a slightly narrower forehead and jaw. Often considered the most versatile face shape.", "tips": "Most hairstyles and glasses work well with oval faces." }, "round": { "emoji": "🌕", "description": "Equal width and length with soft, curved lines. Full cheeks and a rounded chin.", "tips": "Angular frames and layered hairstyles can add definition." }, "square": { "emoji": "⬛", "description": "Strong, angular jawline with forehead and jaw of similar width.", "tips": "Round or oval glasses and soft, layered hairstyles complement this shape." }, "heart": { "emoji": "❤️", "description": "Wider forehead tapering to a narrower chin, often with prominent cheekbones.", "tips": "Bottom-heavy frames and chin-length hairstyles work great." }, "oblong": { "emoji": "📏", "description": "Longer than wide with a straight cheek line and sometimes a longer nose.", "tips": "Wide frames and voluminous hairstyles add width and balance." } } def normalize_landmarks(keypoints, width, height): """ Normalize keypoints to be centered, roll-corrected, and scaled. Retains 3D coordinates (Z) but aligns to the 2D plane based on eyes. """ if not keypoints: return [] landmarks = np.array([[kp["x"], kp["y"], kp["z"]] for kp in keypoints]) # Denormalize to pixel coordinates landmarks[:, 0] *= width landmarks[:, 1] *= height landmarks[:, 2] *= width # Iris indices (refine_landmarks=True gives 478 points) left_iris_idx = 468 right_iris_idx = 473 if len(landmarks) > right_iris_idx: left_iris = landmarks[left_iris_idx] right_iris = landmarks[right_iris_idx] else: # Fallback to eye corners p1 = landmarks[33] p2 = landmarks[133] left_iris = (p1 + p2) / 2 p3 = landmarks[362] p4 = landmarks[263] right_iris = (p3 + p4) / 2 # 1. Centering eye_center = (left_iris + right_iris) / 2.0 landmarks -= eye_center # 2. Rotation (Roll Correction) delta = left_iris - right_iris dX, dY = delta[0], delta[1] angle = np.arctan2(dY, dX) c, s = np.cos(-angle), np.sin(-angle) R = np.array([ [c, -s, 0], [s, c, 0], [0, 0, 1] ]) landmarks = landmarks.dot(R.T) # 3. Scaling dist = np.sqrt(dX**2 + dY**2) if dist > 0: scale = 1.0 / dist landmarks *= scale return [(round(float(l[0]), 5), round(float(l[1]), 5), round(float(l[2]), 5)) for l in landmarks] def process_image_for_mesh(img_array): """ Process image array to get face mesh data using MediaPipe Tasks API. 
def process_image_for_mesh(img_array):
    """
    Process image array to get face mesh data using MediaPipe Tasks API.

    Returns: keypoints, processed_img, error_message
    """
    max_width_or_height = 512
    model_path = str(PROJECT_DIR / 'face_landmarker.task')

    # Convert PIL to numpy if needed
    if isinstance(img_array, Image.Image):
        img_array = np.array(img_array)

    # Handle RGBA images
    if len(img_array.shape) == 3 and img_array.shape[2] == 4:
        img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2RGB)

    # Ensure RGB format
    if len(img_array.shape) == 3 and img_array.shape[2] == 3:
        img_rgb = img_array.copy()
    else:
        return None, None, "Invalid image format"

    # Downscale large images
    h, w = img_rgb.shape[:2]
    longest = max(h, w)
    if longest > max_width_or_height:
        scale = max_width_or_height / float(longest)
        new_w = max(1, int(round(w * scale)))
        new_h = max(1, int(round(h * scale)))
        img_rgb = cv2.resize(img_rgb, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # Create MediaPipe Image
    mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img_rgb)

    # Initialize FaceLandmarker
    base_options = python.BaseOptions(model_asset_path=model_path)
    options = vision.FaceLandmarkerOptions(
        base_options=base_options,
        output_face_blendshapes=False,
        output_facial_transformation_matrixes=False,
        num_faces=1,
        min_face_detection_confidence=0.5)

    try:
        with vision.FaceLandmarker.create_from_options(options) as detector:
            # Detect landmarks
            detection_result = detector.detect(mp_image)

            if not detection_result.face_landmarks:
                return None, None, "No face detected in the image. Please upload a clear photo with a visible face."

            keypoints = []
            for landmark in detection_result.face_landmarks[0]:
                keypoints.append({
                    "x": round(landmark.x, 5),
                    "y": round(landmark.y, 5),
                    "z": round(landmark.z, 5)
                })
            return keypoints, img_rgb, None
    except Exception as e:
        return None, None, f"Error processing image: {str(e)}"


def draw_face_mesh_overlay(img_rgb, keypoints):
    """Draw face mesh overlay on the image for visualization."""
    img_overlay = img_rgb.copy()
    h, w = img_overlay.shape[:2]

    # Draw key landmark points
    for kp in keypoints:
        x = int(kp["x"] * w)
        y = int(kp["y"] * h)
        # Draw small circles at landmark positions
        cv2.circle(img_overlay, (x, y), 1, (0, 255, 200), -1)

    # Draw face contour (simplified)
    contour_indices = [10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288,
                       397, 365, 379, 378, 400, 377, 152, 148, 176, 149, 150, 136,
                       172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109, 10]
    for i in range(len(contour_indices) - 1):
        idx1 = contour_indices[i]
        idx2 = contour_indices[i + 1]
        if idx1 < len(keypoints) and idx2 < len(keypoints):
            pt1 = (int(keypoints[idx1]["x"] * w), int(keypoints[idx1]["y"] * h))
            pt2 = (int(keypoints[idx2]["x"] * w), int(keypoints[idx2]["y"] * h))
            cv2.line(img_overlay, pt1, pt2, (100, 255, 180), 2)

    return img_overlay

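
# Illustrative sketch (not called anywhere else in this app): the pickled
# classifier expects a single row of 2 * 478 = 956 features -- the normalized
# x and y of every landmark, flattened in landmark order. This helper only
# documents that feature layout and mirrors the flattening performed inside
# predict_face_shape() below; the helper name itself is not part of the
# original pipeline.
def _keypoints_to_feature_row(normalized_kpts):
    """Flatten normalized (x, y, z) tuples into one [x0, y0, x1, y1, ...] row."""
    row = []
    for x, y, _z in normalized_kpts:
        row.extend([x, y])
    return np.array([row])
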
# Load model at startup
print("Loading face shape classification model...")
try:
    with open(MODEL_FILE, 'rb') as f:
        model = pickle.load(f)
    with open(LABEL_ENCODER_FILE, 'rb') as f:
        label_encoder = pickle.load(f)
    print("Model loaded successfully!")
    MODEL_LOADED = True
except Exception as e:
    print(f"Error loading model: {e}")
    MODEL_LOADED = False
    model = None
    label_encoder = None


def predict_face_shape(image):
    """
    Main prediction function for Gradio interface.
    """
    if image is None:
        return None, "Please upload an image.", ""

    if not MODEL_LOADED:
        return None, "Model not loaded. Please check server logs.", ""

    # Process image and extract landmarks
    keypoints, img_processed, error = process_image_for_mesh(image)
    if error:
        return None, error, ""

    # Create visualization
    img_overlay = draw_face_mesh_overlay(img_processed, keypoints)

    # Normalize landmarks
    h, w = img_processed.shape[:2]
    normalized_kpts = normalize_landmarks(keypoints, w, h)

    # Prepare features (flatten x, y only)
    flattened_features = []
    for kp in normalized_kpts:
        flattened_features.extend([kp[0], kp[1]])
    features_array = np.array([flattened_features])

    # Predict
    probas = model.predict_proba(features_array)[0]
    prediction_idx = model.predict(features_array)[0]
    predicted_label = label_encoder.inverse_transform([prediction_idx])[0]

    # Build results
    info = FACE_SHAPE_INFO.get(predicted_label.lower(), {
        "emoji": "✨",
        "description": "A unique face shape.",
        "tips": "Embrace your unique features!"
    })

    # Format confidence scores
    confidence_text = ""
    class_indices = np.argsort(probas)[::-1]
    for i in class_indices:
        class_name = label_encoder.classes_[i]
        score = probas[i]
        bar = "█" * int(score * 20)
        confidence_text += f"{class_name.capitalize():10} {bar} {score*100:.1f}%\n"
    # Main result
    result_html = f"""
    <div style="text-align: center; padding: 16px;">
        <h2>{info['emoji']} {predicted_label.capitalize()}</h2>
        <p>{info['description']}</p>
        <p><b>💡 Tip:</b> {info['tips']}</p>
    </div>
    """

    return img_overlay, result_html, confidence_text


# Build the Gradio interface
with gr.Blocks() as demo:
    gr.HTML("""
        <div style="text-align: center;">
            <h1>Face Shape Detection</h1>
            <p>Upload a photo to discover your face shape using AI-powered analysis</p>
        </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            input_image = gr.Image(
                label="📷 Upload Your Photo",
                type="numpy",
                sources=["upload", "webcam"],
                height=400
            )
            analyze_btn = gr.Button("✨ Analyze Face Shape", variant="primary", size="lg")
            gr.Markdown("""
            ### 📋 Tips for Best Results
            - Use a **front-facing** photo with good lighting
            - Ensure your **entire face** is visible
            - Remove glasses if possible
            - Avoid tilting your head
            """)

        with gr.Column(scale=1):
            output_image = gr.Image(
                label="🎯 Face Mesh Analysis",
                height=400
            )
            result_html = gr.HTML(label="Result")
            with gr.Accordion("📊 Confidence Scores", open=False):
                confidence_output = gr.Textbox(
                    label="",
                    lines=6,
                    interactive=False
                )

    gr.HTML("""
        <div style="text-align: center; color: #888;">
            <p>🔬 Powered by MediaPipe Face Mesh & Machine Learning</p>
            <p>📐 Analyzes 478 facial landmarks for accurate shape detection</p>
        </div>
    """)

    # Wire the analyze button to the prediction function
    analyze_btn.click(
        fn=predict_face_shape,
        inputs=input_image,
        outputs=[output_image, result_html, confidence_output]
    )
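

# Entry point: launch the Gradio app. This assumes the module is executed
# directly (e.g. `python app.py`, which is how Hugging Face Spaces runs it);
# all launch() options are left at Gradio defaults.
if __name__ == "__main__":
    demo.launch()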