Spaces:
Sleeping
Sleeping
Your Name
Update README.md to reflect new title, features, and usage instructions for the AI-Powered Facial and Body Feature Editor. Adjust SDK version and enhance ethical usage notice.
1431308
| import numpy as np | |
| import cv2 | |
| from PIL import Image | |
def detect_features(image):
    """
    Detect facial and body features in the input image.

    Faces and eyes are found with OpenCV Haar cascades; nose, lips, hair,
    and body regions are geometric approximations relative to each detected
    face rectangle.

    Args:
        image (numpy.ndarray): Input image, either RGB (H, W, 3) or
            single-channel grayscale (H, W). Float images are assumed to be
            in [0, 1] and are rescaled to uint8.

    Returns:
        dict: Maps feature names ("Eyes", "Nose", "Lips", "Face", "Hair",
            "Body") to lists of (x, y, w, h) bounding boxes. When no face
            is detected, "Body" falls back to the whole image.
    """
    # Haar cascades need 8-bit input; rescale floats assumed to be in [0, 1].
    if image.dtype in (np.float32, np.float64):
        image_uint8 = (image * 255).astype(np.uint8)
    else:
        image_uint8 = image

    features = {
        "Eyes": [],
        "Nose": [],
        "Lips": [],
        "Face": [],
        "Hair": [],
        "Body": []
    }

    # Pre-trained detectors shipped with OpenCV.
    face_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_eye.xml')

    # Detection runs on grayscale. Robustness fix: accept images that are
    # already 2-D instead of crashing inside cvtColor.
    if image_uint8.ndim == 2:
        gray = image_uint8
    else:
        gray = cv2.cvtColor(image_uint8, cv2.COLOR_RGB2GRAY)

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        features["Face"].append((x, y, w, h))

        # Search for eyes only inside the face rectangle.
        face_roi = gray[y:y+h, x:x+w]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(face_roi):
            # Eye coordinates are relative to the ROI; shift to image space.
            features["Eyes"].append((x + ex, y + ey, ew, eh))

        # Approximate nose position (center of face).
        nose_w = w // 4
        nose_h = h // 4
        nose_x = x + w//2 - nose_w//2
        nose_y = y + h//2 - nose_h//2
        features["Nose"].append((nose_x, nose_y, nose_w, nose_h))

        # Approximate lips position (lower third of face).
        lips_w = w // 2
        lips_h = h // 6
        lips_x = x + w//2 - lips_w//2
        lips_y = y + 2*h//3
        features["Lips"].append((lips_x, lips_y, lips_w, lips_h))

        # Approximate hair region (top of face and above, clamped at row 0).
        hair_h = h // 2
        hair_y = max(0, y - hair_h // 2)
        features["Hair"].append((x, hair_y, w, hair_h))

    if len(faces) == 0:
        # No face found: treat the whole image as the body region.
        img_h, img_w = image.shape[:2]
        features["Body"].append((0, 0, img_w, img_h))
    else:
        # Approximate body region (below and wider than each face),
        # clipped to the image bounds.
        for (x, y, w, h) in faces:
            body_x = max(0, x - w // 2)
            body_y = y + h
            body_w = min(w * 2, image.shape[1] - body_x)
            body_h = min(h * 3, image.shape[0] - body_y)
            features["Body"].append((body_x, body_y, body_w, body_h))

    return features
def create_mask(image, feature_type, features):
    """
    Build a soft binary mask covering the boxes of the selected feature.

    Args:
        image (numpy.ndarray): Input image (only its height/width are used).
        feature_type (str): Feature to mask. "Face Shape" is an alias for
            "Face"; unrecognized names also fall back to "Face".
        features (dict): Feature name -> list of (x, y, w, h) boxes.

    Returns:
        numpy.ndarray: float32 mask in [0, 1] with Gaussian-softened edges.
    """
    mask = np.zeros(image.shape[:2], dtype=np.float32)

    # Resolve the dictionary key for the requested feature.
    if feature_type == "Face Shape":
        key = "Face"
    else:
        key = feature_type if feature_type in features else "Face"

    # Paint each bounding box as a filled rectangle of value 1.0.
    for (x, y, w, h) in features[key]:
        cv2.rectangle(mask, (x, y), (x + w, y + h), 1.0, -1)

    # Soften the hard rectangle edges, then rescale back to [0, 1].
    mask = cv2.GaussianBlur(mask, (21, 21), 0)
    peak = mask.max()
    if peak > 0:
        mask = mask / peak
    return mask
def refine_mask_with_segmentation(image, mask, feature_type):
    """
    Refine the initial mask using GrabCut segmentation for more precise
    feature isolation.

    Args:
        image (numpy.ndarray): Input RGB image; float images are assumed
            to be in [0, 1] and rescaled to uint8.
        mask (numpy.ndarray): Initial soft mask in [0, 1].
        feature_type (str): Type of feature being masked (unused by the
            refinement itself; kept for interface compatibility).

    Returns:
        numpy.ndarray: Refined soft mask in [0, 1]; the input mask is
            returned unchanged if GrabCut fails.
    """
    # GrabCut expects an 8-bit 3-channel image.
    if image.dtype in (np.float32, np.float64):
        image_uint8 = (image * 255).astype(np.uint8)
    else:
        image_uint8 = image

    # Seed the GrabCut label mask from the soft mask. Bug fix: pixels with
    # mask > 0.5 are now seeded as definite foreground (GC_FGD); the
    # original assigned GC_PR_FGD here, contradicting its own comment and
    # making the two threshold bands identical.
    grabcut_mask = np.zeros(image.shape[:2], dtype=np.uint8)
    grabcut_mask[mask > 0.5] = cv2.GC_FGD
    # Moderate mask values are probably foreground.
    grabcut_mask[(mask > 0.1) & (mask <= 0.5)] = cv2.GC_PR_FGD
    # Everything else is probably background.
    grabcut_mask[mask <= 0.1] = cv2.GC_PR_BGD

    # Scratch model arrays required by the GrabCut API.
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)

    try:
        cv2.grabCut(
            image_uint8,
            grabcut_mask,
            None,
            bgd_model,
            fgd_model,
            5,
            cv2.GC_INIT_WITH_MASK
        )
    except cv2.error:
        # GrabCut can fail on degenerate seed regions; fall back to the
        # unrefined mask. Narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return mask

    # Convert GrabCut labels back to a soft mask: definite foreground is
    # full strength, probable foreground slightly weaker.
    refined_mask = np.zeros_like(mask)
    refined_mask[grabcut_mask == cv2.GC_FGD] = 1.0
    refined_mask[grabcut_mask == cv2.GC_PR_FGD] = 0.8

    # Soften the label edges and renormalize to [0, 1].
    refined_mask = cv2.GaussianBlur(refined_mask, (15, 15), 0)
    if refined_mask.max() > 0:
        refined_mask = refined_mask / refined_mask.max()
    return refined_mask