# Spaces:
# Sleeping
# Sleeping
import torch
import numpy as np
import cv2
import json
import os
import gradio as gr
# Fixed: the original imported "detectron2.detectron2.engine" / ".config",
# which doubles the package path and fails on a normal detectron2 install.
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2 import model_zoo
import torch_utils
import dnnlib

# Directory/file where detected keypoints are persisted as JSON.
output_dir = "key/"
os.makedirs(output_dir, exist_ok=True)
output_file = os.path.join(output_dir, "keypoints.json")

# Pre-trained COCO Keypoint R-CNN (17-keypoint person model) from the model zoo.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
# Fall back to CPU when no GPU is available.
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

predictor = DefaultPredictor(cfg)
def process_image(image, user_height_cm):
    """Detect body keypoints in *image* and estimate body measurements.

    Args:
        image: PIL image from Gradio (RGB).
        user_height_cm: the person's real height in cm; calibrates the
            pixel-to-cm scale for all measurements.

    Returns:
        (measurements_dict, annotated_rgb_image) on success, or
        (error_message_str, None) when detection fails or input is invalid.
    """
    # Guard: a missing/zero height would make the pixel scale undefined
    # (the original crashed with ZeroDivisionError/TypeError here).
    if not user_height_cm or user_height_cm <= 0:
        return "Please enter a valid height in cm.", None

    # Gradio supplies a PIL RGB image; OpenCV drawing expects a BGR ndarray.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Run keypoint detection.
    outputs = predictor(image)
    instances = outputs["instances"]
    keypoints = (
        instances.pred_keypoints.cpu().numpy().tolist()
        if instances.has("pred_keypoints")
        else None
    )
    if not keypoints:
        return "No keypoints detected.", None

    # Persist the raw keypoints for downstream inspection.
    with open(output_file, "w") as f:
        json.dump({"keypoints": keypoints}, f, indent=4)

    # Use the first detected person only; keep (x, y), drop the confidence column.
    keypoints = np.array(keypoints[0])[:, :2]

    # COCO 17-keypoint indices actually used below (unused elbow/wrist
    # constants from the original removed).
    NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
    L_WRIST = 9
    L_HIP, R_HIP = 11, 12
    L_ANKLE, R_ANKLE = 15, 16

    # Shoulder-hip box drawn as a rough torso outline (COCO indices).
    skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]

    # Draw keypoints.
    for x, y in keypoints:
        cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)
    # Draw skeleton.
    for pt1, pt2 in skeleton:
        x1, y1 = map(int, keypoints[pt1])
        x2, y2 = map(int, keypoints[pt2])
        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)

    def get_distance(p1, p2):
        # Euclidean distance between two (x, y) points.
        return np.linalg.norm(np.array(p1) - np.array(p2))

    # Nose-to-ankle span is assumed to be ~87% of full standing height;
    # extrapolate full body height in pixels from that ratio.
    ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
    pixel_height = get_distance(keypoints[NOSE], ankle_mid)
    if pixel_height == 0:
        # Degenerate detection (nose on top of ankles) — avoid divide-by-zero.
        return "Could not measure body height from keypoints.", None
    estimated_full_pixel_height = pixel_height / 0.87
    pixels_per_cm = estimated_full_pixel_height / user_height_cm

    # Frontal widths, converted to cm via the calibrated scale.
    shoulder_width_cm = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER]) / pixels_per_cm
    waist_width_cm = get_distance(keypoints[L_HIP], keypoints[R_HIP]) / pixels_per_cm

    # Torso length: mid-shoulder ("neck") down to mid-hip ("pelvis").
    pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
    neck = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
    torso_length_cm = get_distance(neck, pelvis) / pixels_per_cm

    # Full left-arm length, shoulder to wrist, as a single straight segment.
    arm_length_cm = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST]) / pixels_per_cm

    # Circumference heuristics: circle approximation from frontal waist width
    # (C = pi * d), and hips assumed ~1/0.75 of the waist circumference.
    waist_circumference = np.pi * waist_width_cm
    hip_circumference = waist_circumference / 0.75

    measurements = {
        "Waist Circumference (cm)": round(waist_circumference, 2),
        "Hip Circumference (cm)": round(hip_circumference, 2),
        "Shoulder Width (cm)": round(shoulder_width_cm, 2),
        "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
        "Full Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
    }
    # Return the annotated image back in RGB for Gradio display.
    return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Gradio UI: image + height in; measurements JSON + annotated image out.
demo = gr.Interface(
    fn=process_image,
    inputs=[gr.Image(type="pil"), gr.Number(label="User Height (cm)")],
    outputs=[gr.JSON(label="Measurements"), gr.Image(type="pil", label="Keypoint Overlay")],
    title="Keypoint Measurement Extractor",
    description="Upload an image, enter your height, and get body measurements based on keypoints.",
)
demo.launch()