AnishaNaik03 commited on
Commit
098fb44
·
1 Parent(s): 03bccb1

add app and requirements

Browse files
Files changed (2) hide show
  1. app.py +123 -0
  2. requirements.txt +14 -0
app.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import numpy as np
import cv2
import json
import os
import gradio as gr
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2 import model_zoo

# Directory/file where the raw detected keypoints are dumped on each request.
# NOTE(review): a single shared file means concurrent requests overwrite each
# other's output — confirm whether this persistence is actually needed.
output_dir = "key/"
os.makedirs(output_dir, exist_ok=True)
output_file = os.path.join(output_dir, "keypoints.json")

# Configure a COCO-pretrained Keypoint R-CNN (ResNet-50 FPN, 3x schedule)
# straight from the detectron2 model zoo.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
# Fall back to CPU when no GPU is available (e.g. a CPU-only Space).
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Build the predictor once at import time so every request reuses the model.
predictor = DefaultPredictor(cfg)
24
+
25
def process_image(image, user_height_cm):
    """Detect human keypoints in *image* and estimate body measurements.

    Pixel distances are calibrated against the user's real height: the
    nose-to-ankle distance is assumed to be ~87% of full body height.

    Args:
        image: PIL image (RGB) from the Gradio input component.
        user_height_cm: The user's real height in centimetres. ``gr.Number``
            yields ``None`` when the field is left blank.

    Returns:
        ``(measurements_dict, annotated_rgb_image)`` on success, or
        ``(error_message_str, None)`` when validation/detection fails.
    """
    # Guard: an empty or non-positive height would divide by zero (or raise
    # TypeError on None) when computing pixels_per_cm below.
    if not user_height_cm or user_height_cm <= 0:
        return "Please enter a valid height in cm.", None

    # Gradio hands us a PIL RGB image; OpenCV works in BGR.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Run keypoint detection.
    outputs = predictor(image)

    # Extract keypoints (list of per-instance [x, y, score] triples).
    instances = outputs["instances"]
    keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None

    if not keypoints:
        return "No keypoints detected.", None

    # Persist raw keypoints for debugging/inspection.
    with open(output_file, "w") as f:
        json.dump({"keypoints": keypoints}, f, indent=4)

    # Use the first detected person only; drop the confidence score,
    # keeping just the (x, y) coordinates.
    keypoints = np.array(keypoints[0])[:, :2]

    # COCO 17-point format indices (unused elbow indices removed).
    NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
    L_WRIST = 9
    L_HIP, R_HIP = 11, 12
    L_ANKLE, R_ANKLE = 15, 16

    # Keypoint pairs to connect when drawing the torso outline.
    skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]

    # Draw keypoints.
    for x, y in keypoints:
        cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)

    # Draw skeleton.
    for pt1, pt2 in skeleton:
        x1, y1 = map(int, keypoints[pt1])
        x2, y2 = map(int, keypoints[pt2])
        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)

    def get_distance(p1, p2):
        """Euclidean distance between two 2-D points."""
        return np.linalg.norm(np.array(p1) - np.array(p2))

    # Calibration: nose-to-ankle-midpoint distance is taken as 87% of the
    # full height (the head top sits above the nose), then scaled by the
    # user's real height to get pixels-per-cm.
    ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
    pixel_height = get_distance(keypoints[NOSE], ankle_mid)
    estimated_full_pixel_height = pixel_height / 0.87
    pixels_per_cm = estimated_full_pixel_height / user_height_cm

    # Shoulder and waist widths in pixels, converted to cm.
    shoulder_width_px = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER])
    waist_width_px = get_distance(keypoints[L_HIP], keypoints[R_HIP])
    shoulder_width_cm = shoulder_width_px / pixels_per_cm
    waist_width_cm = waist_width_px / pixels_per_cm

    # Torso length: shoulder midpoint ("neck") to hip midpoint ("pelvis").
    pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
    neck = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
    torso_length_cm = get_distance(neck, pelvis) / pixels_per_cm

    # Arm length: left shoulder to left wrist.
    arm_length_cm = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST]) / pixels_per_cm

    # Circumference estimates from the frontal widths: waist ≈ π × width
    # (circular cross-section approximation); hips assumed larger than the
    # waist by a fixed 0.75 ratio.
    waist_circumference = np.pi * waist_width_cm
    hip_circumference = waist_circumference / 0.75

    measurements = {
        "Waist Circumference (cm)": round(waist_circumference, 2),
        "Hip Circumference (cm)": round(hip_circumference, 2),
        "Shoulder Width (cm)": round(shoulder_width_cm, 2),
        "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
        "Full Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
    }

    # Convert the annotated image back to RGB for display in Gradio.
    return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
113
+
114
# --- Gradio UI --------------------------------------------------------------
# Components are named up front so the Interface wiring reads declaratively.
image_input = gr.Image(type="pil")
height_input = gr.Number(label="User Height (cm)")
measurements_output = gr.JSON(label="Measurements")
overlay_output = gr.Image(type="pil", label="Keypoint Overlay")

demo = gr.Interface(
    fn=process_image,
    inputs=[image_input, height_input],
    outputs=[measurements_output, overlay_output],
    title="Keypoint Measurement Extractor",
    description="Upload an image, enter your height, and get body measurements based on keypoints.",
)

demo.launch()
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ torch>=1.10
2
+ torchvision
3
+ opencv-python
4
+ numpy
5
+ gradio
6
+ pyyaml
7
+ fvcore
8
+ iopath
9
+ termcolor
10
+ matplotlib
11
+ tqdm
12
+ cloudpickle
13
+ Pillow
14
+ detectron2 @ git+https://github.com/facebookresearch/detectron2.git