sahadev10 commited on
Commit
b760957
·
verified ·
1 Parent(s): bf4c03b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +127 -102
app.py CHANGED
@@ -118,158 +118,183 @@
118
 
119
 
120
 
121
-
 
122
  import base64
123
- from io import BytesIO
124
- from PIL import Image
125
  import requests
 
126
  import torch
127
  import numpy as np
128
  import cv2
129
- import json
130
- import os
131
- import gradio as gr
132
  from detectron2.engine import DefaultPredictor
133
  from detectron2.config import get_cfg
134
  from detectron2 import model_zoo
135
 
136
- # === Save to DB function ===
137
- def save_to_database(measurements, image, user_height_cm, user_id):
138
- if not user_id:
139
- return "❌ user_id missing from URL."
140
- if measurements is None or image is None:
141
- return "⚠️ No data to save."
142
-
143
- buffered = BytesIO()
144
- pil_image = Image.fromarray(image)
145
- pil_image.save(buffered, format="JPEG")
146
- img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
147
-
148
- payload = {
149
- "imageBase64": img_str,
150
- "heightCm": user_height_cm,
151
- "waistCircumferenceCm": measurements.get("Waist Circumference (cm)"),
152
- "hipCircumferenceCm": measurements.get("Hip Circumference (cm)"),
153
- "shoulderWidthCm": measurements.get("Shoulder Width (cm)"),
154
- "torsoLengthCm": measurements.get("Torso Length (Neck to Pelvis, cm)"),
155
- "fullArmLengthCm": measurements.get("Arm Length (Shoulder to Wrist, cm)"),
156
- "neckToKneeLengthCm": measurements.get("Neck to Knee Length (cm)"),
157
- }
158
-
159
- try:
160
- response = requests.post(
161
- f"https://7da2-2409-4042-6e81-1806-de6-b8e5-836c-6b95.ngrok-free.app/upload/{user_id}",
162
- json=payload
163
- )
164
- if response.status_code == 201:
165
- return "✅ Measurements and image saved to database!"
166
- else:
167
- return f"❌ Failed: {response.status_code} - {response.text}"
168
- except Exception as e:
169
- return f"⚠️ Error during save: {str(e)}"
170
-
171
- # Setup Detectron2 Keypoint R-CNN model
172
- output_dir = "key/"
173
- os.makedirs(output_dir, exist_ok=True)
174
- output_file = os.path.join(output_dir, "keypoints.json")
175
-
176
  cfg = get_cfg()
177
  cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
178
  cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
179
  cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
180
  predictor = DefaultPredictor(cfg)
181
 
182
- def process_image(image, user_height_cm):
183
- image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
184
- outputs = predictor(image_np)
185
 
 
 
 
 
186
  instances = outputs["instances"]
187
  keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None
188
 
189
  if not keypoints:
190
- return "No keypoints detected.", None
191
-
192
- with open(output_file, "w") as f:
193
- json.dump({"keypoints": keypoints}, f, indent=4)
194
 
195
  keypoints = np.array(keypoints[0])[:, :2]
196
 
197
- NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
198
- L_WRIST = 9
199
- L_HIP, R_HIP = 11, 12
200
- L_KNEE, R_KNEE = 13, 14
201
- L_ANKLE, R_ANKLE = 15, 16
202
-
203
- skeleton = [(L_SHOULDER, R_SHOULDER), (L_SHOULDER, L_HIP), (R_SHOULDER, R_HIP), (L_HIP, R_HIP)]
204
-
205
- # Draw keypoints and skeleton for visualization
206
  for x, y in keypoints:
207
- cv2.circle(image_np, (int(x), int(y)), 5, (0, 255, 0), -1)
208
  for pt1, pt2 in skeleton:
209
  x1, y1 = map(int, keypoints[pt1])
210
  x2, y2 = map(int, keypoints[pt2])
211
- cv2.line(image_np, (x1, y1), (x2, y2), (255, 0, 0), 2)
212
 
213
- def get_distance(p1, p2):
214
- return np.linalg.norm(np.array(p1) - np.array(p2))
 
 
 
 
 
215
 
 
216
  ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
217
  pixel_height = get_distance(keypoints[NOSE], ankle_mid)
218
- estimated_full_pixel_height = pixel_height / 0.87
219
  pixels_per_cm = estimated_full_pixel_height / user_height_cm
220
 
 
221
  shoulder_width_cm = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER]) / pixels_per_cm
222
  waist_width_cm = get_distance(keypoints[L_HIP], keypoints[R_HIP]) / pixels_per_cm
223
-
224
  pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
225
- neck = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
226
- torso_length_cm = get_distance(neck, pelvis) / pixels_per_cm
227
-
228
  arm_length_cm = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST]) / pixels_per_cm
229
 
230
- knee_mid = ((keypoints[L_KNEE] + keypoints[R_KNEE]) / 2).tolist()
231
- neck_to_knee_cm = get_distance(neck, knee_mid) / pixels_per_cm
232
 
233
- waist_circumference = np.pi * waist_width_cm
234
- hip_circumference = waist_circumference / 0.75
 
 
 
 
 
 
 
 
 
 
 
 
 
235
 
236
  measurements = {
 
 
237
  "Shoulder Width (cm)": round(shoulder_width_cm, 2),
238
- "Waist Circumference (cm)": round(waist_circumference, 2),
239
- "Hip Circumference (cm)": round(hip_circumference, 2),
240
  "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
241
- "Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
242
- "Neck to Knee Length (cm)": round(neck_to_knee_cm, 2)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
  }
244
 
245
- return measurements, np.array(cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB))
 
 
 
 
 
 
 
 
 
 
 
246
 
247
- # Gradio Interface
248
  with gr.Blocks() as demo:
249
- gr.Markdown("## 🧍 Keypoint-Based Body Measurement Tool")
250
- gr.Markdown("Upload a **full-body image** and enter your **height (in cm)** to estimate body measurements using AI-powered keypoint detection.")
 
 
 
 
251
 
252
  with gr.Row():
253
  with gr.Column():
254
- image_input = gr.Image(type="pil", label="📸 Upload Image")
255
- user_id_input = gr.Textbox(label="🆔 User ID", placeholder="Enter your user ID")
256
- submit_btn = gr.Button("🔍 Generate Measurements")
 
257
  with gr.Column():
258
- height_input = gr.Number(label="📏 Your Height (cm)", value=170)
259
- measurement_output = gr.JSON(label="📐 Estimated Measurements")
260
- save_status = gr.Textbox(label="💾 Save Status")
261
-
262
- def process_and_save(image, height, user_id):
263
- measurements, img_with_keypoints = process_image(image, height)
264
- if isinstance(measurements, str): # Means an error message
265
- return measurements, None, ""
266
- save_msg = save_to_database(measurements, np.array(img_with_keypoints), height, user_id)
267
- return measurements, img_with_keypoints, save_msg
268
-
269
- submit_btn.click(
270
- fn=process_and_save,
271
- inputs=[image_input, height_input, user_id_input],
272
- outputs=[measurement_output, image_input, save_status]
 
 
 
 
273
  )
274
 
275
- demo.launch()
 
 
 
 
 
 
 
 
 
118
 
119
 
120
 
121
+ import gradio as gr
122
+ import json
123
  import base64
 
 
124
  import requests
125
+ from io import BytesIO
126
  import torch
127
  import numpy as np
128
  import cv2
129
+ from PIL import Image
 
 
130
  from detectron2.engine import DefaultPredictor
131
  from detectron2.config import get_cfg
132
  from detectron2 import model_zoo
133
 
134
# === Set up Detectron2 model ===
# Load the COCO Keypoint R-CNN (R50-FPN, 3x schedule) config and its matching
# pretrained weights from the Detectron2 model zoo, then build one
# module-level predictor that is reused for every incoming request.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
# Fall back to CPU when no CUDA device is available (e.g. free hosting tiers)
cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# NOTE(review): the weight download happens here at import time, so the first
# start of the app is slow — confirm this is acceptable for the deployment.
predictor = DefaultPredictor(cfg)
140
 
141
# === Utility ===
def get_distance(p1, p2):
    """Return the Euclidean distance between two points (array-likes)."""
    difference = np.subtract(p1, p2)
    return np.linalg.norm(difference)
144
 
145
# === Keypoint and Measurement Logic ===
def process_image(image, user_height_cm):
    """Estimate body measurements from a full-body photo.

    Parameters
    ----------
    image : PIL.Image (RGB) — photo from the Gradio image input.
    user_height_cm : number — the user's real height; calibrates pixels -> cm.

    Returns
    -------
    On success: (measurements, annotated_rgb, keypoints) where measurements is
    a dict of label -> cm value, annotated_rgb is the image with keypoints
    drawn, and keypoints is a plain list of [x, y] pairs.
    On failure: (error_message_str, None, None).
    """
    # Guard: height_input is a gr.Number with no default, so the UI can hand
    # us None (or 0 / a negative), which would make pixels_per_cm below a
    # divide-by-zero or a nonsense scale.
    if not user_height_cm or user_height_cm <= 0:
        return "Please enter a valid height in cm.", None, None

    # Detectron2 predictors expect BGR (OpenCV convention)
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    outputs = predictor(image_cv)
    instances = outputs["instances"]
    keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None

    if not keypoints:
        return "No keypoints detected.", None, None

    # Use the first detected person; keep only (x, y), dropping the score column.
    keypoints = np.array(keypoints[0])[:, :2]

    # Draw keypoints plus a shoulders/hips "box" skeleton for visual feedback
    skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]
    for x, y in keypoints:
        cv2.circle(image_cv, (int(x), int(y)), 5, (0, 255, 0), -1)
    for pt1, pt2 in skeleton:
        x1, y1 = map(int, keypoints[pt1])
        x2, y2 = map(int, keypoints[pt2])
        cv2.line(image_cv, (x1, y1), (x2, y2), (255, 0, 0), 2)

    # COCO-17 keypoint indices (only the ones actually used below;
    # elbows 7/8, right wrist 10, knees 13/14 are unused here)
    NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
    L_WRIST = 9
    L_HIP, R_HIP = 11, 12
    L_ANKLE, R_ANKLE = 15, 16

    # Calibration: nose -> ankle-midpoint pixel distance, divided by a 0.87
    # heuristic factor (nose-to-ankle assumed ~87% of full standing height).
    ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
    pixel_height = get_distance(keypoints[NOSE], ankle_mid)
    estimated_full_pixel_height = pixel_height / 0.87
    # Degenerate detection (nose and ankle midpoint coincide) -> zero scale.
    if estimated_full_pixel_height == 0:
        return "Could not estimate body height from keypoints.", None, None
    pixels_per_cm = estimated_full_pixel_height / user_height_cm

    # Linear measurements in cm
    shoulder_width_cm = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER]) / pixels_per_cm
    waist_width_cm = get_distance(keypoints[L_HIP], keypoints[R_HIP]) / pixels_per_cm
    pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
    neck_point = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
    torso_length_cm = get_distance(neck_point, pelvis) / pixels_per_cm
    arm_length_cm = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST]) / pixels_per_cm

    # Circumference heuristics: width * pi (circular cross-section assumption).
    # NOTE(review): "neck" width is really shoulder-to-shoulder and "calf"
    # width is ankle-to-ankle, so these are rough proxies at best — confirm
    # the backend expects these approximations.
    neck_width_cm = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER]) / pixels_per_cm
    neck_circumference_cm = neck_width_cm * np.pi
    calf_width_cm = get_distance(keypoints[L_ANKLE], keypoints[R_ANKLE]) / pixels_per_cm
    calf_circumference_cm = calf_width_cm * np.pi
    waist_circumference_cm = waist_width_cm * np.pi
    hip_circumference_cm = waist_circumference_cm / 0.75  # approximate waist:hip relation

    measurements = {
        "Waist Circumference (cm)": round(waist_circumference_cm, 2),
        "Hip Circumference (cm)": round(hip_circumference_cm, 2),
        "Shoulder Width (cm)": round(shoulder_width_cm, 2),
        "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
        "Full Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
        "Neck Circumference (cm)": round(neck_circumference_cm, 2),
        "Calf Circumference (cm)": round(calf_circumference_cm, 2),
    }

    # Convert the annotated image back to RGB for Gradio display
    return measurements, cv2.cvtColor(image_cv, cv2.COLOR_BGR2RGB), keypoints.tolist()
217
+
218
+
219
def save_to_database(measurements, image, user_height_cm, user_id):
    """Serialize the annotated image plus measurements and POST them to the backend.

    Parameters
    ----------
    measurements : dict | None — label -> cm values from process_image.
    image : np.ndarray | None — RGB annotated image to upload as base64 JPEG.
    user_height_cm : number — the user-supplied height.
    user_id : str — backend record id (taken from the page URL query string).

    Returns a human-readable status string; never raises.
    """
    if not user_id:
        return "❌ user_id missing from URL."
    if measurements is None or image is None:
        return "⚠️ No data to save."
    # process_image puts an error *string* into the measurements slot on
    # failure; if a stale image is still held in state, .get() below would
    # crash — treat it as nothing to save instead.
    if not isinstance(measurements, dict):
        return "⚠️ No data to save."

    # Encode the annotated image as base64 JPEG for the JSON payload
    buffered = BytesIO()
    pil_image = Image.fromarray(image)
    pil_image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")

    payload = {
        "imageBase64": img_str,
        "heightCm": user_height_cm,
        "waistCircumferenceCm": measurements.get("Waist Circumference (cm)"),
        "hipCircumferenceCm": measurements.get("Hip Circumference (cm)"),
        "shoulderWidthCm": measurements.get("Shoulder Width (cm)"),
        "torsoLengthCm": measurements.get("Torso Length (Neck to Pelvis, cm)"),
        "fullArmLengthCm": measurements.get("Full Arm Length (Shoulder to Wrist, cm)"),
        "neckCircumferenceCm": measurements.get("Neck Circumference (cm)"),
        "calfCircumferenceCm": measurements.get("Calf Circumference (cm)"),
    }

    try:
        # NOTE(review): hard-coded ngrok tunnel URL — these expire; consider
        # moving to configuration.
        response = requests.post(
            f"https://7da2-2409-4042-6e81-1806-de6-b8e5-836c-6b95.ngrok-free.app/upload/{user_id}",
            json=payload,
            timeout=30,  # a hung backend must not block the UI handler forever
        )
        if response.status_code == 201:
            return "✅ Measurements and image saved to database!"
        else:
            return f"❌ Failed: {response.status_code} - {response.text}"
    except Exception as e:
        return f"⚠️ Error during save: {str(e)}"
253
+
254
 
255
# === Gradio App ===
with gr.Blocks() as demo:
    gr.Markdown("# 📏 AI-Powered Body Measurement Tool")

    # Hidden state holding the user_id parsed from the page URL on load.
    user_id_state = gr.State()

    def _read_user_id(request: gr.Request):
        # Falls back to "" when the URL carries no ?user_id=... parameter.
        return request.query_params.get("user_id", "")

    demo.load(fn=_read_user_id, inputs=None, outputs=[user_id_state])

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(label="Upload Your Full Body Image", type="pil")
            height_input = gr.Number(label="Your Real Height (in cm)")
            process_button = gr.Button("📐 Extract Measurements")

        with gr.Column():
            output_image = gr.Image(label="Detected Keypoints")
            measurement_output = gr.JSON(label="Body Measurements")

    with gr.Row():
        save_button = gr.Button("💾 Save to Backend")
        save_status = gr.Textbox(label="Status", interactive=False)

    # Extra state so the save step uses the exact results of the last run.
    processed_img = gr.State()
    processed_data = gr.State()

    def _remember_image(img):
        # Mirror the annotated output image into state for save_to_database.
        return img

    process_button.click(
        fn=process_image,
        inputs=[image_input, height_input],
        outputs=[measurement_output, output_image, processed_data],
    ).then(
        fn=_remember_image,
        inputs=[output_image],
        outputs=processed_img,
    )

    save_button.click(
        fn=save_to_database,
        inputs=[measurement_output, processed_img, height_input, user_id_state],
        outputs=save_status,
    )

if __name__ == "__main__":
    demo.launch()
300
+