AnishaNaik03 commited on
Commit
d7b15c7
·
verified ·
1 Parent(s): 6b4e6f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -53
app.py CHANGED
@@ -508,6 +508,7 @@
508
 
509
  # if __name__ == "__main__":
510
  # demo.launch()
 
511
  import torch
512
  import numpy as np
513
  import cv2
@@ -518,7 +519,7 @@ from detectron2.engine import DefaultPredictor
518
  from detectron2.config import get_cfg
519
  from detectron2 import model_zoo
520
 
521
- # Create output directory if it doesn't exist
522
  output_dir = "key/"
523
  os.makedirs(output_dir, exist_ok=True)
524
  output_file = os.path.join(output_dir, "keypoints.json")
@@ -528,106 +529,86 @@ cfg = get_cfg()
528
  cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
529
  cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
530
  cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
531
-
532
- # Load the predictor
533
  predictor = DefaultPredictor(cfg)
534
 
535
  def process_image(image, user_height_cm):
536
- # Convert Gradio image input to OpenCV format
537
  image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
538
-
539
- # Run keypoint detection
540
  outputs = predictor(image)
541
 
542
- # Extract keypoints
543
  instances = outputs["instances"]
544
  keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None
545
 
546
  if not keypoints:
547
  return "No keypoints detected.", None
548
 
549
- # Save keypoints to JSON
550
  with open(output_file, "w") as f:
551
  json.dump({"keypoints": keypoints}, f, indent=4)
552
 
553
- keypoints = np.array(keypoints[0])[:, :2] # Extract (x, y) coordinates
554
 
555
- # COCO format indices
556
  NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
557
  L_ELBOW, R_ELBOW = 7, 8
558
  L_WRIST, R_WRIST = 9, 10
559
  L_HIP, R_HIP = 11, 12
 
560
  L_ANKLE, R_ANKLE = 15, 16
561
 
562
- # Define Keypoint Pairs for Drawing Lines (COCO Format)
563
  skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]
564
 
565
- # Draw Keypoints
566
  for x, y in keypoints:
567
  cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)
568
-
569
- # Draw Skeleton
570
  for pt1, pt2 in skeleton:
571
  x1, y1 = map(int, keypoints[pt1])
572
  x2, y2 = map(int, keypoints[pt2])
573
  cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
574
 
575
- # Function to calculate Euclidean distance
576
  def get_distance(p1, p2):
577
  return np.linalg.norm(np.array(p1) - np.array(p2))
578
 
579
- # Calculate full height (consider head length)
580
  ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
581
  pixel_height = get_distance(keypoints[NOSE], ankle_mid)
582
-
583
- # Estimated full body height (add approx head length)
584
- estimated_full_pixel_height = pixel_height / 0.87 # Since 87% = nose to ankle
585
  pixels_per_cm = estimated_full_pixel_height / user_height_cm
586
 
587
- # Waist and shoulder measurements
588
- shoulder_width_px = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER])
589
- waist_width_px = get_distance(keypoints[L_HIP], keypoints[R_HIP])
590
-
591
- # Convert to cm
592
- shoulder_width_cm = shoulder_width_px / pixels_per_cm
593
- waist_width_cm = waist_width_px / pixels_per_cm
594
 
595
- # Torso Length (Neck to Pelvis)
596
  pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
597
  neck = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
598
- torso_length_px = get_distance(neck, pelvis)
599
- torso_length_cm = torso_length_px / pixels_per_cm
600
 
601
- # Arm Length (Shoulder to Wrist)
602
- arm_length_px = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST])
603
- arm_length_cm = arm_length_px / pixels_per_cm
604
 
605
- # Calculate waist and hip circumference (Ellipse approximation)
606
- # Waist circumference ≈ π × (waist_width / 2) × 2
607
- waist_circumference = np.pi * waist_width_cm
608
- hip_circumference = waist_circumference / 0.75 # Assuming hip is slightly bigger than waist
609
 
610
- # Improved body measurement calculation
611
- def calculate_body_measurements(waist_circumference, hip_circumference, shoulder_width_cm, torso_length_cm, arm_length_cm):
612
- return {
613
- "Waist Circumference (cm)": round(waist_circumference, 2),
614
- "Hip Circumference (cm)": round(hip_circumference, 2),
615
- "Shoulder Width (cm)": round(shoulder_width_cm, 2),
616
- "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
617
- "Full Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
618
- }
619
 
620
- measurements = calculate_body_measurements(waist_circumference, hip_circumference, shoulder_width_cm, torso_length_cm, arm_length_cm)
 
 
 
 
 
 
 
621
 
622
  return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
623
 
624
  # Gradio Interface
625
- demo = gr.Interface(
626
- fn=process_image,
627
- inputs=[gr.Image(type="pil"), gr.Number(label="User Height (cm)")],
628
- outputs=[gr.JSON(label="Measurements"), gr.Image(type="pil", label="Keypoint Overlay")],
629
- title="Keypoint Measurement Extractor",
630
- description="Upload an image, enter your height, and get body measurements based on keypoints.",
631
- )
 
 
 
 
 
 
632
 
633
  demo.launch()
 
508
 
509
  # if __name__ == "__main__":
510
  # demo.launch()
511
+
512
  import torch
513
  import numpy as np
514
  import cv2
 
519
  from detectron2.config import get_cfg
520
  from detectron2 import model_zoo
521
 
522
+ # Output directory
523
  output_dir = "key/"
524
  os.makedirs(output_dir, exist_ok=True)
525
  output_file = os.path.join(output_dir, "keypoints.json")
 
529
  cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
530
  cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
531
  cfg.MODEL.DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 
532
  predictor = DefaultPredictor(cfg)
533
 
534
def process_image(image, user_height_cm):
    """Estimate body measurements from a full-body photo.

    Runs the Detectron2 keypoint predictor on the image, draws the detected
    keypoints and a torso skeleton, and converts pixel distances to
    centimetres using the user's real height as the scale reference.

    Args:
        image: PIL image (RGB) supplied by the Gradio input.
        user_height_cm: The person's real height in centimetres; used to
            derive the pixels-per-cm scale factor.

    Returns:
        (measurements, annotated_image) on success, where measurements is a
        dict of rounded cm values and annotated_image is an RGB numpy array;
        (error_message, None) when detection fails or the height is invalid.
    """
    # BUG FIX: guard against a missing/zero/negative height, which would
    # otherwise raise ZeroDivisionError (or produce nonsense) in the
    # pixels_per_cm computation below.
    if not user_height_cm or user_height_cm <= 0:
        return "Please enter a valid height in cm (greater than 0).", None

    # Gradio provides RGB; OpenCV drawing functions expect BGR.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    outputs = predictor(image)
    instances = outputs["instances"]
    keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None

    if not keypoints:
        return "No keypoints detected.", None

    # Persist the raw keypoints for later inspection/debugging.
    with open(output_file, "w") as f:
        json.dump({"keypoints": keypoints}, f, indent=4)

    # Use only the first detected person; keep (x, y), drop the confidence.
    keypoints = np.array(keypoints[0])[:, :2]

    # COCO keypoint-format indices.
    NOSE, L_SHOULDER, R_SHOULDER = 0, 5, 6
    L_ELBOW, R_ELBOW = 7, 8
    L_WRIST, R_WRIST = 9, 10
    L_HIP, R_HIP = 11, 12
    L_KNEE, R_KNEE = 13, 14
    L_ANKLE, R_ANKLE = 15, 16

    # Shoulder-to-shoulder, shoulders-to-hips, and hip-to-hip lines.
    skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]

    # Draw detected keypoints (green dots) on the BGR image.
    for x, y in keypoints:
        cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)

    # Draw the torso skeleton (blue lines).
    for pt1, pt2 in skeleton:
        x1, y1 = map(int, keypoints[pt1])
        x2, y2 = map(int, keypoints[pt2])
        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)

    def get_distance(p1, p2):
        """Euclidean distance between two 2-D points."""
        return np.linalg.norm(np.array(p1) - np.array(p2))

    # Nose-to-ankle is taken to cover ~87% of full standing height;
    # extrapolate to full height to get the pixel-to-cm scale.
    # NOTE(review): the 0.87 ratio is a heuristic — confirm its source.
    ankle_mid = ((keypoints[L_ANKLE] + keypoints[R_ANKLE]) / 2).tolist()
    pixel_height = get_distance(keypoints[NOSE], ankle_mid)
    estimated_full_pixel_height = pixel_height / 0.87
    pixels_per_cm = estimated_full_pixel_height / user_height_cm

    # Straight-line widths converted to cm.
    shoulder_width_cm = get_distance(keypoints[L_SHOULDER], keypoints[R_SHOULDER]) / pixels_per_cm
    waist_width_cm = get_distance(keypoints[L_HIP], keypoints[R_HIP]) / pixels_per_cm

    # Torso length: shoulder midpoint (neck) to hip midpoint (pelvis).
    pelvis = ((keypoints[L_HIP] + keypoints[R_HIP]) / 2).tolist()
    neck = ((keypoints[L_SHOULDER] + keypoints[R_SHOULDER]) / 2).tolist()
    torso_length_cm = get_distance(neck, pelvis) / pixels_per_cm

    # Arm length: left shoulder to left wrist.
    arm_length_cm = get_distance(keypoints[L_SHOULDER], keypoints[L_WRIST]) / pixels_per_cm

    # Neck to knee length via the knee midpoint.
    knee_mid = ((keypoints[L_KNEE] + keypoints[R_KNEE]) / 2).tolist()
    neck_to_knee_cm = get_distance(neck, knee_mid) / pixels_per_cm

    # Ellipse-style approximations: circumference ~ pi * width; hips assumed
    # 1/0.75 of waist. NOTE(review): rough heuristics, not anthropometric data.
    waist_circumference = np.pi * waist_width_cm
    hip_circumference = waist_circumference / 0.75

    measurements = {
        "Shoulder Width (cm)": round(shoulder_width_cm, 2),
        "Waist Circumference (cm)": round(waist_circumference, 2),
        "Hip Circumference (cm)": round(hip_circumference, 2),
        "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
        "Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
        "Neck to Knee Length (cm)": round(neck_to_knee_cm, 2)
    }

    # Convert back to RGB for display in Gradio.
    return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
598
 
599
# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## 🧍 Keypoint-Based Body Measurement Tool")
    gr.Markdown("Upload a **full-body image** and enter your **height (in cm)** to estimate body measurements using AI-powered keypoint detection.")

    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil", label="📸 Upload Image")
            submit_btn = gr.Button("🔍 Generate Measurements")
        with gr.Column():
            height_input = gr.Number(label="📏 Your Height (cm)", value=170)
            measurement_output = gr.JSON(label="📐 Estimated Measurements")
            # BUG FIX: image_output was referenced in the click handler below
            # but never created, raising NameError when the module loaded.
            image_output = gr.Image(type="pil", label="Keypoint Overlay")

    # process_image returns (measurements_dict_or_message, annotated_image).
    submit_btn.click(fn=process_image, inputs=[image_input, height_input], outputs=[measurement_output, image_output])

demo.launch()