Update app.py
app.py
CHANGED
@@ -9,6 +9,15 @@ from detectron2.detectron2.config import get_cfg
 from detectron2 import model_zoo
 import torch_utils
 import dnnlib
+import torch
+import numpy as np
+import cv2
+import json
+import os
+import gradio as gr
+from detectron2.engine import DefaultPredictor
+from detectron2.config import get_cfg
+from detectron2 import model_zoo

 # Output directory
 output_dir = "key/"
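
Note: with this hunk the header imports everything app.py uses, though `from detectron2 import model_zoo` is already imported at line 9 and imported again by the new line 20, so one of the two can be dropped. The predictor setup itself sits outside the diff; as a reference point, here is a minimal sketch of how these imports are usually wired up for keypoint inference (the model config named below is an assumption; COCO Keypoint R-CNN is the common choice, but this commit does not show it):

    from detectron2 import model_zoo
    from detectron2.config import get_cfg
    from detectron2.engine import DefaultPredictor

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # detection threshold; assumed value
    predictor = DefaultPredictor(cfg)  # predictor(img)["instances"].pred_keypoints -> [N, 17, 3]
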
@@ -30,7 +39,7 @@ def process_image(image, user_height_cm):
     keypoints = instances.pred_keypoints.cpu().numpy().tolist() if instances.has("pred_keypoints") else None

     if not keypoints:
-        return "No keypoints detected."
+        return "No keypoints detected.", None

     with open(output_file, "w") as f:
         json.dump({"keypoints": keypoints}, f, indent=4)
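
Note: the added `, None` keeps the early exit consistent with the click wiring in the last hunk, which now maps the return value onto two components. Every path out of process_image therefore has to yield a pair, with None leaving the image panel empty:

    if not keypoints:
        return "No keypoints detected.", None    # message for the JSON panel, no overlay
    ...
    return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)    # normal path
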
@@ -44,6 +53,15 @@ def process_image(image, user_height_cm):
     L_KNEE, R_KNEE = 13, 14
     L_ANKLE, R_ANKLE = 15, 16

+    skeleton = [(5, 6), (5, 11), (6, 12), (11, 12)]
+
+    for x, y in keypoints:
+        cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)
+    for pt1, pt2 in skeleton:
+        x1, y1 = map(int, keypoints[pt1])
+        x2, y2 = map(int, keypoints[pt2])
+        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
+
     def get_distance(p1, p2):
         return np.linalg.norm(np.array(p1) - np.array(p2))

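
Note: worth double-checking against the conversion in the earlier hunk. Detectron2's `instances.pred_keypoints` has shape [num_people, 17, 3] (x, y, score for each COCO keypoint), so `keypoints` is a list of people, each a list of 17 triples, and `for x, y in keypoints` would raise a ValueError on the first detection. A sketch of the same drawing step for the first detected person only, assuming `image` has already been converted to a NumPy array earlier in the function (gr.Image(type="pil") hands the handler a PIL image, which cv2 cannot draw on directly):

    person = keypoints[0]                        # 17 entries of [x, y, score]
    for x, y, score in person:
        cv2.circle(image, (int(x), int(y)), 5, (0, 255, 0), -1)
    for pt1, pt2 in skeleton:
        x1, y1, _ = map(int, person[pt1])
        x2, y2, _ = map(int, person[pt2])
        cv2.line(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
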
@@ -68,34 +86,30 @@ def process_image(image, user_height_cm):
     hip_circumference = waist_circumference / 0.75

     measurements = {
-        "Shoulder Width (cm)": round(shoulder_width_cm, 2),
-        "Waist Circumference (cm)": round(waist_circumference, 2),
-        "Hip Circumference (cm)": round(hip_circumference, 2),
-        "Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
-        "Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
-        "Neck to Knee Length (cm)": round(neck_to_knee_cm, 2)
+        " Shoulder Width (cm)": round(shoulder_width_cm, 2),
+        " Waist Circumference (cm)": round(waist_circumference, 2),
+        " Hip Circumference (cm)": round(hip_circumference, 2),
+        " Torso Length (Neck to Pelvis, cm)": round(torso_length_cm, 2),
+        " Arm Length (Shoulder to Wrist, cm)": round(arm_length_cm, 2),
+        " Neck to Knee Length (cm)": round(neck_to_knee_cm, 2)
     }

-    return measurements
+    return measurements, cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

 # Gradio Interface
 with gr.Blocks() as demo:
-    gr.Markdown("## 🧍
-    gr.Markdown("Upload a
+    gr.Markdown("## 🧍 Keypoint-Based Body Measurement Tool")
+    gr.Markdown("Upload a **full-body image** and enter your **height (in cm)** to estimate body measurements using AI-powered keypoint detection.")

     with gr.Row():
         with gr.Column():
             image_input = gr.Image(type="pil", label="📸 Upload Image")
             height_input = gr.Number(label="📏 Your Height (cm)", value=170)
-            submit_btn = gr.Button("📐
-
+            submit_btn = gr.Button("📐 Generate Measurements")
         with gr.Column():
             measurement_output = gr.JSON(label="📋 Estimated Measurements")
+            image_output = gr.Image(type="pil", label="📍 Keypoint Overlay")

-    submit_btn.click(
-        fn=process_image,
-        inputs=[image_input, height_input],
-        outputs=[measurement_output]
-    )
+    submit_btn.click(fn=process_image, inputs=[image_input, height_input], outputs=[measurement_output, image_output])

 demo.launch()
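
Note: the measurement variables referenced above (shoulder_width_cm, torso_length_cm, arm_length_cm, neck_to_knee_cm) are computed between the hunks and are not part of this diff. Given the user_height_cm input, the scaling step is presumably a pixel-to-cm factor taken from the detected body span; a hypothetical reconstruction, where NOSE, L_SHOULDER, R_SHOULDER, and cm_per_pixel are assumed names following the same COCO ordering as the skeleton constants:

    NOSE = 0
    L_SHOULDER, R_SHOULDER = 5, 6

    ankle_y = max(person[L_ANKLE][1], person[R_ANKLE][1])
    pixel_height = abs(ankle_y - person[NOSE][1])     # head-to-ankle span in pixels
    cm_per_pixel = user_height_cm / pixel_height      # assumes an upright, full-body photo
    shoulder_width_cm = get_distance(person[L_SHOULDER][:2], person[R_SHOULDER][:2]) * cm_per_pixel

Two reviewer-level observations either way: `hip_circumference = waist_circumference / 0.75` bakes in a fixed waist-to-hip ratio, so the JSON values are heuristic estimates rather than measured circumferences, and the rewritten dictionary keys carry a stray leading space (" Shoulder Width (cm)" and so on) that will show up verbatim in the JSON panel.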
|