Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,9 +1,9 @@
|
|
| 1 |
import os
|
| 2 |
import cv2
|
| 3 |
import time
|
| 4 |
-
import threading
|
| 5 |
import numpy as np
|
| 6 |
import gradio as gr
|
|
|
|
| 7 |
from tensorflow.keras import preprocessing
|
| 8 |
import pandas as pd
|
| 9 |
from calculations import measure_body_sizes
|
|
@@ -22,106 +22,118 @@ def save_image(image, filename):
|
|
| 22 |
cv2.imwrite(filepath, image)
|
| 23 |
print(f"Image saved as {filepath}")
|
| 24 |
|
| 25 |
-
def
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
color = (0, 255, 0)
|
| 33 |
-
thickness = 2
|
| 34 |
-
cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), color, thickness)
|
| 35 |
-
|
| 36 |
-
def play_countdown_audio():
    """Play one countdown audio clip per second, counting 20 down to 1.

    Each tick looks for ``input1/countdown_<n>.mp3`` and plays it through the
    external ``mpg123`` player; a missing clip is skipped silently. Sleeps one
    second per tick regardless of whether a clip was found.
    """
    for second in reversed(range(1, 21)):
        clip = f"input1/countdown_{second}.mp3"
        if os.path.exists(clip):
            # mpg123 blocks until playback finishes; intended to run in a
            # background thread alongside the visual countdown.
            os.system(f"mpg123 {clip}")
        time.sleep(1)
|
| 42 |
-
|
| 43 |
-
def capture_image_with_timer(label, person_height_cm, pixels_per_cm):
    """Capture a pose image from the default webcam after a 20 s countdown.

    Shows a live preview with a guide box and an on-screen countdown while a
    parallel thread plays countdown audio, then grabs one final frame and
    saves it as ``<label>_image.jpg``.

    Args:
        label: Pose name used in prompts and the saved filename (e.g. "front").
        person_height_cm: Subject height in cm, used to size the guide box.
        pixels_per_cm: Camera calibration factor (pixels per centimetre)
            — presumably measured elsewhere; TODO confirm with caller.

    Returns:
        The final captured frame on success, or None if the camera could not
        be opened or a frame could not be read.
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Unable to access the camera.")
        return None

    try:
        print(f"Please position yourself for the {label} pose.")

        # Guide box scaled to the subject's height (factor 2 on both axes).
        box_height = int(2 * person_height_cm * pixels_per_cm)
        box_width = int(2 * person_height_cm * pixels_per_cm)

        # Audio countdown runs in parallel with the visual preview loop.
        countdown_thread = threading.Thread(target=play_countdown_audio)
        countdown_thread.start()

        for i in range(20, 0, -1):
            ret, frame = cap.read()
            if not ret:
                print("Failed to capture frame.")
                return None
            draw_bounding_box(frame, box_width, box_height)
            cv2.putText(frame, f"{label.capitalize()} pose: {i} seconds remaining", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
            cv2.imshow("Pose Capture", frame)
            # waitKey(1000) paces the loop at ~1 frame/second; 'q' aborts early.
            if cv2.waitKey(1000) & 0xFF == ord('q'):
                break

        countdown_thread.join()
        ret, final_frame = cap.read()
        if ret:
            filename = f"{label}_image.jpg"
            save_image(final_frame, filename)
            # BUG FIX: the original ended with a bare `return`, so the captured
            # frame was never handed back — callers always received None.
            return final_frame
        return None
    finally:
        # BUG FIX: the original never released the camera or closed the
        # preview window, leaking the VideoCapture handle on every path.
        cap.release()
        cv2.destroyAllWindows()
|
| 78 |
|
| 79 |
def process_images(front_img, side_img, real_height_cm):
|
| 80 |
-
|
|
|
|
| 81 |
if front_img is None or side_img is None:
|
| 82 |
-
raise
|
| 83 |
|
|
|
|
| 84 |
front_image_array = preprocessing.image.img_to_array(front_img)
|
| 85 |
side_image_array = preprocessing.image.img_to_array(side_img)
|
| 86 |
|
|
|
|
| 87 |
front_result = bodypix_model.predict_single(front_image_array)
|
| 88 |
side_result = bodypix_model.predict_single(side_image_array)
|
| 89 |
|
|
|
|
| 90 |
front_mask = front_result.get_mask(threshold=0.75)
|
| 91 |
side_mask = side_result.get_mask(threshold=0.75)
|
| 92 |
|
|
|
|
| 93 |
front_colored_mask = front_result.get_colored_part_mask(front_mask)
|
| 94 |
side_colored_mask = side_result.get_colored_part_mask(side_mask)
|
| 95 |
|
|
|
|
| 96 |
front_poses = front_result.get_poses()
|
| 97 |
side_poses = side_result.get_poses()
|
| 98 |
|
| 99 |
-
#
|
| 100 |
-
body_sizes = measure_body_sizes(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
measurements_df = pd.DataFrame(body_sizes)
|
| 102 |
return measurements_df
|
| 103 |
|
| 104 |
-
def
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
front_frame = capture_image_with_timer("front", person_height_cm, pixels_per_cm)
|
| 108 |
-
side_frame = capture_image_with_timer("side", person_height_cm, pixels_per_cm)
|
| 109 |
-
|
| 110 |
-
if front_frame is None or side_frame is None:
|
| 111 |
-
return pd.DataFrame() # Return empty DataFrame if image capture fails
|
| 112 |
-
|
| 113 |
-
measurements_df = process_images(front_frame, side_frame, person_height_cm)
|
| 114 |
-
return measurements_df
|
| 115 |
-
|
| 116 |
-
def main():
|
| 117 |
with gr.Blocks() as demo:
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 121 |
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
demo.launch()
|
| 125 |
|
| 126 |
if __name__ == "__main__":
|
| 127 |
main()
|
|
|
|
| 1 |
import os
|
| 2 |
import cv2
|
| 3 |
import time
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
import gradio as gr
|
| 6 |
+
import threading
|
| 7 |
from tensorflow.keras import preprocessing
|
| 8 |
import pandas as pd
|
| 9 |
from calculations import measure_body_sizes
|
|
|
|
| 22 |
cv2.imwrite(filepath, image)
|
| 23 |
print(f"Image saved as {filepath}")
|
| 24 |
|
| 25 |
+
def process_webcam_image(webcam_image, label, real_height_cm):
    """Persist a captured webcam frame to disk and pass it through unchanged.

    Args:
        webcam_image: NumPy frame from the Gradio webcam component.
        label: Pose name used in the error message and saved filename.
        real_height_cm: Accepted for interface compatibility; not used here.

    Raises:
        gr.Error: When no frame was captured.
    """
    if webcam_image is None:
        raise gr.Error(f"No {label} image captured")

    # Gradio already delivers a NumPy array; save a copy, then echo it back.
    save_image(webcam_image, f"{label}_image.jpg")
    return webcam_image
|
| 36 |
|
| 37 |
def process_images(front_img, side_img, real_height_cm):
    """Segment the front and side photos with BodyPix and compute measurements.

    Runs one BodyPix pass per image (mask at threshold 0.75, coloured part
    mask, pose keypoints), then delegates to ``measure_body_sizes``.

    Args:
        front_img: Front-view image (array-like accepted by img_to_array).
        side_img: Side-view image (array-like accepted by img_to_array).
        real_height_cm: The subject's real height in centimetres.

    Returns:
        A pandas DataFrame of the computed body sizes.

    Raises:
        gr.Error: When either photo is missing.
    """
    if front_img is None or side_img is None:
        raise gr.Error("Both front and side images are required")

    def _segment(img):
        # One BodyPix pass: coloured part mask plus pose keypoints.
        result = bodypix_model.predict_single(preprocessing.image.img_to_array(img))
        mask = result.get_mask(threshold=0.75)
        return result.get_colored_part_mask(mask), result.get_poses()

    front_colored_mask, front_poses = _segment(front_img)
    side_colored_mask, side_poses = _segment(side_img)

    # NOTE(review): side-before-front argument order and rainbow=None follow
    # the original call; confirm against calculations.measure_body_sizes.
    body_sizes = measure_body_sizes(
        side_colored_mask,
        front_colored_mask,
        side_poses,
        front_poses,
        real_height_cm,
        rainbow=None,
    )
    return pd.DataFrame(body_sizes)
|
| 76 |
|
| 77 |
+
def create_gradio_interface():
    """Build the Gradio Blocks UI for webcam-based body measurement.

    Layout: a height input, front/side webcam columns each with a capture
    button (captured frames held in per-session ``gr.State``), and a button
    that runs ``process_images`` on the two stored frames.

    Returns:
        The constructed (not yet launched) ``gr.Blocks`` app.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Body Measurement Tool")

        with gr.Row():
            height_input = gr.Number(label="Your Height (cm)")

        with gr.Row():
            with gr.Column():
                gr.Markdown("### Front View")
                # NOTE(review): `source=` was removed in Gradio 4 (renamed to
                # `sources=["webcam"]`). If the Space fails at startup, pin
                # gradio<4 or update this argument — confirm installed version.
                front_webcam = gr.Image(source="webcam", type="numpy", label="Front View")
                front_capture_button = gr.Button("Capture Front View")

            with gr.Column():
                gr.Markdown("### Side View")
                side_webcam = gr.Image(source="webcam", type="numpy", label="Side View")
                side_capture_button = gr.Button("Capture Side View")

        # Latest captured frames, kept per browser session.
        current_front_image = gr.State(None)
        current_side_image = gr.State(None)

        def capture_image(webcam_input):
            # Shared handler for both capture buttons (the original carried
            # two identical copies). Stores the frame and echoes it back so
            # the user sees what was captured.
            if webcam_input is None:
                raise gr.Error("No image in webcam")
            return webcam_input, webcam_input

        front_capture_button.click(
            fn=capture_image,
            inputs=front_webcam,
            outputs=[current_front_image, front_webcam]
        )

        side_capture_button.click(
            fn=capture_image,
            inputs=side_webcam,
            outputs=[current_side_image, side_webcam]
        )

        # Measurement calculation button
        calculate_button = gr.Button("Calculate Measurements")
        output_df = gr.DataFrame(label="Body Measurements")

        def calculate(height, front, side):
            # Robustness fix: the original lambda forwarded a possibly-empty
            # height straight into the measurement pipeline; fail with a clear
            # UI error instead of crashing deep inside measure_body_sizes.
            if height is None or height <= 0:
                raise gr.Error("Please enter a valid height in cm")
            return process_images(front, side, height)

        calculate_button.click(
            fn=calculate,
            inputs=[height_input, current_front_image, current_side_image],
            outputs=output_df
        )

    return demo
|
| 133 |
|
| 134 |
+
def main():
    """Assemble the Gradio app and serve it with a public share link."""
    app = create_gradio_interface()
    # share=True publishes a temporary public URL; debug=True streams errors.
    app.launch(share=True, debug=True)


if __name__ == "__main__":
    main()
|