Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,32 +1,95 @@
|
|
| 1 |
import os
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
import tensorflow as tf
|
| 6 |
import tf_bodypix
|
| 7 |
from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
|
| 8 |
from tf_bodypix.draw import draw_poses
|
| 9 |
-
from tensorflow.keras import preprocessing
|
| 10 |
-
import cv2
|
| 11 |
-
import json
|
| 12 |
-
from matplotlib import pyplot as plt
|
| 13 |
-
import numpy as np
|
| 14 |
-
from calculations import measure_body_sizes
|
| 15 |
-
import gradio as gr
|
| 16 |
import pandas as pd
|
|
|
|
| 17 |
|
| 18 |
# Load BodyPix model
|
| 19 |
bodypix_model = load_model(download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))
|
| 20 |
|
| 21 |
rainbow = [
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
]
|
| 29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
def process_images(front_img, side_img, real_height_cm):
|
| 31 |
fimage_array = preprocessing.image.img_to_array(front_img)
|
| 32 |
simage_array = preprocessing.image.img_to_array(side_img)
|
|
@@ -38,18 +101,12 @@ def process_images(front_img, side_img, real_height_cm):
|
|
| 38 |
front_mask = frontresult.get_mask(threshold=0.75)
|
| 39 |
side_mask = sideresult.get_mask(threshold=0.75)
|
| 40 |
|
| 41 |
-
# preprocessing.image.save_img(f'{output_path}/frontbodypix-mask.jpg',front_mask)
|
| 42 |
-
# preprocessing.image.save_img(f'{output_path}/sidebodypix-mask.jpg',side_mask)
|
| 43 |
-
|
| 44 |
front_colored_mask = frontresult.get_colored_part_mask(front_mask, rainbow)
|
| 45 |
side_colored_mask = sideresult.get_colored_part_mask(side_mask, rainbow)
|
| 46 |
|
| 47 |
-
# preprocessing.image.save_img(f'{output_path}/frontbodypix-colored-mask.jpg',front_colored_mask)
|
| 48 |
-
# preprocessing.image.save_img(f'{output_path}/sidebodypix-colored-mask.jpg',side_colored_mask)
|
| 49 |
-
|
| 50 |
frontposes = frontresult.get_poses()
|
| 51 |
front_image_with_poses = draw_poses(
|
| 52 |
-
fimage_array.copy(),
|
| 53 |
frontposes,
|
| 54 |
keypoints_color=(255, 100, 100),
|
| 55 |
skeleton_color=(100, 100, 255)
|
|
@@ -57,35 +114,46 @@ def process_images(front_img, side_img, real_height_cm):
|
|
| 57 |
|
| 58 |
sideposes = sideresult.get_poses()
|
| 59 |
side_image_with_poses = draw_poses(
|
| 60 |
-
simage_array.copy(),
|
| 61 |
sideposes,
|
| 62 |
keypoints_color=(255, 100, 100),
|
| 63 |
skeleton_color=(100, 100, 255)
|
| 64 |
)
|
| 65 |
-
# print(np.array(simage).shape)
|
| 66 |
-
# print(np.array(side_colored_mask).shape)
|
| 67 |
-
|
| 68 |
-
# preprocessing.image.save_img(f'{output_path}/frontbodypix-poses.jpg', front_image_with_poses)
|
| 69 |
-
# preprocessing.image.save_img(f'{output_path}/sidebodypix-poses.jpg', side_image_with_poses)
|
| 70 |
|
| 71 |
body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow)
|
| 72 |
measurements_df = pd.DataFrame([body_sizes[0]])
|
| 73 |
return measurements_df
|
| 74 |
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
)
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
+
import cv2
|
| 3 |
+
import time
|
| 4 |
+
import threading
|
| 5 |
+
import numpy as np
|
| 6 |
+
import gradio as gr
|
| 7 |
+
from gtts import gTTS
|
| 8 |
+
import pygame
|
| 9 |
+
from tensorflow.keras import preprocessing
|
| 10 |
import tensorflow as tf
|
| 11 |
import tf_bodypix
|
| 12 |
from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
|
| 13 |
from tf_bodypix.draw import draw_poses
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
import pandas as pd
|
| 15 |
+
from calculations import measure_body_sizes
|
| 16 |
|
# Load the pre-trained BodyPix segmentation model
# (MobileNet, float-50 weights, output stride 16).
bodypix_model = load_model(
    download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16)
)
| 19 |
|
# 24-entry RGB palette: one color per BodyPix body-part id, used when
# rendering the colored part mask.
rainbow = [
    [110, 64, 170], [143, 61, 178], [178, 60, 178], [210, 62, 167],
    [238, 67, 149], [255, 78, 125], [255, 94, 99], [255, 115, 75],
    [255, 140, 56], [239, 167, 47], [217, 194, 49], [194, 219, 64],
    [175, 240, 91], [135, 245, 87], [96, 247, 96], [64, 243, 115],
    [40, 234, 141], [28, 219, 169], [26, 199, 194], [33, 176, 213],
    [47, 150, 224], [65, 125, 224], [84, 101, 214], [99, 81, 195],
]
| 28 |
|
# Directory where captured webcam frames are written; created eagerly so
# later saves never fail on a missing folder.
output_dir = "output_images"
os.makedirs(output_dir, exist_ok=True)
| 32 |
+
|
| 33 |
+
def save_image(image, filename):
    """Write *image* beneath the global ``output_dir`` and report the path."""
    target_path = os.path.join(output_dir, filename)
    # NOTE(review): cv2.imwrite returns False on failure and raises nothing;
    # this keeps the original best-effort behavior.
    cv2.imwrite(target_path, image)
    print(f"Image saved as {target_path}")
| 37 |
+
|
| 38 |
+
def draw_bounding_box(frame, person_height_cm, pixels_per_cm):
    """Draw a centered green guide box on *frame*, in place.

    The box is square (2 x the person's height, converted to pixels, in
    both dimensions) and clamped to the frame borders.
    """
    h, w, _ = frame.shape
    side_px = int(2 * person_height_cm * pixels_per_cm)
    half = side_px // 2
    cx, cy = w // 2, h // 2
    top_left = (max(cx - half, 0), max(cy - half, 0))
    bottom_right = (min(cx + half, w), min(cy + half, h))
    # Green outline, 2 px thick.
    cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
| 50 |
+
|
| 51 |
+
def play_audio_instruction(text, filename):
    """Synthesize *text* to *filename* via gTTS and start playing it.

    Returns as soon as playback starts; it does not wait for the clip to
    finish.
    """
    speech = gTTS(text=text, lang='en')
    speech.save(filename)
    # NOTE(review): the mixer is (re)initialised on every call -- confirm
    # repeated init is intended rather than a one-time setup.
    pygame.mixer.init()
    pygame.mixer.music.load(filename)
    pygame.mixer.music.play()
| 57 |
+
|
| 58 |
+
def play_countdown_audio():
    """Speak a 20-to-1 countdown, blocking until each clip finishes.

    Clips are synthesized with gTTS the first time they are needed and
    cached in ``input1/`` so later countdowns skip the network round-trip.
    """
    # gTTS.save fails if the target directory is missing -- create it here
    # instead of assuming someone else did.
    os.makedirs("input1", exist_ok=True)
    # Don't rely on play_audio_instruction's thread having already called
    # pygame.mixer.init(); loading music on an uninitialised mixer raises.
    if not pygame.mixer.get_init():
        pygame.mixer.init()
    for i in range(20, 0, -1):
        filename = f"input1/countdown_{i}.mp3"
        if not os.path.exists(filename):  # reuse previously generated clips
            gTTS(text=str(i), lang='en').save(filename)
        pygame.mixer.music.load(filename)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)
| 67 |
+
|
| 68 |
+
def capture_image(label, camera, person_height_cm, pixels_per_cm):
    """Guide the user through one pose and capture a frame from *camera*.

    Shows a live preview with a guide box and an on-screen countdown while
    audio instructions and a spoken countdown play on background threads,
    then grabs and saves the final frame.

    Returns the captured BGR frame, or None when the camera stops
    delivering frames.
    """
    # Both the audio clips (input1/) and the saved frame
    # (output_images/output/) go into subdirectories nothing else creates;
    # without this, gTTS.save errors and cv2.imwrite silently drops the image.
    os.makedirs("input1", exist_ok=True)
    os.makedirs(os.path.join(output_dir, "output"), exist_ok=True)

    print(f"Please position yourself for the {label} pose.")
    instruction_text = f"Please stand straight in the box for the {label} pose."
    audio_file = f"input1/instruction_{label}.mp3"
    threading.Thread(target=play_audio_instruction, args=(instruction_text, audio_file)).start()
    countdown_thread = threading.Thread(target=play_countdown_audio)
    countdown_thread.start()

    try:
        for i in range(20, 0, -1):
            ret, frame = camera.read()
            if not ret:
                print("Failed to capture frame.")
                return None
            draw_bounding_box(frame, person_height_cm, pixels_per_cm)
            cv2.putText(frame, f"{label.capitalize()} pose: {i} seconds remaining", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
            cv2.imshow("Pose Capture", frame)
            # waitKey doubles as the 1-second tick for the countdown overlay.
            if cv2.waitKey(1000) & 0xFF == ord('q'):
                break
    finally:
        # Always wait for the countdown audio, even on early exit, so a
        # stray thread doesn't keep speaking over the next capture.
        countdown_thread.join()

    ret, frame = camera.read()
    if not ret:
        print("Failed to capture frame.")
        return None
    save_image(frame, f"output/{label}_image.jpg")
    return frame
| 92 |
+
|
| 93 |
def process_images(front_img, side_img, real_height_cm):
|
| 94 |
fimage_array = preprocessing.image.img_to_array(front_img)
|
| 95 |
simage_array = preprocessing.image.img_to_array(side_img)
|
|
|
|
| 101 |
front_mask = frontresult.get_mask(threshold=0.75)
|
| 102 |
side_mask = sideresult.get_mask(threshold=0.75)
|
| 103 |
|
|
|
|
|
|
|
|
|
|
| 104 |
front_colored_mask = frontresult.get_colored_part_mask(front_mask, rainbow)
|
| 105 |
side_colored_mask = sideresult.get_colored_part_mask(side_mask, rainbow)
|
| 106 |
|
|
|
|
|
|
|
|
|
|
| 107 |
frontposes = frontresult.get_poses()
|
| 108 |
front_image_with_poses = draw_poses(
|
| 109 |
+
fimage_array.copy(),
|
| 110 |
frontposes,
|
| 111 |
keypoints_color=(255, 100, 100),
|
| 112 |
skeleton_color=(100, 100, 255)
|
|
|
|
| 114 |
|
| 115 |
sideposes = sideresult.get_poses()
|
| 116 |
side_image_with_poses = draw_poses(
|
| 117 |
+
simage_array.copy(),
|
| 118 |
sideposes,
|
| 119 |
keypoints_color=(255, 100, 100),
|
| 120 |
skeleton_color=(100, 100, 255)
|
| 121 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
|
| 123 |
body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow)
|
| 124 |
measurements_df = pd.DataFrame([body_sizes[0]])
|
| 125 |
return measurements_df
|
| 126 |
|
| 127 |
+
def gradio_interface(person_height_cm):
    """Capture front and side poses from the webcam and measure the body.

    Returns a pandas DataFrame of measurements on success, or an error
    string when the camera is unavailable or a capture fails.
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        return "Error: Unable to access the camera."

    # NOTE(review): 5 px/cm is a hard-coded display scale for the guide
    # box -- confirm against the actual camera resolution/distance.
    pixels_per_cm = 5
    try:
        front_frame = capture_image("front", cap, person_height_cm, pixels_per_cm)
        side_frame = capture_image("side", cap, person_height_cm, pixels_per_cm)
    finally:
        # Release the device and close preview windows even if capture raises.
        cap.release()
        cv2.destroyAllWindows()

    # capture_image returns None on camera failure; the old code passed
    # that straight into processing and crashed.
    if front_frame is None or side_frame is None:
        return "Error: Failed to capture one or both poses."

    # Use the in-memory frames directly instead of re-reading the saved
    # JPEGs: cv2.imread silently returns None when the write failed.
    measurements_df = process_images(front_frame, side_frame, person_height_cm)
    return measurements_df
| 146 |
+
|
| 147 |
+
def main():
    """Assemble the Gradio front-end and start serving it."""
    with gr.Blocks() as app:
        height_box = gr.Number(label="Enter your height (in cm)")
        run_button = gr.Button("Capture Images")
        results_table = gr.DataFrame()

        # Measurement only starts on an explicit button press, not on
        # every change of the height input.
        run_button.click(fn=gradio_interface, inputs=height_box, outputs=results_table)

    app.launch()


if __name__ == "__main__":
    main()
|