Mk1443 committed on
Commit
f22f246
·
verified ·
1 Parent(s): 39bc6ad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -84
app.py CHANGED
@@ -4,28 +4,16 @@ import time
4
  import threading
5
  import numpy as np
6
  import gradio as gr
7
- from gtts import gTTS
8
- import pygame
9
  from tensorflow.keras import preprocessing
10
- import tensorflow as tf
 
11
  import tf_bodypix
12
  from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
13
  from tf_bodypix.draw import draw_poses
14
- import pandas as pd
15
- from calculations import measure_body_sizes
16
 
17
  # Load BodyPix model
18
  bodypix_model = load_model(download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))
19
 
20
- rainbow = [
21
- [110, 64, 170], [143, 61, 178], [178, 60, 178], [210, 62, 167],
22
- [238, 67, 149], [255, 78, 125], [255, 94, 99], [255, 115, 75],
23
- [255, 140, 56], [239, 167, 47], [217, 194, 49], [194, 219, 64],
24
- [175, 240, 91], [135, 245, 87], [96, 247, 96], [64, 243, 115],
25
- [40, 234, 141], [28, 219, 169], [26, 199, 194], [33, 176, 213],
26
- [47, 150, 224], [65, 125, 224], [84, 101, 214], [99, 81, 195]
27
- ]
28
-
29
  # Create output directory
30
  output_dir = "output_images"
31
  os.makedirs(output_dir, exist_ok=True)
@@ -35,10 +23,8 @@ def save_image(image, filename):
35
  cv2.imwrite(filepath, image)
36
  print(f"Image saved as {filepath}")
37
 
38
- def draw_bounding_box(frame, person_height_cm, pixels_per_cm):
39
  frame_height, frame_width, _ = frame.shape
40
- box_height = int(2 * person_height_cm * pixels_per_cm)
41
- box_width = int(2 * person_height_cm * pixels_per_cm)
42
  center_x, center_y = frame_width // 2, frame_height // 2
43
  top_left_x = max(center_x - box_width // 2, 0)
44
  top_left_y = max(center_y - box_height // 2, 0)
@@ -48,105 +34,88 @@ def draw_bounding_box(frame, person_height_cm, pixels_per_cm):
48
  thickness = 2
49
  cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), color, thickness)
50
 
51
- def play_audio_instruction(text, filename):
52
- tts = gTTS(text=text, lang='en')
53
- tts.save(filename)
54
- pygame.mixer.init()
55
- pygame.mixer.music.load(filename)
56
- pygame.mixer.music.play()
57
-
58
  def play_countdown_audio():
59
  for i in range(20, 0, -1):
60
- tts = gTTS(text=str(i), lang='en')
61
- filename = f"input1/countdown_{i}.mp3"
62
- tts.save(filename)
63
- pygame.mixer.music.load(filename)
64
- pygame.mixer.music.play()
65
- while pygame.mixer.music.get_busy():
66
- time.sleep(0.1)
67
-
68
- def capture_image(label, person_height_cm, pixels_per_cm):
69
- camera = gr.Button("Capture Images")
 
70
  print(f"Please position yourself for the {label} pose.")
71
- instruction_text = f"Please stand straight in the box for the {label} pose."
72
- audio_file = f"input1/instruction_{label}.mp3"
73
- threading.Thread(target=play_audio_instruction, args=(instruction_text, audio_file)).start()
 
74
  countdown_thread = threading.Thread(target=play_countdown_audio)
75
  countdown_thread.start()
76
 
77
  for i in range(20, 0, -1):
78
- ret, frame = camera.read()
79
  if not ret:
80
  print("Failed to capture frame.")
81
- return None
82
- draw_bounding_box(frame, person_height_cm, pixels_per_cm)
83
  cv2.putText(frame, f"{label.capitalize()} pose: {i} seconds remaining", (10, 30),
84
  cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
85
  cv2.imshow("Pose Capture", frame)
86
  if cv2.waitKey(1000) & 0xFF == ord('q'):
87
  break
 
88
  countdown_thread.join()
89
- ret, frame = camera.read()
90
  if ret:
91
- save_image(frame, f"output/{label}_image.jpg")
92
- return frame
 
 
 
 
93
 
94
  def process_images(front_img, side_img, real_height_cm):
95
- fimage_array = preprocessing.image.img_to_array(front_img)
96
- simage_array = preprocessing.image.img_to_array(side_img)
97
-
98
- # bodypix prediction
99
- frontresult = bodypix_model.predict_single(fimage_array)
100
- sideresult = bodypix_model.predict_single(simage_array)
101
-
102
- front_mask = frontresult.get_mask(threshold=0.75)
103
- side_mask = sideresult.get_mask(threshold=0.75)
104
-
105
- front_colored_mask = frontresult.get_colored_part_mask(front_mask, rainbow)
106
- side_colored_mask = sideresult.get_colored_part_mask(side_mask, rainbow)
107
-
108
- frontposes = frontresult.get_poses()
109
- front_image_with_poses = draw_poses(
110
- fimage_array.copy(),
111
- frontposes,
112
- keypoints_color=(255, 100, 100),
113
- skeleton_color=(100, 100, 255)
114
- )
115
-
116
- sideposes = sideresult.get_poses()
117
- side_image_with_poses = draw_poses(
118
- simage_array.copy(),
119
- sideposes,
120
- keypoints_color=(255, 100, 100),
121
- skeleton_color=(100, 100, 255)
122
- )
123
-
124
- body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, sideposes, frontposes, real_height_cm, rainbow)
125
  measurements_df = pd.DataFrame([body_sizes[0]])
126
  return measurements_df
127
 
128
  def gradio_interface(person_height_cm):
129
-
130
- # if not cap.isOpened():
131
- # return "Error: Unable to access the camera."
132
-
133
  pixels_per_cm = 5
134
- front_frame = capture_image("front", person_height_cm, pixels_per_cm)
135
- side_frame = capture_image("side", person_height_cm, pixels_per_cm)
136
 
 
 
 
 
 
137
 
138
- # Load the captured images to process for body measurement
139
- front_img = cv2.imread(os.path.join(output_dir, "output/front_image.jpg"))
140
- side_img = cv2.imread(os.path.join(output_dir, "output/side_image.jpg"))
141
 
142
- # Process images and return measurement DataFrame
143
  measurements_df = process_images(front_img, side_img, person_height_cm)
144
  return measurements_df
145
 
146
  def main():
147
- # Updated Gradio interface with separate button trigger
148
  with gr.Blocks() as demo:
149
  height_input = gr.Number(label="Enter your height (in cm)")
 
150
  output_df = gr.DataFrame()
151
 
152
  capture_button.click(fn=gradio_interface, inputs=height_input, outputs=output_df)
 
4
  import threading
5
  import numpy as np
6
  import gradio as gr
 
 
7
  from tensorflow.keras import preprocessing
8
+ import pandas as pd
9
+ from calculations import measure_body_sizes
10
  import tf_bodypix
11
  from tf_bodypix.api import download_model, load_model, BodyPixModelPaths
12
  from tf_bodypix.draw import draw_poses
 
 
13
 
14
  # Load BodyPix model
15
  bodypix_model = load_model(download_model(BodyPixModelPaths.MOBILENET_FLOAT_50_STRIDE_16))
16
 
 
 
 
 
 
 
 
 
 
17
  # Create output directory
18
  output_dir = "output_images"
19
  os.makedirs(output_dir, exist_ok=True)
 
23
  cv2.imwrite(filepath, image)
24
  print(f"Image saved as {filepath}")
25
 
26
+ def draw_bounding_box(frame, box_width, box_height):
27
  frame_height, frame_width, _ = frame.shape
 
 
28
  center_x, center_y = frame_width // 2, frame_height // 2
29
  top_left_x = max(center_x - box_width // 2, 0)
30
  top_left_y = max(center_y - box_height // 2, 0)
 
34
  thickness = 2
35
  cv2.rectangle(frame, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), color, thickness)
36
 
 
 
 
 
 
 
 
37
  def play_countdown_audio():
38
  for i in range(20, 0, -1):
39
+ audio_file = f"input1/countdown_{i}.mp3"
40
+ if os.path.exists(audio_file):
41
+ os.system(f"mpg123 {audio_file}")
42
+ time.sleep(1)
43
+
44
+ def capture_image_with_timer(label, person_height_cm, pixels_per_cm):
45
+ cap = cv2.VideoCapture(0)
46
+ if not cap.isOpened():
47
+ print("Error: Unable to access the camera.")
48
+ return None
49
+
50
  print(f"Please position yourself for the {label} pose.")
51
+
52
+ box_height = int(2 * person_height_cm * pixels_per_cm)
53
+ box_width = int(2 * person_height_cm * pixels_per_cm)
54
+
55
  countdown_thread = threading.Thread(target=play_countdown_audio)
56
  countdown_thread.start()
57
 
58
  for i in range(20, 0, -1):
59
+ ret, frame = cap.read()
60
  if not ret:
61
  print("Failed to capture frame.")
62
+ break
63
+ draw_bounding_box(frame, box_width, box_height)
64
  cv2.putText(frame, f"{label.capitalize()} pose: {i} seconds remaining", (10, 30),
65
  cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
66
  cv2.imshow("Pose Capture", frame)
67
  if cv2.waitKey(1000) & 0xFF == ord('q'):
68
  break
69
+
70
  countdown_thread.join()
71
+ ret, final_frame = cap.read()
72
  if ret:
73
+ filename = f"{label}_image.jpg"
74
+ save_image(final_frame, filename)
75
+
76
+ cap.release()
77
+ cv2.destroyAllWindows()
78
+ return final_frame
79
 
80
  def process_images(front_img, side_img, real_height_cm):
81
+ front_image_array = preprocessing.image.img_to_array(front_img)
82
+ side_image_array = preprocessing.image.img_to_array(side_img)
83
+
84
+ front_result = bodypix_model.predict_single(front_image_array)
85
+ side_result = bodypix_model.predict_single(side_image_array)
86
+
87
+ front_mask = front_result.get_mask(threshold=0.75)
88
+ side_mask = side_result.get_mask(threshold=0.75)
89
+
90
+ front_colored_mask = front_result.get_colored_part_mask(front_mask)
91
+ side_colored_mask = side_result.get_colored_part_mask(side_mask)
92
+
93
+ front_poses = front_result.get_poses()
94
+ side_poses = side_result.get_poses()
95
+
96
+ body_sizes = measure_body_sizes(side_colored_mask, front_colored_mask, side_poses, front_poses, real_height_cm)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  measurements_df = pd.DataFrame([body_sizes[0]])
98
  return measurements_df
99
 
100
  def gradio_interface(person_height_cm):
 
 
 
 
101
  pixels_per_cm = 5
 
 
102
 
103
+ front_frame = capture_image_with_timer("front", person_height_cm, pixels_per_cm)
104
+ side_frame = capture_image_with_timer("side", person_height_cm, pixels_per_cm)
105
+
106
+ front_img_path = os.path.join(output_dir, "front_image.jpg")
107
+ side_img_path = os.path.join(output_dir, "side_image.jpg")
108
 
109
+ front_img = cv2.imread(front_img_path)
110
+ side_img = cv2.imread(side_img_path)
 
111
 
 
112
  measurements_df = process_images(front_img, side_img, person_height_cm)
113
  return measurements_df
114
 
115
  def main():
 
116
  with gr.Blocks() as demo:
117
  height_input = gr.Number(label="Enter your height (in cm)")
118
+ capture_button = gr.Button("Capture Images")
119
  output_df = gr.DataFrame()
120
 
121
  capture_button.click(fn=gradio_interface, inputs=height_input, outputs=output_df)