themalinery committed on
Commit
5deabf9
·
1 Parent(s): 6802e41

option for choosing landmark shape

Browse files
Files changed (2) hide show
  1. app.py +16 -5
  2. src/utils.py +12 -3
app.py CHANGED
@@ -61,6 +61,7 @@ def process_video(
61
  landmark_color,
62
  connection_color,
63
  connection_thickness,
 
64
  ):
65
  """
66
  Process video with body pose estimation and return output video path.
@@ -86,7 +87,8 @@ def process_video(
86
 
87
  output_dir = temp_dir / "output"
88
  output_dir.mkdir(parents=True, exist_ok=True)
89
- output_video_path = output_dir / "output.mp4"
 
90
 
91
  # Convert colors to BGR tuples
92
  print(f"Raw landmark_color input: {landmark_color} (type: {type(landmark_color)})")
@@ -101,7 +103,8 @@ def process_video(
101
  "color_landmarks": landmark_color_tuple,
102
  "color_connections": connection_color_tuple,
103
  "radius": int(landmark_radius),
104
- "thickness": int(connection_thickness)
 
105
  }
106
 
107
  print(f"Drawing settings: {drawing_settings}")
@@ -163,6 +166,12 @@ with gr.Blocks(title="Body Pose Estimation") as demo:
163
  precision=0
164
  )
165
 
 
 
 
 
 
 
166
  video_upload = gr.File(
167
  label="Upload Video",
168
  file_types=["video"],
@@ -197,12 +206,13 @@ with gr.Blocks(title="Body Pose Estimation") as demo:
197
  )
198
 
199
  # Handle processing
200
- def process_and_update(video, radius, land_color, conn_color, conn_thickness):
201
  try:
202
  # Debug: Print what we receive from the UI
203
  print(f"=== DEBUG: Values received from UI ===")
204
  print(f"land_color: {land_color}")
205
  print(f"conn_color: {conn_color}")
 
206
  print(f"==========================================")
207
 
208
  # Update status
@@ -213,7 +223,8 @@ with gr.Blocks(title="Body Pose Estimation") as demo:
213
  int(radius),
214
  land_color,
215
  conn_color,
216
- int(conn_thickness)
 
217
  )
218
 
219
  gr.Info("Video processing complete!")
@@ -224,7 +235,7 @@ with gr.Blocks(title="Body Pose Estimation") as demo:
224
 
225
  process_button.click(
226
  fn=process_and_update,
227
- inputs=[video_upload, landmark_radius, landmark_color, connection_color, connection_thickness],
228
  outputs=[video_output, download_button]
229
  )
230
 
 
61
  landmark_color,
62
  connection_color,
63
  connection_thickness,
64
+ landmark_shape,
65
  ):
66
  """
67
  Process video with body pose estimation and return output video path.
 
87
 
88
  output_dir = temp_dir / "output"
89
  output_dir.mkdir(parents=True, exist_ok=True)
90
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
91
+ output_video_path = output_dir / f"output_{timestamp}.mp4"
92
 
93
  # Convert colors to BGR tuples
94
  print(f"Raw landmark_color input: {landmark_color} (type: {type(landmark_color)})")
 
103
  "color_landmarks": landmark_color_tuple,
104
  "color_connections": connection_color_tuple,
105
  "radius": int(landmark_radius),
106
+ "thickness": int(connection_thickness),
107
+ "shape": landmark_shape
108
  }
109
 
110
  print(f"Drawing settings: {drawing_settings}")
 
166
  precision=0
167
  )
168
 
169
+ landmark_shape = gr.Dropdown(
170
+ choices=["heart", "star"],
171
+ value="heart",
172
+ label="Landmark Shape"
173
+ )
174
+
175
  video_upload = gr.File(
176
  label="Upload Video",
177
  file_types=["video"],
 
206
  )
207
 
208
  # Handle processing
209
+ def process_and_update(video, radius, land_color, conn_color, conn_thickness, shape):
210
  try:
211
  # Debug: Print what we receive from the UI
212
  print(f"=== DEBUG: Values received from UI ===")
213
  print(f"land_color: {land_color}")
214
  print(f"conn_color: {conn_color}")
215
+ print(f"shape: {shape}")
216
  print(f"==========================================")
217
 
218
  # Update status
 
223
  int(radius),
224
  land_color,
225
  conn_color,
226
+ int(conn_thickness),
227
+ shape
228
  )
229
 
230
  gr.Info("Video processing complete!")
 
235
 
236
  process_button.click(
237
  fn=process_and_update,
238
+ inputs=[video_upload, landmark_radius, landmark_color, connection_color, connection_thickness, landmark_shape],
239
  outputs=[video_output, download_button]
240
  )
241
 
src/utils.py CHANGED
@@ -18,6 +18,7 @@ from transformers import (
18
  infer_device,
19
  )
20
  from src.body_pose.vertex_annotator_heart import VertexAnnotatorHeart
 
21
 
22
  # Import mediapipe with fallback
23
  try:
@@ -239,9 +240,17 @@ def process_body_pose_estimation(path_video, output_folder, drawing_settings):
239
  key_points = sv.KeyPoints(xy=xy, confidence=scores)
240
 
241
  edge_annotator = sv.EdgeAnnotator(color=color_edge_annotator, thickness=drawing_settings.get("thickness"))
242
- vertex_annotator = VertexAnnotatorHeart(
243
- color=color_vertex_annotator, radius=drawing_settings.get("radius")
244
- )
 
 
 
 
 
 
 
 
245
 
246
  annotated_frame = edge_annotator.annotate(
247
  scene=frame.copy(), key_points=key_points
 
18
  infer_device,
19
  )
20
  from src.body_pose.vertex_annotator_heart import VertexAnnotatorHeart
21
+ from src.body_pose.vertex_annotator_star import VertexAnnotatorStar
22
 
23
  # Import mediapipe with fallback
24
  try:
 
240
  key_points = sv.KeyPoints(xy=xy, confidence=scores)
241
 
242
  edge_annotator = sv.EdgeAnnotator(color=color_edge_annotator, thickness=drawing_settings.get("thickness"))
243
+
244
+ # Select vertex annotator based on shape parameter
245
+ shape = drawing_settings.get("shape", "heart")
246
+ if shape == "star":
247
+ vertex_annotator = VertexAnnotatorStar(
248
+ color=color_vertex_annotator, radius=drawing_settings.get("radius")
249
+ )
250
+ else:
251
+ vertex_annotator = VertexAnnotatorHeart(
252
+ color=color_vertex_annotator, radius=drawing_settings.get("radius")
253
+ )
254
 
255
  annotated_frame = edge_annotator.annotate(
256
  scene=frame.copy(), key_points=key_points