chenemii committed on
Commit
6515800
·
1 Parent(s): 43be61b

frame size

Browse files
Files changed (1) hide show
  1. app/streamlit_app.py +11 -29
app/streamlit_app.py CHANGED
@@ -250,7 +250,7 @@ def main():
250
  with st.spinner("Processing video and detecting objects..."):
251
  frames, detections = process_video(video_path,
252
  sample_rate=sample_rate)
253
- st.success(f"Processed {len(frames)} frames")
254
 
255
  # Step 2: Analyze pose
256
  with st.spinner("Analyzing golfer's pose..."):
@@ -314,32 +314,6 @@ def main():
314
 
315
  # Show action buttons and their results (only if analysis is complete)
316
  if st.session_state.video_analyzed:
317
- # Display swing phases
318
- if 'swing_phases' in st.session_state.analysis_data:
319
- swing_phases = st.session_state.analysis_data['swing_phases']
320
- st.subheader("Swing Phases")
321
- phase_cols = st.columns(5)
322
- for i, (phase, frames_in_phase) in enumerate(swing_phases.items()):
323
- with phase_cols[i]:
324
- st.metric(label=phase.capitalize(),
325
- value=f"{len(frames_in_phase)} frames")
326
-
327
- # Display club speed if available
328
- if 'trajectory_data' in st.session_state.analysis_data and 'swing_phases' in st.session_state.analysis_data:
329
- trajectory_data = st.session_state.analysis_data['trajectory_data']
330
- swing_phases = st.session_state.analysis_data['swing_phases']
331
- impact_frames = swing_phases.get("impact", [])
332
- if impact_frames:
333
- impact_frame = impact_frames[len(impact_frames) // 2]
334
- if impact_frame in trajectory_data and trajectory_data[
335
- impact_frame].get("club_speed"):
336
- st.subheader("Club Speed")
337
- st.metric(
338
- label="Estimated Club Speed",
339
- value=
340
- f"{trajectory_data[impact_frame]['club_speed']:.1f} mph"
341
- )
342
-
343
  # Display the GPT prompt in an expander
344
  if 'prompt' in st.session_state.analysis_data:
345
  with st.expander("View LLM Prompt", expanded=False):
@@ -531,11 +505,19 @@ def main():
531
  height, width = rgb_frame.shape[:2]
532
  print(f"Frame dimensions for {phase}: {width}x{height}")
533
 
 
 
 
 
 
 
534
  pil_img = Image.fromarray(rgb_frame)
 
 
535
  pil_img.save(temp_file.name, format="JPEG", quality=95)
536
 
537
- # Display the image
538
- st.image(temp_file.name, use_container_width=True)
539
 
540
  # Clean up temp file
541
  try:
 
250
  with st.spinner("Processing video and detecting objects..."):
251
  frames, detections = process_video(video_path,
252
  sample_rate=sample_rate)
253
+ st.success("Video processing complete!")
254
 
255
  # Step 2: Analyze pose
256
  with st.spinner("Analyzing golfer's pose..."):
 
314
 
315
  # Show action buttons and their results (only if analysis is complete)
316
  if st.session_state.video_analyzed:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
317
  # Display the GPT prompt in an expander
318
  if 'prompt' in st.session_state.analysis_data:
319
  with st.expander("View LLM Prompt", expanded=False):
 
505
  height, width = rgb_frame.shape[:2]
506
  print(f"Frame dimensions for {phase}: {width}x{height}")
507
 
508
+ # Resize frame proportionally for better display
509
+ # Target width of 400 pixels while maintaining aspect ratio
510
+ target_width = 400
511
+ aspect_ratio = height / width
512
+ target_height = int(target_width * aspect_ratio)
513
+
514
  pil_img = Image.fromarray(rgb_frame)
515
+ # Resize the image proportionally
516
+ pil_img = pil_img.resize((target_width, target_height), Image.Resampling.LANCZOS)
517
  pil_img.save(temp_file.name, format="JPEG", quality=95)
518
 
519
+ # Display the image with fixed width
520
+ st.image(temp_file.name, width=target_width)
521
 
522
  # Clean up temp file
523
  try: