Spaces:
Running
Running
Anthony Liang committed on
Commit ·
5b15392
1
Parent(s): 1886c12
edit app to use frame steps
Browse files
app.py
CHANGED
|
@@ -319,6 +319,7 @@ def process_single_video(
|
|
| 319 |
task_text: str = "Complete the task",
|
| 320 |
server_url: str = "",
|
| 321 |
fps: float = 1.0,
|
|
|
|
| 322 |
) -> Tuple[Optional[str], Optional[str]]:
|
| 323 |
"""Process single video for progress and success predictions using eval server."""
|
| 324 |
# Get server URL from state if not provided
|
|
@@ -365,6 +366,8 @@ def process_single_video(
|
|
| 365 |
|
| 366 |
# Build payload and send to server
|
| 367 |
files, sample_data = build_payload([progress_sample])
|
|
|
|
|
|
|
| 368 |
response = post_batch_npy(server_url, files, sample_data, timeout_s=120.0)
|
| 369 |
|
| 370 |
# Process response
|
|
@@ -426,6 +429,7 @@ def process_two_videos(
|
|
| 426 |
prediction_type: str = "preference",
|
| 427 |
server_url: str = "",
|
| 428 |
fps: float = 1.0,
|
|
|
|
| 429 |
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
|
| 430 |
"""Process two videos for preference, similarity, or progress prediction using eval server."""
|
| 431 |
# Get server URL from state if not provided
|
|
@@ -523,6 +527,8 @@ def process_two_videos(
|
|
| 523 |
|
| 524 |
# Build payload and send to server
|
| 525 |
files, sample_data = build_payload([progress_sample_a, progress_sample_b])
|
|
|
|
|
|
|
| 526 |
response = post_batch_npy(server_url, files, sample_data, timeout_s=120.0)
|
| 527 |
|
| 528 |
# Process response
|
|
@@ -761,6 +767,11 @@ with demo:
|
|
| 761 |
step=0.1,
|
| 762 |
info="Frames per second to extract from video (higher = more frames)",
|
| 763 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 764 |
analyze_single_btn = gr.Button("Analyze Video", variant="primary")
|
| 765 |
|
| 766 |
gr.Markdown("---")
|
|
@@ -998,7 +1009,7 @@ with demo:
|
|
| 998 |
|
| 999 |
analyze_single_btn.click(
|
| 1000 |
fn=process_single_video,
|
| 1001 |
-
inputs=[single_video_input, task_text_input, server_url_state, fps_input_single],
|
| 1002 |
outputs=[progress_plot, info_output],
|
| 1003 |
api_name="process_single_video",
|
| 1004 |
)
|
|
@@ -1027,6 +1038,11 @@ with demo:
|
|
| 1027 |
step=0.1,
|
| 1028 |
info="Frames per second to extract from videos (higher = more frames)",
|
| 1029 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1030 |
analyze_dual_btn = gr.Button("Compare Videos", variant="primary")
|
| 1031 |
|
| 1032 |
gr.Markdown("---")
|
|
@@ -1476,7 +1492,7 @@ with demo:
|
|
| 1476 |
|
| 1477 |
analyze_dual_btn.click(
|
| 1478 |
fn=process_two_videos,
|
| 1479 |
-
inputs=[video_a_input, video_b_input, task_text_dual, prediction_type, server_url_state, fps_input_dual],
|
| 1480 |
outputs=[result_text, video_a_display, video_b_display],
|
| 1481 |
api_name="process_two_videos",
|
| 1482 |
)
|
|
|
|
| 319 |
task_text: str = "Complete the task",
|
| 320 |
server_url: str = "",
|
| 321 |
fps: float = 1.0,
|
| 322 |
+
use_frame_steps: bool = False,
|
| 323 |
) -> Tuple[Optional[str], Optional[str]]:
|
| 324 |
"""Process single video for progress and success predictions using eval server."""
|
| 325 |
# Get server URL from state if not provided
|
|
|
|
| 366 |
|
| 367 |
# Build payload and send to server
|
| 368 |
files, sample_data = build_payload([progress_sample])
|
| 369 |
+
# Add use_frame_steps flag to sample_data
|
| 370 |
+
sample_data["use_frame_steps"] = use_frame_steps
|
| 371 |
response = post_batch_npy(server_url, files, sample_data, timeout_s=120.0)
|
| 372 |
|
| 373 |
# Process response
|
|
|
|
| 429 |
prediction_type: str = "preference",
|
| 430 |
server_url: str = "",
|
| 431 |
fps: float = 1.0,
|
| 432 |
+
use_frame_steps: bool = False,
|
| 433 |
) -> Tuple[Optional[str], Optional[str], Optional[str]]:
|
| 434 |
"""Process two videos for preference, similarity, or progress prediction using eval server."""
|
| 435 |
# Get server URL from state if not provided
|
|
|
|
| 527 |
|
| 528 |
# Build payload and send to server
|
| 529 |
files, sample_data = build_payload([progress_sample_a, progress_sample_b])
|
| 530 |
+
# Add use_frame_steps flag to sample_data
|
| 531 |
+
sample_data["use_frame_steps"] = use_frame_steps
|
| 532 |
response = post_batch_npy(server_url, files, sample_data, timeout_s=120.0)
|
| 533 |
|
| 534 |
# Process response
|
|
|
|
| 767 |
step=0.1,
|
| 768 |
info="Frames per second to extract from video (higher = more frames)",
|
| 769 |
)
|
| 770 |
+
use_frame_steps_single = gr.Checkbox(
|
| 771 |
+
label="Use Frame Steps",
|
| 772 |
+
value=False,
|
| 773 |
+
info="Process frames incrementally (0:1, 0:2, 0:3, etc.) for autoregressive predictions",
|
| 774 |
+
)
|
| 775 |
analyze_single_btn = gr.Button("Analyze Video", variant="primary")
|
| 776 |
|
| 777 |
gr.Markdown("---")
|
|
|
|
| 1009 |
|
| 1010 |
analyze_single_btn.click(
|
| 1011 |
fn=process_single_video,
|
| 1012 |
+
inputs=[single_video_input, task_text_input, server_url_state, fps_input_single, use_frame_steps_single],
|
| 1013 |
outputs=[progress_plot, info_output],
|
| 1014 |
api_name="process_single_video",
|
| 1015 |
)
|
|
|
|
| 1038 |
step=0.1,
|
| 1039 |
info="Frames per second to extract from videos (higher = more frames)",
|
| 1040 |
)
|
| 1041 |
+
use_frame_steps_dual = gr.Checkbox(
|
| 1042 |
+
label="Use Frame Steps (Progress mode only)",
|
| 1043 |
+
value=False,
|
| 1044 |
+
info="Process frames incrementally for progress predictions",
|
| 1045 |
+
)
|
| 1046 |
analyze_dual_btn = gr.Button("Compare Videos", variant="primary")
|
| 1047 |
|
| 1048 |
gr.Markdown("---")
|
|
|
|
| 1492 |
|
| 1493 |
analyze_dual_btn.click(
|
| 1494 |
fn=process_two_videos,
|
| 1495 |
+
inputs=[video_a_input, video_b_input, task_text_dual, prediction_type, server_url_state, fps_input_dual, use_frame_steps_dual],
|
| 1496 |
outputs=[result_text, video_a_display, video_b_display],
|
| 1497 |
api_name="process_two_videos",
|
| 1498 |
)
|