state buttons
Browse files
app.py
CHANGED
|
@@ -159,16 +159,16 @@ def run_video_inference(
|
|
| 159 |
confidence_threshold: Confidence threshold for filtering predictions
|
| 160 |
|
| 161 |
Returns:
|
| 162 |
-
Tuple of (output_video_path, metrics_text)
|
| 163 |
"""
|
| 164 |
global streaming
|
| 165 |
streaming = True
|
| 166 |
|
| 167 |
if video_path is None:
|
| 168 |
-
return None, "⚠️ Please upload a video first."
|
| 169 |
|
| 170 |
if model_name is None or model_name == "No models available":
|
| 171 |
-
return None, "⚠️ No model selected or available."
|
| 172 |
|
| 173 |
try:
|
| 174 |
# Load model
|
|
@@ -178,7 +178,7 @@ def run_video_inference(
|
|
| 178 |
cap = cv2.VideoCapture(video_path)
|
| 179 |
|
| 180 |
if not cap.isOpened():
|
| 181 |
-
return None, "⚠️ Error: Could not open video file."
|
| 182 |
|
| 183 |
# Get video properties
|
| 184 |
video_codec = cv2.VideoWriter_fourcc(*"mp4v")
|
|
@@ -188,7 +188,7 @@ def run_video_inference(
|
|
| 188 |
# Read first frame to get dimensions
|
| 189 |
ret, frame = cap.read()
|
| 190 |
if not ret or frame is None:
|
| 191 |
-
return None, "⚠️ Error: Could not read video frames."
|
| 192 |
|
| 193 |
# Process first frame to get output dimensions
|
| 194 |
result = model(frame)
|
|
@@ -241,20 +241,28 @@ def run_video_inference(
|
|
| 241 |
|
| 242 |
# Verify final file exists before returning
|
| 243 |
if os.path.exists(output_video_name) and os.path.getsize(output_video_name) > 0:
|
| 244 |
-
return output_video_name, final_metrics
|
| 245 |
else:
|
| 246 |
-
return None, final_metrics + "\n⚠️ Final video file not available."
|
| 247 |
|
| 248 |
except Exception as e:
|
| 249 |
error_msg = f"Error during video inference: {str(e)}"
|
| 250 |
-
return None, error_msg
|
| 251 |
|
| 252 |
|
| 253 |
def stop_video_inference():
    """Request that the running video inference loop stop.

    Clears the module-level ``streaming`` flag, which the processing loop
    checks between frames, and returns a status message for the UI.

    Returns:
        Status string confirming the stop request.
    """
    global streaming
    streaming = False
    status = "⏹️ Video processing stopped."
    return status
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
|
| 259 |
|
| 260 |
def format_results(result, confidence_threshold: float) -> str:
|
|
@@ -423,8 +431,8 @@ def create_gradio_interface():
|
|
| 423 |
)
|
| 424 |
|
| 425 |
with gr.Row():
|
| 426 |
-
video_start_btn = gr.Button("▶️ Start Processing", variant="primary")
|
| 427 |
-
video_stop_btn = gr.Button("⏹️ Stop", variant="stop")
|
| 428 |
|
| 429 |
with gr.Column(scale=1):
|
| 430 |
output_video = gr.Video(
|
|
@@ -440,17 +448,31 @@ def create_gradio_interface():
|
|
| 440 |
max_lines=15
|
| 441 |
)
|
| 442 |
|
| 443 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 444 |
video_start_btn.click(
|
| 445 |
-
fn=
|
| 446 |
inputs=[input_video, video_model_dropdown, video_confidence_slider],
|
| 447 |
-
outputs=[output_video, video_metrics_output]
|
| 448 |
)
|
| 449 |
|
| 450 |
video_stop_btn.click(
|
| 451 |
fn=stop_video_inference,
|
| 452 |
inputs=None,
|
| 453 |
-
outputs=video_metrics_output
|
| 454 |
)
|
| 455 |
|
| 456 |
# Video Examples section
|
|
|
|
| 159 |
confidence_threshold: Confidence threshold for filtering predictions
|
| 160 |
|
| 161 |
Returns:
|
| 162 |
+
Tuple of (output_video_path, metrics_text, start_btn_state, stop_btn_state)
|
| 163 |
"""
|
| 164 |
global streaming
|
| 165 |
streaming = True
|
| 166 |
|
| 167 |
if video_path is None:
|
| 168 |
+
return None, "⚠️ Please upload a video first.", gr.update(interactive=True), gr.update(interactive=False)
|
| 169 |
|
| 170 |
if model_name is None or model_name == "No models available":
|
| 171 |
+
return None, "⚠️ No model selected or available.", gr.update(interactive=True), gr.update(interactive=False)
|
| 172 |
|
| 173 |
try:
|
| 174 |
# Load model
|
|
|
|
| 178 |
cap = cv2.VideoCapture(video_path)
|
| 179 |
|
| 180 |
if not cap.isOpened():
|
| 181 |
+
return None, "⚠️ Error: Could not open video file.", gr.update(interactive=True), gr.update(interactive=False)
|
| 182 |
|
| 183 |
# Get video properties
|
| 184 |
video_codec = cv2.VideoWriter_fourcc(*"mp4v")
|
|
|
|
| 188 |
# Read first frame to get dimensions
|
| 189 |
ret, frame = cap.read()
|
| 190 |
if not ret or frame is None:
|
| 191 |
+
return None, "⚠️ Error: Could not read video frames.", gr.update(interactive=True), gr.update(interactive=False)
|
| 192 |
|
| 193 |
# Process first frame to get output dimensions
|
| 194 |
result = model(frame)
|
|
|
|
| 241 |
|
| 242 |
# Verify final file exists before returning
|
| 243 |
if os.path.exists(output_video_name) and os.path.getsize(output_video_name) > 0:
|
| 244 |
+
return output_video_name, final_metrics, gr.update(interactive=True), gr.update(interactive=False)
|
| 245 |
else:
|
| 246 |
+
return None, final_metrics + "\n⚠️ Final video file not available.", gr.update(interactive=True), gr.update(interactive=False)
|
| 247 |
|
| 248 |
except Exception as e:
|
| 249 |
error_msg = f"Error during video inference: {str(e)}"
|
| 250 |
+
return None, error_msg, gr.update(interactive=True), gr.update(interactive=False)
|
| 251 |
|
| 252 |
|
| 253 |
def stop_video_inference():
    """Stop video processing and restore the start/stop button states.

    Clears the module-level ``streaming`` flag (checked by the processing
    loop between frames), then hands back a status message plus component
    updates that re-enable Start and disable Stop.

    Returns:
        Tuple of (status_message, start_btn_update, stop_btn_update).
    """
    global streaming
    streaming = False
    return (
        "⏹️ Video processing stopped.",
        gr.update(interactive=True),
        gr.update(interactive=False),
    )
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def enable_video_buttons(video):
    """Toggle button interactivity based on whether a video is loaded.

    The Start button becomes clickable only once a video has been
    uploaded; the Stop button stays disabled in either case, since it is
    only meaningful while processing is running.

    Args:
        video: The uploaded video value from the input component, or None.

    Returns:
        Tuple of (start_btn_update, stop_btn_update).
    """
    has_video = video is not None
    return gr.update(interactive=has_video), gr.update(interactive=False)
|
| 266 |
|
| 267 |
|
| 268 |
def format_results(result, confidence_threshold: float) -> str:
|
|
|
|
| 431 |
)
|
| 432 |
|
| 433 |
with gr.Row():
|
| 434 |
+
video_start_btn = gr.Button("▶️ Start Processing", variant="primary", interactive=False)
|
| 435 |
+
video_stop_btn = gr.Button("⏹️ Stop", variant="stop", interactive=False)
|
| 436 |
|
| 437 |
with gr.Column(scale=1):
|
| 438 |
output_video = gr.Video(
|
|
|
|
| 448 |
max_lines=15
|
| 449 |
)
|
| 450 |
|
| 451 |
+
# Enable start button when video is uploaded
|
| 452 |
+
input_video.change(
|
| 453 |
+
fn=enable_video_buttons,
|
| 454 |
+
inputs=[input_video],
|
| 455 |
+
outputs=[video_start_btn, video_stop_btn]
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
# Connect video buttons - when clicked, start is disabled and stop is enabled
|
| 459 |
+
def start_processing_wrapper(video, model, conf):
    """Drive one video-inference run while keeping button states in sync.

    Generator used as the Start button handler: the first yield greys out
    Start and activates Stop before the long-running work begins; the
    second yield forwards the full result tuple from run_video_inference
    (output video, metrics text, and the restored button states).

    Args:
        video: Path/value of the uploaded input video.
        model: Selected model name from the dropdown.
        conf: Confidence threshold from the slider.
    """
    # Flip the buttons immediately, before inference starts.
    yield (
        None,
        "🔄 Starting video processing...",
        gr.update(interactive=False),
        gr.update(interactive=True),
    )
    # run_video_inference returns (video_path, metrics, start_state, stop_state).
    yield run_video_inference(video, model, conf)
|
| 465 |
+
|
| 466 |
video_start_btn.click(
|
| 467 |
+
fn=start_processing_wrapper,
|
| 468 |
inputs=[input_video, video_model_dropdown, video_confidence_slider],
|
| 469 |
+
outputs=[output_video, video_metrics_output, video_start_btn, video_stop_btn]
|
| 470 |
)
|
| 471 |
|
| 472 |
video_stop_btn.click(
|
| 473 |
fn=stop_video_inference,
|
| 474 |
inputs=None,
|
| 475 |
+
outputs=[video_metrics_output, video_start_btn, video_stop_btn]
|
| 476 |
)
|
| 477 |
|
| 478 |
# Video Examples section
|