yonigozlan HF Staff committed on
Commit
03b77e4
·
1 Parent(s): f38b48d
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -509,8 +509,8 @@ def on_image_click(
509
  @spaces.GPU()
510
  def propagate_masks(GLOBAL_STATE: gr.State):
511
  if GLOBAL_STATE is None or GLOBAL_STATE.inference_session is None:
512
- # yield GLOBAL_STATE, "Load a video first.", gr.update()
513
- return GLOBAL_STATE, "Load a video first.", gr.update()
514
 
515
  processor = deepcopy(GLOBAL_STATE.processor)
516
  model = deepcopy(GLOBAL_STATE.model)
@@ -524,7 +524,7 @@ def propagate_masks(GLOBAL_STATE: gr.State):
524
  processed = 0
525
 
526
  # Initial status; no slider change yet
527
- # yield GLOBAL_STATE, f"Propagating masks: {processed}/{total}", gr.update()
528
 
529
  last_frame_idx = 0
530
  with torch.inference_mode():
@@ -547,16 +547,15 @@ def propagate_masks(GLOBAL_STATE: gr.State):
547
 
548
  processed += 1
549
  # Every 15th frame (or last), move slider to current frame to update preview via slider binding
550
- # if processed % 15 == 0 or processed == total:
551
- # yield GLOBAL_STATE, f"Propagating masks: {processed}/{total}", gr.update(value=frame_idx)
552
- # else:
553
- # yield GLOBAL_STATE, f"Propagating masks: {processed}/{total}", gr.update()
554
 
555
  text = f"Propagated masks across {processed} frames for {len(inference_session.obj_ids)} objects."
556
 
557
  # Final status; ensure slider points to last processed frame
558
- # yield GLOBAL_STATE, text, gr.update(value=last_frame_idx)
559
- return GLOBAL_STATE, text, gr.update(value=last_frame_idx)
560
 
561
 
562
  def reset_session(GLOBAL_STATE: gr.State) -> tuple[AppState, Image.Image, int, int, str]:
@@ -812,7 +811,7 @@ with gr.Blocks(title="SAM2 Video (Transformers) - Interactive Segmentation", the
812
  propagate_btn.click(
813
  propagate_masks,
814
  inputs=[GLOBAL_STATE],
815
- outputs=[GLOBAL_STATE, propagate_status, frame_slider],
816
  )
817
 
818
  reset_btn.click(
 
509
  @spaces.GPU()
510
  def propagate_masks(GLOBAL_STATE: gr.State):
511
  if GLOBAL_STATE is None or GLOBAL_STATE.inference_session is None:
512
+ yield "Load a video first.", gr.update()
513
+ return
514
 
515
  processor = deepcopy(GLOBAL_STATE.processor)
516
  model = deepcopy(GLOBAL_STATE.model)
 
524
  processed = 0
525
 
526
  # Initial status; no slider change yet
527
+ yield f"Propagating masks: {processed}/{total}", gr.update()
528
 
529
  last_frame_idx = 0
530
  with torch.inference_mode():
 
547
 
548
  processed += 1
549
  # Every 15th frame (or last), move slider to current frame to update preview via slider binding
550
+ if processed % 15 == 0 or processed == total:
551
+ yield f"Propagating masks: {processed}/{total}", gr.update(value=frame_idx)
552
+ else:
553
+ yield f"Propagating masks: {processed}/{total}", gr.update()
554
 
555
  text = f"Propagated masks across {processed} frames for {len(inference_session.obj_ids)} objects."
556
 
557
  # Final status; ensure slider points to last processed frame
558
+ yield text, gr.update(value=last_frame_idx)
 
559
 
560
 
561
  def reset_session(GLOBAL_STATE: gr.State) -> tuple[AppState, Image.Image, int, int, str]:
 
811
  propagate_btn.click(
812
  propagate_masks,
813
  inputs=[GLOBAL_STATE],
814
+ outputs=[propagate_status, frame_slider],
815
  )
816
 
817
  reset_btn.click(