yonigozlan committed
Commit e905684 · Parent(s): 5a06b0b
Files changed (1):
app.py +2 -2
app.py CHANGED
@@ -378,7 +378,7 @@ def on_image_click(
     inference_session = state.inference_session
     original_size = None
     pixel_values = None
-    if not inference_session.processed_frames or frame_idx not in inference_session.processed_frames:
+    if inference_session.processed_frames is None or frame_idx not in inference_session.processed_frames:
         inputs = processor(images=state.video_frames[frame_idx], device=state.device, return_tensors="pt")
         original_size = inputs.original_sizes[0]
         pixel_values = inputs.pixel_values[0]
@@ -499,7 +499,7 @@ def propagate_masks(GLOBAL_STATE: gr.State):
     with torch.inference_mode():
         for frame_idx, frame in enumerate(GLOBAL_STATE.video_frames):
             pixel_values = None
-            if not inference_session.processed_frames or frame_idx not in inference_session.processed_frames:
+            if inference_session.processed_frames is None or frame_idx not in inference_session.processed_frames:
                 pixel_values = processor(images=frame, device="cuda", return_tensors="pt").pixel_values[0]
             sam2_video_output = model(inference_session=inference_session, frame=pixel_values, frame_idx=frame_idx)
             H = inference_session.video_height
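
Note on the change (illustration, not part of the commit): the old guard `not inference_session.processed_frames` is true for every falsy value, while the new `is None` check matches only None itself. A minimal sketch of the difference, using a plain dict as a stand-in for the session's frame cache:

for cache in (None, {}):
    old_guard = not cache        # True for None and for an empty cache alike
    new_guard = cache is None    # True only when the cache is literally None
    print(type(cache).__name__, old_guard, new_guard)
# NoneType True True
# dict True False

Combined with the trailing `frame_idx not in ...` check, both guards still short-circuit to the same overall result for an empty dict; the `is None` form simply never calls `bool()` on the cache, which stays safe even if `processed_frames` were a type with ambiguous truthiness (a multi-element torch.Tensor, for instance, raises inside `bool()`).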