Spaces:
Running
on
Zero
Running
on
Zero
Update video_depth_anything/video_depth.py
Browse files
video_depth_anything/video_depth.py
CHANGED
|
@@ -64,7 +64,7 @@ class VideoDepthAnything(nn.Module):
|
|
| 64 |
depth = F.relu(depth)
|
| 65 |
return depth.squeeze(1).unflatten(0, (B, T)) # return shape [B, T, H, W]
|
| 66 |
|
| 67 |
-
def infer_video_depth(self, frames, target_fps, input_size=518, device='cuda'):
|
| 68 |
frame_height, frame_width = frames[0].shape[:2]
|
| 69 |
ratio = max(frame_height, frame_width) / min(frame_height, frame_width)
|
| 70 |
if ratio > 1.78:
|
|
@@ -95,7 +95,7 @@ class VideoDepthAnything(nn.Module):
|
|
| 95 |
|
| 96 |
depth_list = []
|
| 97 |
pre_input = None
|
| 98 |
-
for frame_id in range(0, org_video_len, frame_step):
|
| 99 |
cur_list = []
|
| 100 |
for i in range(INFER_LEN):
|
| 101 |
cur_list.append(torch.from_numpy(transform({'image': frame_list[frame_id+i].astype(np.float32) / 255.0})['image']).unsqueeze(0).unsqueeze(0))
|
|
|
|
| 64 |
depth = F.relu(depth)
|
| 65 |
return depth.squeeze(1).unflatten(0, (B, T)) # return shape [B, T, H, W]
|
| 66 |
|
| 67 |
+
def infer_video_depth(self, frames, target_fps, input_size=518, device='cuda'):
|
| 68 |
frame_height, frame_width = frames[0].shape[:2]
|
| 69 |
ratio = max(frame_height, frame_width) / min(frame_height, frame_width)
|
| 70 |
if ratio > 1.78:
|
|
|
|
| 95 |
|
| 96 |
depth_list = []
|
| 97 |
pre_input = None
|
| 98 |
+
for frame_id in tqdm(range(0, org_video_len, frame_step)):
|
| 99 |
cur_list = []
|
| 100 |
for i in range(INFER_LEN):
|
| 101 |
cur_list.append(torch.from_numpy(transform({'image': frame_list[frame_id+i].astype(np.float32) / 255.0})['image']).unsqueeze(0).unsqueeze(0))
|