Commit
·
d1059e1
1
Parent(s):
4563224
ddd
Browse files
- handler.py +3 -1
handler.py
CHANGED
|
@@ -112,7 +112,7 @@ class EndpointHandler:
|
|
| 112 |
|
| 113 |
vr = VideoReader(video_path, ctx=cpu(0))
|
| 114 |
total_frames = len(vr)
|
| 115 |
-
indices =
|
| 116 |
frames = [Image.fromarray(vr[i].asnumpy()) for i in indices]
|
| 117 |
|
| 118 |
transform = T.Compose([
|
|
@@ -123,6 +123,8 @@ class EndpointHandler:
|
|
| 123 |
])
|
| 124 |
processed = [transform(frame) for frame in frames] # each is [3, 224, 224]
|
| 125 |
video_tensor = torch.stack(processed, dim=0) # [T, 3, 224, 224]
|
|
|
|
|
|
|
| 126 |
return video_tensor
|
| 127 |
|
| 128 |
def get_index(self, num_frames: int, num_segments: int):
|
|
|
|
| 112 |
|
| 113 |
vr = VideoReader(video_path, ctx=cpu(0))
|
| 114 |
total_frames = len(vr)
|
| 115 |
+
indices = get_index(total_frames, num_segments)
|
| 116 |
frames = [Image.fromarray(vr[i].asnumpy()) for i in indices]
|
| 117 |
|
| 118 |
transform = T.Compose([
|
|
|
|
| 123 |
])
|
| 124 |
processed = [transform(frame) for frame in frames] # each is [3, 224, 224]
|
| 125 |
video_tensor = torch.stack(processed, dim=0) # [T, 3, 224, 224]
|
| 126 |
+
video_tensor = video_tensor.unsqueeze(0) # [1, T, 3, 224, 224]
|
| 127 |
+
video_tensor = video_tensor.permute(0, 2, 1, 3, 4) # 💥 [1, 3, T, 224, 224]
|
| 128 |
return video_tensor
|
| 129 |
|
| 130 |
def get_index(self, num_frames: int, num_segments: int):
|