Spaces:
Running on Zero
Running on Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -244,13 +244,12 @@ def extract_video_frames(video_path: str):
|
|
| 244 |
log.info(f"clip_chunk: {clip_chunk.shape}, sync_chunk: {sync_chunk.shape}")
|
| 245 |
return clip_chunk, sync_chunk, duration_sec
|
| 246 |
|
| 247 |
-
|
| 248 |
def extract_features_cpu(clip_chunk, sync_chunk, caption):
|
| 249 |
model = _MODELS["feature_extractor"]
|
| 250 |
|
| 251 |
info = {}
|
| 252 |
with torch.no_grad():
|
| 253 |
-
# videoprism runs on CPU
|
| 254 |
clip_input = torch.from_numpy(clip_chunk).unsqueeze(0)
|
| 255 |
video_feat, frame_embed, _, text_feat = \
|
| 256 |
model.encode_video_and_text_with_videoprism(clip_input, [caption])
|
|
|
|
| 244 |
log.info(f"clip_chunk: {clip_chunk.shape}, sync_chunk: {sync_chunk.shape}")
|
| 245 |
return clip_chunk, sync_chunk, duration_sec
|
| 246 |
|
| 247 |
+
@spaces.GPU
|
| 248 |
def extract_features_cpu(clip_chunk, sync_chunk, caption):
|
| 249 |
model = _MODELS["feature_extractor"]
|
| 250 |
|
| 251 |
info = {}
|
| 252 |
with torch.no_grad():
|
|
|
|
| 253 |
clip_input = torch.from_numpy(clip_chunk).unsqueeze(0)
|
| 254 |
video_feat, frame_embed, _, text_feat = \
|
| 255 |
model.encode_video_and_text_with_videoprism(clip_input, [caption])
|