# NOTE: this file was fetched from a Hugging Face upload (user SitongGong,
# folder ESTP-Bench, revision 7f3f41b, verified). The web-page header was
# accidentally left at the top of the file and made it unparseable; it is
# preserved here as a comment instead.
import torch
from PIL import Image
from transformers import AutoConfig, AutoModel, AutoTokenizer
from decord import VideoReader, cpu
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_in_model, dispatch_model
from .modelclass import Model
from torchvision import transforms
import sys
sys.path.insert(0, '/root/videollm-online/baseline/sdqes_baselines')
from models import get_model_class
import math
def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float):
    """Round *time* up to the next frame boundary at *fps*, then clamp.

    The timestamp is snapped to ceil(time * fps) / fps and constrained to
    the inclusive range [min_time, max_time] (lower bound applied first).
    """
    snapped = math.ceil(time * fps) / fps
    if snapped < min_time:
        snapped = min_time
    if snapped > max_time:
        snapped = max_time
    return snapped
class CLIP(Model):
    """SDQES streaming-detection baseline wrapped behind the `Model` interface.

    Loads a model via ``get_model_class`` (optionally from a checkpoint),
    samples one frame per second from a video clip, scores the frames against
    a text query, and converts rising edges of the per-frame probability into
    timestamped "Answer occurs." turns in a conversation list.
    """

    def __init__(self, device, config=None):
        """Build (or restore) the underlying model and the frame transform.

        Args:
            device: torch device string/object the model is moved to.
            config: namespace with model_name, checkpoint_path, n_frames,
                frame_sample_rate, spatial_size, norm_mean, norm_std, ...
                (forwarded wholesale to the model constructor via vars()).
        """
        # Restore from a checkpoint when one is configured, otherwise
        # construct the model from scratch.
        if config.checkpoint_path is not None:
            self.model = get_model_class(config.model_name).load_from_checkpoint(
                **vars(config), num_training_steps_per_epoch=10, num_val_dataloaders=0)
        else:
            self.model = get_model_class(config.model_name)(
                **vars(config), num_training_steps_per_epoch=10, num_val_dataloaders=0)
        self.model = self.model.to(device)
        self.model.eval()
        self.MAX_NUM_FRAMES = config.n_frames  # maximum number of frames to process
        # HACK: keep the raw config around for downstream access.
        self.config = config
        self.frame_fps = config.frame_sample_rate
        # Resize the shorter side, center-crop to a square, then normalize.
        # (A second Resize(spatial_size) after the CenterCrop in the original
        # pipeline was a no-op on an already spatial_size-sized image and was
        # removed.)
        self.video_transform = transforms.Compose([
            transforms.Resize(config.spatial_size),
            transforms.CenterCrop(config.spatial_size),
            transforms.Normalize(mean=config.norm_mean, std=config.norm_std),
        ])

    def encode_video(self, video_path, start_time, end_time):
        """Load and preprocess video frames at one frame per second.

        Args:
            video_path: path to a video file readable by decord.
            start_time: clip start in seconds, or None for the video start.
            end_time: clip end in seconds, or None for the video end.

        Returns:
            Normalized float tensor of shape [1, C, T, H, W].
        """
        # TODO: check if the video is too short
        vr = VideoReader(video_path, ctx=cpu(0))
        sample_fps = round(vr.get_avg_fps() / 1)  # native FPS, rounded
        # Default to the full video. FIX: previously start_frame/end_frame
        # were only assigned inside the `if` branches, so a None start_time
        # or end_time raised NameError below.
        start_frame = 0
        end_frame = len(vr)
        if start_time is not None:
            start_time = ceil_time_by_fps(
                start_time, sample_fps, min_time=0, max_time=len(vr) / sample_fps)
            start_frame = int(start_time * sample_fps)
        if end_time is not None:
            end_time = ceil_time_by_fps(
                end_time, sample_fps, min_time=0, max_time=len(vr) / sample_fps)
            end_frame = int(end_time * sample_fps + 1)
        # Stepping by the native FPS samples one frame per second.
        frame_idx = list(range(start_frame, end_frame, sample_fps))
        # [T, H, W, C]; assumes decord's torch bridge is active so that
        # .permute()/.float() work directly — TODO confirm against caller setup.
        frames = vr.get_batch(frame_idx)
        frames = frames.permute(0, 3, 1, 2)  # [T, H, W, C] -> [T, C, H, W]
        frames = frames.float() / 255.0      # scale to [0, 1] before Normalize
        frames = self.video_transform(frames)
        frames = frames.unsqueeze(0)         # [1, T, C, H, W]
        frames = frames.transpose(1, 2)      # -> [1, C, T, H, W]
        return frames

    def Run(self, file, inp, start_time, end_time, query_time):
        """Score the clip against query `inp` and emit a conversation.

        Args:
            file: video file path.
            inp: natural-language query (truncated to 70 characters).
            start_time / end_time: clip boundaries in seconds (may be None).
            query_time: timestamp attached to the user turn.

        Returns:
            List of role/content/time dicts: the user query followed by one
            assistant "Answer occurs." turn per predicted action start.
        """
        # Truncate over-long queries (presumably a text-encoder length
        # limit — TODO confirm).
        if inp and len(inp) > 70:
            inp = inp[:70]
        frames = self.encode_video(file, start_time, end_time).to(self.model.device)
        # Score in windows of 60 frames to bound memory usage.
        logits = []
        for start_idx in range(0, frames.shape[2], 60):
            with torch.no_grad():
                output_dict = self.model(frames[:, :, start_idx:start_idx + 60, :, :], [inp])
                logits.append(output_dict['logits'])
        logits = torch.cat(logits, dim=1) if len(logits) > 1 else logits[0]
        probs = torch.sigmoid(logits)
        t = 0.5  # fixed decision threshold
        # Rising-edge detection (adapted from SDQES model.metrics): a start is
        # a frame above threshold whose predecessor is not.
        is_pred = (probs > t)
        shifted_is_pred = torch.roll(is_pred, shifts=1, dims=-1)
        shifted_is_pred[..., 0] = 0  # pad with zero to manage edge cases
        starts = is_pred & (~shifted_is_pred)
        starts_indexed = (starts * torch.cumsum(starts, dim=-1))  # index of each action start
        start_frames = torch.nonzero(starts_indexed)[:, 1]
        conversation = [
            {'role': 'user', 'content': inp, 'time': query_time}
        ]
        # FIX: start_time may be None (whole-video run); treat that as 0
        # instead of raising TypeError on None + float.
        base_time = start_time if start_time is not None else 0
        for start_idx in start_frames:
            conversation.append({
                'role': 'assistant',
                'content': 'Answer occurs.',
                'time': base_time + start_idx.item() / self.frame_fps
            })
        return conversation

    @staticmethod
    def name():
        """Return the name of the model."""
        return "CLIP"
# python -m data.estp_dataset.model.EgoVLP --model_path openbmb/MiniCPM-V-2_6 --device cuda:0 --config config.json