# SitongGong's picture
# Upload folder: ESTP-Bench
# 7f3f41b verified
import torch
from PIL import Image
from transformers import AutoConfig, AutoModel, AutoTokenizer
from decord import VideoReader, cpu
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_in_model, dispatch_model
from .modelclass import Model
import math
def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float):
    """Round *time* up to the next frame boundary at *fps*, clamped to [min_time, max_time]."""
    snapped = math.ceil(time * fps) / fps
    if snapped < min_time:
        return min_time
    if snapped > max_time:
        return max_time
    return snapped
class MiniCPMV(Model):
    """Wrapper around the MiniCPM-V-2.6 vision-language model for video question answering."""

    def __init__(self, device, model_path='openbmb/MiniCPM-V-2_6', config=None):
        """
        Initialize the model by loading the pretrained MiniCPM-V model and tokenizer.

        Args:
            device: torch device (e.g. 'cuda:0') the model is moved to.
            model_path: HuggingFace repo id or local checkpoint path.
            config: object carrying at least a `frame_fps` attribute.
        """
        # sdpa or flash_attention_2, no eager (the remote model code requires one of these)
        self.model = AutoModel.from_pretrained(
            model_path,
            trust_remote_code=True,
            attn_implementation='sdpa',
            torch_dtype=torch.bfloat16,
        )
        self.model = self.model.eval().to(device)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        self.MAX_NUM_FRAMES = 64  # Maximum number of frames fed to the model
        # HACK : keep the benchmark config around for frame-rate info
        self.config = config
        self.frame_fps = config.frame_fps

    def encode_video(self, video_path, start_time, end_time):
        """
        Decode the video and return a list of PIL.Image frames, roughly one per
        second, optionally restricted to [start_time, end_time] in seconds.
        At most MAX_NUM_FRAMES frames are returned (uniformly subsampled).
        """
        def uniform_sample(l, n):
            # Pick n indices evenly spread over l (centered within each bucket).
            gap = len(l) / n
            idxs = [int(i * gap + gap / 2) for i in range(n)]
            return [l[i] for i in idxs]

        vr = VideoReader(video_path, ctx=cpu(0))
        sample_fps = round(vr.get_avg_fps())  # native FPS, rounded (the original "/ 1" was a no-op)
        duration = len(vr) / sample_fps
        # BUG FIX: the original only assigned start_frame/end_frame inside the
        # `is not None` branches, so passing None for either raised NameError.
        # Default to the full clip instead.
        start_frame = 0
        end_frame = len(vr)
        # HACK : load video by start time 2 end time
        if start_time is not None:
            start_time = ceil_time_by_fps(start_time, sample_fps, min_time=0, max_time=duration)
            start_frame = int(start_time * sample_fps)
        if end_time is not None:
            end_time = ceil_time_by_fps(end_time, sample_fps, min_time=0, max_time=duration)
            # Clamp: the "+ 1" inclusive end could otherwise index one past the last frame.
            end_frame = min(int(end_time * sample_fps + 1), len(vr))
        frame_idx = list(range(start_frame, end_frame, sample_fps))  # each second sample one frame
        if len(frame_idx) > self.MAX_NUM_FRAMES:
            frame_idx = uniform_sample(frame_idx, self.MAX_NUM_FRAMES)
        # NOTE(review): decord's default (native) bridge exposes .asnumpy(), not
        # .numpy(); .numpy() only exists when the torch bridge is active — confirm
        # which bridge the caller configures before changing this line.
        frames = vr.get_batch(frame_idx).numpy()
        frames = [Image.fromarray(v.astype('uint8')) for v in frames]
        return frames

    def Run(self, file, inp, start_time, end_time):
        """
        Run the model on a video file with the given text prompt.

        Args:
            file: path to the video file.
            inp: text prompt appended after the sampled frames.
            start_time, end_time: optional clip boundaries in seconds (None = whole video).

        Returns:
            (answer, num_frames): the model's text response and the number of frames used.
        """
        frames = self.encode_video(file, start_time, end_time)
        msgs = [
            {'role': 'user', 'content': frames + [inp]},
        ]
        # Set decode parameters for video
        params = {
            "use_image_id": False,
            "max_slice_nums": 1  # Adjust if CUDA OOM and video resolution > 448x448
        }
        # Generate the response using the model
        answer = self.model.chat(
            image=None,
            msgs=msgs,
            tokenizer=self.tokenizer,
            **params
        )
        return answer, len(frames)

    @staticmethod
    def name():
        """
        Return the name of the model
        """
        return "MiniCPMV"