# SitongGong's picture
# Upload folder: ESTP-Bench
# 7f3f41b verified
import os
import transformers
from data.utils import ffmpeg_once
from baseline.livecc.demo.infer import LiveCCDemoInfer
from decord import VideoReader, cpu
logger = transformers.logging.get_logger('liveinfer')
import time, math
def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float) -> float:
    """Snap *time* up to the next frame boundary at *fps*, clamped to [min_time, max_time].

    The timestamp is rounded up to a multiple of 1/fps, then bounded below by
    ``min_time`` and above by ``max_time`` (upper bound wins if they conflict).
    """
    snapped = math.ceil(time * fps) / fps
    lower_bounded = max(snapped, min_time)
    return min(lower_bounded, max_time)
class VideollmOnline:
    """Thin wrapper around ``LiveCCDemoInfer`` that replays a video clip
    frame-by-frame through the streaming inference engine and records the
    resulting conversation together with per-frame timing statistics."""

    def __init__(self, device, config=None):
        """Initialize the underlying LiveCC streaming-inference engine.

        Args:
            device: device spec forwarded to ``LiveCCDemoInfer``.
            config: optional engine configuration, forwarded as-is.
        """
        super().__init__()
        self.liveinfer = LiveCCDemoInfer(device=device, config=config)

    @staticmethod
    def name():
        """Return the display name of the model."""
        return "LiveCC"

    def get_cache_token_num(self):
        """Return the engine's current KV-cache size (token count)."""
        return self.liveinfer.get_memory_size()

    def Run(self, file, inp, start_time=None, end_time=None, query_time=0):
        """Stream the clip ``[start_time, end_time]`` of *file* through the engine.

        Args:
            file: path to the video file, readable by decord.
            inp: a single query string, or a list of query strings.
            start_time: window start in seconds; ``None`` means the clip start.
            end_time: window end in seconds; ``None`` means the clip end.
            query_time: absolute timestamp(s) of the query(ies) — a scalar when
                ``inp`` is a string, a matching list when ``inp`` is a list.

        Returns:
            The recorded conversation: a list of dicts with role/content/time
            plus per-step fps, wall-clock cost and KV-cache size, terminated by
            a ``{'role': 'fps', ...}`` summary entry.
        """
        self.liveinfer.reset()
        vr = VideoReader(file, ctx=cpu(0), num_threads=1)
        sample_fps = round(vr.get_avg_fps())  # sampling FPS of the source clip
        video_duration = len(vr) / sample_fps
        # Default to the full clip when no explicit window is given.  (The
        # previous version left start_frame/end_frame unassigned in that case,
        # crashing with NameError, and the final fps summary raised TypeError
        # on `None - None`.)
        if start_time is None:
            start_time = 0.0
            start_frame = 0
        else:
            start_time = ceil_time_by_fps(start_time, sample_fps, min_time=0, max_time=video_duration)
            start_frame = int(start_time * sample_fps)
        if end_time is None:
            end_time = video_duration
            end_frame = len(vr)
        else:
            end_time = ceil_time_by_fps(end_time, sample_fps, min_time=0, max_time=video_duration)
            end_frame = int(end_time * sample_fps + 1)
        self.liveinfer.load_video(file, range(start_frame, end_frame))
        # Register the query(ies) at times relative to the window start.
        if isinstance(inp, str):
            self.liveinfer.input_query_stream(inp, video_time=query_time - start_time)
        elif isinstance(inp, list):
            assert isinstance(query_time, list), "query_time must be a list"
            for i in range(len(inp)):
                self.liveinfer.input_query_stream(inp[i], video_time=query_time[i] - start_time)
        timecosts = []  # wall-clock seconds spent on each streamed frame
        history = {'video_path': file, 'frame_fps': self.liveinfer.frame_fps, 'conversation': []}
        for i in range(self.liveinfer.num_video_frames):
            real_start_time = time.time()
            self.liveinfer.input_video_stream(i / self.liveinfer.frame_fps)
            query, response = self.liveinfer()
            real_end_time = time.time()
            timecosts.append(real_end_time - real_start_time)
            fps = (i + 1) / sum(timecosts)  # running throughput so far
            # Timestamps are reported in absolute clip time (window-relative
            # engine time shifted back by start_time).
            if query:
                history['conversation'].append({'role': 'user', 'content': query, 'time': self.liveinfer.video_time + start_time, 'fps': fps, 'cost': timecosts[-1], 'kv_cache_size': self.get_cache_token_num()})
            if response:
                history['conversation'].append({'role': 'assistant', 'content': response, 'time': self.liveinfer.video_time + start_time, 'fps': fps, 'cost': timecosts[-1], 'kv_cache_size': self.get_cache_token_num()})
        # Trailing summary entry: effective playback speed over the window.
        history['conversation'].append({'role': 'fps', 'content': (end_time - start_time) / sum(timecosts)})
        return history['conversation']