# Hugging Face upload metadata (SitongGong's picture)
# Upload folder: ESTP-Bench
# revision 7f3f41b verified
import torch
from PIL import Image
from transformers import AutoConfig, AutoModel, AutoTokenizer, LlamaTokenizer, LlamaForCausalLM
from decord import VideoReader, cpu
import decord
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_in_model, dispatch_model
import torch.nn as nn
import einops
from .modelclass import Model
import torchshow as ts
import sys
sys.path.insert(0, "/root/videollm-online/baseline/TimeChat")
import argparse
import os
import random
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torchshow as ts
from timechat.common.config import Config
from timechat.common.dist_utils import get_rank
from timechat.common.registry import registry
from timechat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle, conv_llava_llama_2
import decord
import cv2
import time
import subprocess
from decord import VideoReader
from timechat.processors.video_processor import ToTHWC, ToUint8, load_video
decord.bridge.set_bridge('torch')
# imports modules for registration
from timechat.datasets.builders import *
from timechat.models import *
from timechat.processors import *
from timechat.runners import *
from timechat.tasks import *
import random as rnd
from transformers import StoppingCriteria, StoppingCriteriaList
from PIL import Image
import gradio as gr
import random as rnd
import numpy as np
import re
import math
def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float) -> float:
    """Snap `time` (seconds) up to the next frame boundary at `fps`, clamped to [min_time, max_time]."""
    snapped = math.ceil(time * fps) / fps
    # Clamp: lower bound first, then upper bound (upper bound wins if they conflict).
    bounded_low = max(snapped, min_time)
    return min(bounded_low, max_time)
# HACK: modified copy of TimeChat's load_video that honors explicit start/end times.
def load_video(video_path, start_time, end_time, n_frms=32, height=-1, width=-1, sampling="uniform", return_msg = False):
    """
    Decode a clip of `video_path` and return up to `n_frms` sampled frames.

    Args:
        video_path: path to a video file readable by decord.
        start_time: clip start in seconds, or None to start at the first frame.
        end_time: clip end in seconds, or None to run to the last frame.
        n_frms: maximum number of frames to sample (capped by clip length).
        height, width: decord resize targets; -1 keeps the original size.
        sampling: "uniform", "headtail", or "rand" frame-selection strategy.
        return_msg: when True, also return a human-readable summary string.

    Returns:
        A (C, T, H, W) float tensor, plus the summary message if `return_msg`.
    """
    decord.bridge.set_bridge("torch")
    vr = VideoReader(uri=video_path, height=height, width=width)
    fps = float(vr.get_avg_fps())
    # Bug fix: the original left start_frame/end_frame undefined (NameError)
    # whenever start_time or end_time was None; default to the full video.
    start_frame = 0
    end_frame = len(vr)
    if start_time is not None:
        start_time = ceil_time_by_fps(start_time, fps, min_time=0, max_time=len(vr) / fps)
        start_frame = int(start_time * fps)
    if end_time is not None:
        end_time = ceil_time_by_fps(end_time, fps, min_time=0, max_time=len(vr) / fps)
        end_frame = int(end_time * fps + 1)
    vlen = end_frame - start_frame
    acc_samples = min(n_frms, vlen)
    n_frms = min(n_frms, vlen)
    if sampling == "uniform":
        indices = np.arange(start_frame, end_frame, vlen / n_frms).astype(int).tolist()
    elif sampling == "headtail":
        # NOTE(review): head/tail indices are relative to 0, not start_frame —
        # preserved as-is to match upstream TimeChat behavior; confirm intent.
        indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2))
        indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2))
        indices = indices_h + indices_t
    elif sampling == 'rand':
        # Split the clip into `acc_samples` intervals, and sample from each interval.
        intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
        ranges = []
        for idx, interv in enumerate(intervals[:-1]):
            ranges.append((interv, intervals[idx + 1] - 1))
        try:
            indices = [rnd.choice(range(x[0], x[1])) for x in ranges]
        # Bug fix: narrowed the bare `except:` — an empty interval (x[0] == x[1])
        # makes rnd.choice raise IndexError; anything else should propagate.
        except IndexError:
            indices = np.random.permutation(vlen)[:acc_samples]
            indices.sort()
            indices = list(indices)
    else:
        raise NotImplementedError
    # get_batch -> (T, H, W, C); with the torch bridge this is usually already a tensor.
    temp_frms = vr.get_batch(indices)
    tensor_frms = temp_frms if isinstance(temp_frms, torch.Tensor) else torch.from_numpy(temp_frms)
    frms = tensor_frms.permute(3, 0, 1, 2).float()  # (C, T, H, W)
    if not return_msg:
        return frms
    sec = ", ".join([str(round(f / fps, 1)) for f in indices])
    # " " should be added in the start and end
    msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. "
    return frms, msg
class TimeChat(Model):
    """Benchmark wrapper around the TimeChat video-LLM (temporal video grounding)."""

    def __init__(self, device, config=None):
        """
        Initialize the model by loading the pretrained TimeChat model and tokenizer.

        Args:
            device: torch device string, e.g. "cuda:0".
            config: benchmark config object; also forwarded to TimeChat's Config parser.
                    Must expose frame_fps, max_frames_num, num_beams, temperature,
                    height, width.
        """
        self.device = device
        self.config = config
        # Parse configuration from TimeChat
        cfg = Config(config)
        # Hard-coded paths to the released TimeChat-7B / ViT / Q-Former checkpoints.
        DIR = "/home/docker_shared/asus/zhangyl/model/huggingface/hub/models--ShuhuaiRen--TimeChat-7b/snapshots/e12f42c6c9bd114525e99b2d5b1903d86ea3ce43/"
        MODEL_DIR = f"{DIR}/timechat_7b.pth"
        model_config = cfg.model_cfg
        # device_8bit wants the CUDA index; fall back to 0 for bare device strings.
        model_config.device_8bit = int(device.split(':')[1]) if ':' in device else 0
        model_config.ckpt = MODEL_DIR
        model_config.vit_model = "/root/videollm-online/baseline/TimeChat/ckpt/eva_vit_g.pth"
        model_config.q_former_model = "/root/videollm-online/baseline/TimeChat/ckpt/instruct_blip_vicuna7b_trimmed.pth"
        # Initialize TimeChat model
        model_cls = registry.get_model_class(model_config.arch)
        self.model = model_cls.from_config(model_config).to(device)
        self.model.eval()
        # Initialize processor for video frames
        vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train
        self.vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
        self.chat = Chat(self.model, self.vis_processor, device=device)
        # Generation / sampling parameters from the benchmark config
        self.frame_fps = config.frame_fps
        self.MAX_NUM_FRAMES = config.max_frames_num
        self.num_beams = config.num_beams
        self.temperature = config.temperature
        self.height = config.height
        self.width = config.width

    def Run(self, file, inp, start_time, end_time):
        """
        Run TimeChat on a video clip and parse temporally-grounded answers.

        Args:
            file: video file path.
            inp: question / instruction text inserted into the prompt template.
            start_time, end_time: clip bounds in seconds (None for full video).

        Returns:
            List of (moment_midpoint_seconds, answer_text) tuples parsed from
            the model's "start - end seconds, answer" response lines.
        """
        # Encode video frames
        frames, msg = load_video(
            video_path=file,
            start_time=start_time,
            end_time=end_time,
            n_frms=self.MAX_NUM_FRAMES,
            height=self.height,
            width=self.width,
            sampling="uniform", return_msg=True
        )
        img_list = []
        chat_state = conv_llava_llama_2.copy()
        chat_state.system = "You are able to understand the visual content that the user provides. Follow the instructions carefully and explain your answers in detail."
        msg = self.chat.upload_video_without_audio(
            video_path=(frames, msg),
            conv=chat_state,
            img_list=img_list,
            n_frms=self.MAX_NUM_FRAMES,
        )
        # Bug fix: removed a dead inline prompt assignment that was immediately
        # overwritten, and closed the prompt file (the original leaked the handle).
        with open('/root/videollm-online/baseline/TimeChat/prompts/tvg_description.txt') as f:
            text_input = f.read()
        self.chat.ask(text_input.format(inp), chat_state)
        # self.chat.ask(text_input + "\n" + inp, chat_state)
        num_beams = self.num_beams
        temperature = self.temperature
        llm_message = self.chat.answer(conv=chat_state, img_list=img_list, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0]
        response = llm_message.split('\n')[-1]
        # Bug fix: removed two leftover breakpoint() debugger calls that would
        # halt any non-interactive run.
        # Parse "start - end seconds, answer" spans out of the last response line.
        pattern = r"(\d+\.\d+) - (\d+\.\d+)\s*seconds,\s*(.*)"
        matches = re.findall(pattern, response)
        answer_pairs = []
        for moment_start, moment_end, answer in matches:
            midpoint = (float(moment_start) + float(moment_end)) / 2
            answer_pairs.append((midpoint, answer))
        return answer_pairs

    @staticmethod
    def name():
        """
        Return the name of the model.
        """
        return "TimeChat"