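"""VILA baseline for ESTP-Bench.

Wraps Efficient-Large-Model/Llama-3-VILA1.5-8B behind the benchmark's shared
Model interface: frames are sampled from the requested video segment, folded
into a multimodal Llama-3 prompt, and decoded into a text answer.
"""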
import torch
from llava.constants import (
DEFAULT_IM_END_TOKEN,
DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN,
IMAGE_PLACEHOLDER,
IMAGE_TOKEN_INDEX,
)
from llava.conversation import SeparatorStyle, conv_templates
from llava.mm_utils import KeywordsStoppingCriteria, get_model_name_from_path, process_images, tokenizer_image_token
from llava.model.builder import load_pretrained_model
from decord import VideoReader, cpu
import numpy as np
import math

from model.modelclass import Model

def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float):
    """Round `time` up to the next 1/fps boundary, clamped to [min_time, max_time]."""
    return min(max(math.ceil(time * fps) / fps, min_time), max_time)
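# Example (hypothetical values): ceil_time_by_fps(3.21, fps=2, min_time=0, max_time=10.0)
# returns 3.5, i.e. 3.21 s rounded up to the next frame boundary at 2 fps
# (multiples of 0.5 s), which keeps clip bounds aligned to sampled frames.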


class VILA(Model):
    """Benchmark-facing wrapper; the heavy model state lives in module globals."""

    def __init__(self, device, config=None):
        VILA_Init()
        self.device = device
        # HACK: add device to config
        self.config = config
        # NOTE: despite the config=None default, a config exposing frame_fps
        # is required here.
        self.frame_fps = config.frame_fps
        self.MAX_NUM_FRAMES = 16

    def Run(self, file, inp, start_time, end_time):
        return VILA_Run(file, inp, start_time, end_time)

    def name(self):
        return "VILA"
tokenizer, model, image_processor, context_len = None, None, None, None


def VILA_Init():
    model_path = "Efficient-Large-Model/Llama-3-VILA1.5-8B"
    model_name = get_model_name_from_path(model_path)
    global tokenizer, model, image_processor, context_len
    tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, model_name, None)
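
# Each VILA() construction calls VILA_Init() and reloads the checkpoint into
# the globals above; construct the wrapper once per process.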


def load_video(video_path, start_time, end_time, max_num_frames=32):
    vr = VideoReader(video_path, ctx=cpu(0))
    total_frame_num = len(vr)
    fps = round(vr.get_avg_fps())
    # Stepping by `sample_fps` source frames yields roughly one frame per second.
    sample_fps = fps
    # Default to the whole video; narrow to [start_time, end_time] when given.
    start_frame, end_frame = 0, total_frame_num
    if start_time is not None:
        start_time = ceil_time_by_fps(start_time, sample_fps, min_time=0, max_time=total_frame_num / sample_fps)
        start_frame = int(start_time * sample_fps)
    if end_time is not None:
        end_time = ceil_time_by_fps(end_time, sample_fps, min_time=0, max_time=total_frame_num / sample_fps)
        end_frame = int(end_time * sample_fps + 1)
    frame_idx = list(range(start_frame, end_frame, sample_fps))
    if len(frame_idx) > max_num_frames:
        # Too many one-per-second frames: resample uniformly to max_num_frames.
        frame_idx = np.linspace(start_frame, end_frame - 1, max_num_frames, dtype=int).tolist()
    # decord's NDArray exposes asnumpy(); .numpy() only exists when a
    # torch/tensorflow bridge has been set.
    frames = vr.get_batch(frame_idx).asnumpy()
    return frames
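
# Example (hypothetical numbers): a 300-frame video at 30 fps with start_time=2
# and end_time=6 gives start_frame=60, end_frame=181, and
# frame_idx == [60, 90, 120, 150, 180]: one frame per second of the 2-6 s window.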


def VILA_Run(file, inp, start_time, end_time):
    num_video_frames = 16
    images = load_video(file, start_time, end_time, num_video_frames)
    num_frames = len(images)
    # Prepend one image token (optionally wrapped in start/end tokens) per frame.
    image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
    if model.config.mm_use_im_start_end:
        qs = (image_token_se + "\n") * num_frames + inp
    else:
        qs = (DEFAULT_IMAGE_TOKEN + "\n") * num_frames + inp
    # Build the Llama-3 chat prompt with the user turn filled in.
    conv = conv_templates["llama_3"].copy()
    conv.append_message(conv.roles[0], qs)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()
    images_tensor = process_images(images, image_processor, model.config).to(model.device, dtype=torch.float16)
    input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
    # Stop generation at the conversation separator.
    stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
    stopping_criteria = KeywordsStoppingCriteria([stop_str], tokenizer, input_ids)
    with torch.inference_mode():
        output_ids = model.generate(
            input_ids,
            images=[images_tensor],
            do_sample=True,
            temperature=0.2,
            top_p=None,
            num_beams=1,
            max_new_tokens=512,
            use_cache=True,
            stopping_criteria=[stopping_criteria],
        )
    outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
    # The stopping criterion can leave the stop string in the decoded text; trim it.
    if outputs.endswith(stop_str):
        outputs = outputs[: -len(stop_str)]
    outputs = outputs.strip()
    print(outputs)
    return outputs
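

if __name__ == "__main__":
    # Minimal smoke test, a sketch only: "sample.mp4" and _Cfg are hypothetical
    # stand-ins for the video file and config object the benchmark harness supplies.
    class _Cfg:
        frame_fps = 1

    vila = VILA(device="cuda", config=_Cfg())
    print(vila.Run("sample.mp4", "Describe what happens in this clip.", start_time=0.0, end_time=8.0))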