| import torch |
| import os |
| import numpy as np |
| from PIL import Image |
| import copy |
|
|
| from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN |
| from llava.conversation import conv_templates, SeparatorStyle |
| from llava.model.builder import load_pretrained_model |
| from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria, process_images |
|
|
| from decord import VideoReader, cpu |
| from transformers import AutoConfig |
|
|
| device = "cuda:0" |
| device_map = "auto" |
|
|
| from .modelclass import Model |
| import math |
def ceil_time_by_fps(time: float, fps: int, min_time: float, max_time: float) -> float:
    """Snap *time* up to the next frame boundary at *fps*, clamped to [min_time, max_time].

    The timestamp is rounded up to the nearest multiple of 1/fps seconds, then
    forced into the allowed window.
    """
    snapped = math.ceil(time * fps) / fps
    if snapped < min_time:
        return min_time
    if snapped > max_time:
        return max_time
    return snapped
|
|
class LLaVANextVideo7(Model):
    """Model-interface wrapper for the LLaVA-Video-7B-Qwen2 checkpoint.

    Construction triggers the module-level loader so that all instances share
    one tokenizer/model/processor; inference is delegated to the module-level
    run routine.
    """

    def __init__(self, device, config=None):
        # Populate the shared globals, then move the loaded model onto the
        # requested device.
        LLaVANextVideo32_Init()
        model.to(device)

        self.device, self.config = device, config
        # assumes config exposes frame_fps / max_frames_num — TODO confirm against caller
        self.frame_fps = config.frame_fps
        self.MAX_NUM_FRAMES = config.max_frames_num

    def Run(self, file, inp, start_time, end_time):
        # Thin delegation; returns (response_text, frame_count).
        return LLaVANextVideo32_Run(file, inp, start_time, end_time)

    def name(self):
        return "LLaVANextVideo7B"
|
|
| cfg_pretrained, tokenizer, model, image_processor = None, None, None, None |
|
|
def LLaVANextVideo32_Init():
    """Load LLaVA-Video-7B-Qwen2 into the module-level globals.

    Fills cfg_pretrained, tokenizer, model and image_processor, puts the model
    in eval mode, and ensures the tokenizer has a pad token id.
    """
    global cfg_pretrained, tokenizer, model, image_processor

    model_path = "lmms-lab/LLaVA-Video-7B-Qwen2"
    model_name = get_model_name_from_path(model_path)

    # Spatial-pooling settings injected into the model config before the
    # weights are loaded.
    overwrite_config = {
        "mm_spatial_pool_mode": "average",
        "mm_spatial_pool_stride": 2,
        "mm_newline_position": "grid",
        "mm_pooling_position": "after",
    }

    cfg_pretrained = AutoConfig.from_pretrained(model_path)

    tokenizer, model, image_processor, _context_len = load_pretrained_model(
        model_path,
        None,
        model_name,
        load_8bit=False,
        overwrite_config=overwrite_config,
        attn_implementation="sdpa",
        device_map=device,
    )
    model.eval()

    # Qwen tokenizers ship without a pad token; 151643 is hard-coded here as
    # the fallback id for qwen-family checkpoints.
    if tokenizer.pad_token_id is None and "qwen" in tokenizer.name_or_path.lower():
        print("Setting pad token to bos token for qwen model.")
        tokenizer.pad_token_id = 151643
|
|
def load_video(video_path, start_time, end_time):
    """Decode frames from *video_path* between *start_time* and *end_time* (seconds).

    Samples one frame per second of video (step of ``sample_fps`` indices) over
    the requested window, falling back to uniform sampling when more than 32
    frames would be produced.

    Fixes vs. original:
    - ``start_frame``/``end_frame`` were undefined (NameError) whenever
      ``start_time``/``end_time`` was ``None``; they now default to the full
      clip.
    - ``end_frame`` could exceed the last valid index by one (ceil + 1 after
      clamping the time to exactly the clip duration); it is now clamped to
      ``total_frame_num``.

    Returns:
        numpy array of decoded frames, shape (num_frames, H, W, C).
    """
    vr = VideoReader(video_path, ctx=cpu(0))
    total_frame_num = len(vr)
    fps = round(vr.get_avg_fps())
    sample_fps = round(fps / 1)  # step in frame indices == ~1 second of video

    # Default to the whole clip; narrow the window only when bounds are given.
    start_frame = 0
    end_frame = total_frame_num
    if start_time is not None:
        start_time = ceil_time_by_fps(start_time, sample_fps, min_time=0, max_time=total_frame_num / sample_fps)
        start_frame = int(start_time * sample_fps)
    if end_time is not None:
        end_time = ceil_time_by_fps(end_time, sample_fps, min_time=0, max_time=total_frame_num / sample_fps)
        # +1 makes the end frame inclusive; clamp so get_batch never indexes
        # past the last frame.
        end_frame = min(int(end_time * sample_fps + 1), total_frame_num)

    frame_idx = list(range(start_frame, end_frame, sample_fps))

    # Cap at 32 frames: resample the window uniformly instead of 1-per-second.
    if len(frame_idx) > 32:
        uniform_sampled_frames = np.linspace(start_frame, end_frame - 1, 32, dtype=int)
        frame_idx = uniform_sampled_frames.tolist()
    # NOTE(review): assumes the decord bridge exposes .numpy() on the batch —
    # with the default bridge this is .asnumpy(); verify runtime configuration.
    spare_frames = vr.get_batch(frame_idx).numpy()
    return spare_frames
|
|
def LLaVANextVideo32_Run(file, inp, start_time, end_time):
    """Run one inference turn on a video (.mp4), image (.jpg) or frame directory.

    Args:
        file: path to an .mp4 video, a .jpg image, or a directory of frame images.
        inp: the user prompt text.
        start_time / end_time: optional clip bounds in seconds (video only).

    Returns:
        (response_text, num_frames) — the decoded model output and the number
        of visual frames fed to the model.

    Fixes vs. original:
    - The return value used ``video_frames.shape[0]``, which is a NameError for
      the .jpg and directory branches (``video_frames`` is only bound for
      .mp4). Each branch now records its own ``num_frames``.
    - Directory frames are now loaded in sorted order; ``os.listdir`` ordering
      is arbitrary, which scrambled temporal order.
    """
    image_tensors = []
    image_sizes = []
    if file.endswith('.mp4'):
        video_frames = load_video(file, start_time, end_time)
        num_frames = video_frames.shape[0]
        frames = image_processor.preprocess(video_frames, return_tensors="pt")["pixel_values"].half().to(model.device)
        image_tensors.append(frames)
        # NOTE(review): video_frames is a numpy array, so frame.size is the
        # per-frame element count, not a (w, h) pair — kept as-is for parity
        # with the original; confirm what the model expects here.
        image_sizes = [frame.size for frame in video_frames]
        modality = "video"
    elif file.endswith('.jpg'):
        image = Image.open(file)
        image_tensor = process_images([image], image_processor, model.config)
        image_tensors = [_image.to(dtype=torch.float16, device=model.device) for _image in image_tensor]
        image_sizes = [image.size]
        num_frames = 1
        modality = "image"
    else:
        # Treat the path as a directory of pre-extracted frame images.
        images = []
        for img in sorted(os.listdir(file)):
            img = os.path.join(file, img)
            image = np.asarray(Image.open(img))
            images.append(image)
        frames = image_processor.preprocess(images, return_tensors="pt")["pixel_values"].half().to(model.device)
        image_tensors.append(frames)
        image_sizes = [frame.size for frame in images]
        num_frames = len(images)
        modality = "video"

    # Build the chat prompt with the image placeholder token prepended.
    conv_template = "qwen_1_5"
    question = f"{DEFAULT_IMAGE_TOKEN}\n{inp}"

    conv = copy.deepcopy(conv_templates[conv_template])
    conv.append_message(conv.roles[0], question)
    conv.append_message(conv.roles[1], None)
    prompt_question = conv.get_prompt()

    input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(model.device)

    # Greedy decoding (do_sample=False); temperature is ignored in that mode.
    cont = model.generate(
        input_ids,
        images=image_tensors,
        image_sizes=image_sizes,
        do_sample=False,
        temperature=0,
        max_new_tokens=4096,
        modalities=[modality],
    )
    text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)
    response = text_outputs[0]
    return response, num_frames