---
license: mit
base_model:
- lmms-lab/LLaVA-Video-7B-Qwen2
---

# LLaVA-Video-7B-Qwen2-UnifiedReward-DPO
## Model Summary

This model is fine-tuned from LLaVA-Video-7B-Qwen2 with DPO, using preference data constructed by our [UnifiedReward-7B](https://huggingface.co/CodeGoat24/UnifiedReward-7b), to enhance its video understanding ability.
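For intuition on what "DPO preference data" means here: DPO trains on preference pairs, i.e. a prompt together with a preferred ("chosen") and a dispreferred ("rejected") response, with the preference judged by UnifiedReward-7B. A minimal sketch of one such record is shown below; the field names are illustrative assumptions, not the actual dataset schema.

~~~python
# Illustrative sketch only -- the released UnifiedReward DPO data may use a different schema.
preference_pair = {
    "video": "path/to/video.mp4",                       # video the prompt refers to
    "prompt": "Please describe this video in detail.",  # user question
    "chosen": "A detailed, faithful description ...",   # response ranked higher by UnifiedReward-7B
    "rejected": "A vague or partially incorrect description ...",  # response ranked lower
}
~~~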
For further details, please refer to the following resources:
- Paper: https://arxiv.org/pdf/2503.05236
- Project Page: https://codegoat24.github.io/UnifiedReward/
- Model Collections: https://huggingface.co/collections/CodeGoat24/unifiedreward-models-67c3008148c3a380d15ac63a
- Dataset Collections: https://huggingface.co/collections/CodeGoat24/unifiedreward-training-data-67c300d4fd5eff00fa7f1ede
- Point of Contact: [Yibin Wang](https://codegoat24.github.io)
### Quick Start
~~~python
# pip install git+https://github.com/LLaVA-VL/LLaVA-NeXT.git
from llava.model.builder import load_pretrained_model
from llava.mm_utils import get_model_name_from_path, process_images, tokenizer_image_token
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IGNORE_INDEX
from llava.conversation import conv_templates, SeparatorStyle
from PIL import Image
import requests
import copy
import torch
import sys
import warnings
from decord import VideoReader, cpu
import numpy as np

warnings.filterwarnings("ignore")


def load_video(video_path, max_frames_num, fps=1, force_sample=False):
    """Sample up to `max_frames_num` frames from a video and return them with timing info."""
    if max_frames_num == 0:
        return np.zeros((1, 336, 336, 3))
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    total_frame_num = len(vr)
    video_time = total_frame_num / vr.get_avg_fps()
    fps = round(vr.get_avg_fps() / fps)
    frame_idx = [i for i in range(0, len(vr), fps)]
    frame_time = [i / fps for i in frame_idx]
    if len(frame_idx) > max_frames_num or force_sample:
        # Too many frames at the requested fps: fall back to uniform sampling.
        sample_fps = max_frames_num
        uniform_sampled_frames = np.linspace(0, total_frame_num - 1, sample_fps, dtype=int)
        frame_idx = uniform_sampled_frames.tolist()
        frame_time = [i / vr.get_avg_fps() for i in frame_idx]
    frame_time = ",".join([f"{i:.2f}s" for i in frame_time])
    spare_frames = vr.get_batch(frame_idx).asnumpy()
    return spare_frames, frame_time, video_time


pretrained = "CodeGoat24/LLaVA-Video-7B-Qwen2-UnifiedReward-DPO"
model_name = "llava_qwen"
device = "cuda"
device_map = "auto"
tokenizer, model, image_processor, max_length = load_pretrained_model(pretrained, None, model_name, torch_dtype="bfloat16", device_map=device_map)  # Add any other thing you want to pass in llava_model_args
model.eval()

# Sample frames from the video and preprocess them for the vision tower.
video_path = "XXXX"
max_frames_num = 64
video, frame_time, video_time = load_video(video_path, max_frames_num, 1, force_sample=True)
video = image_processor.preprocess(video, return_tensors="pt")["pixel_values"].cuda().half()
video = [video]

# Build the prompt with the chat template.
conv_template = "qwen_1_5"  # Make sure you use the correct chat template for different models
question = DEFAULT_IMAGE_TOKEN + "\nPlease describe this video in detail."
conv = copy.deepcopy(conv_templates[conv_template])
conv.append_message(conv.roles[0], question)
conv.append_message(conv.roles[1], None)
prompt_question = conv.get_prompt()

input_ids = tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
cont = model.generate(
    input_ids,
    images=video,
    modalities=["video"],
    do_sample=False,
    temperature=0,
    max_new_tokens=4096,
)
text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)[0].strip()
print(text_outputs)
~~~
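The block above loads the model once and captions a single video. If you want to reuse the loaded model for several videos or questions, the same steps can be wrapped in a small helper. This is a convenience sketch that only reuses functions and variables already defined in the quick-start code; it is not part of the official example.

~~~python
def describe_video(path, question_text, num_frames=64):
    """Sketch: run one video + question through the already-loaded model (reuses globals from the quick start)."""
    frames, frame_time, video_time = load_video(path, num_frames, 1, force_sample=True)
    frames = image_processor.preprocess(frames, return_tensors="pt")["pixel_values"].cuda().half()
    conv = copy.deepcopy(conv_templates[conv_template])
    conv.append_message(conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n" + question_text)
    conv.append_message(conv.roles[1], None)
    ids = tokenizer_image_token(conv.get_prompt(), tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).to(device)
    out = model.generate(ids, images=[frames], modalities=["video"], do_sample=False, temperature=0, max_new_tokens=4096)
    return tokenizer.batch_decode(out, skip_special_tokens=True)[0].strip()

# Example usage (replace the paths with your own videos):
# for path in ["clip1.mp4", "clip2.mp4"]:
#     print(describe_video(path, "Please describe this video in detail."))
~~~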
## Citation

```
@article{unifiedreward,
  title={Unified reward model for multimodal understanding and generation},
  author={Wang, Yibin and Zang, Yuhang and Li, Hao and Jin, Cheng and Wang, Jiaqi},
  journal={arXiv preprint arXiv:2503.05236},
  year={2025}
}
```