import copy
import dataclasses
import itertools
import json
import os
import re
from enum import auto, Enum
from typing import Any, List, Optional

import cv2
import imageio
import numpy as np
import torch
import torchvision.transforms as T
from decord import VideoReader, cpu
from moviepy.editor import VideoFileClip
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms.functional import InterpolationMode
from transformers import (
    AutoModelForZeroShotObjectDetection,
    AutoProcessor,
    StoppingCriteria,
    StoppingCriteriaList,
)

from util.easydict import EasyDict

IMAGE_TOKEN = "<image>"

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class SeparatorStyle(Enum):
    """Different separator styles."""
    SINGLE = auto()
    TWO = auto()
    MPT = auto()


class MultiModalConvStyle(Enum):
    """How multimodal tokens are placed within the conversation."""
    MM_ALONE = 'mm_alone'          # multimodal tokens form a standalone user message
    MM_INTERLEAF = 'mm_interleaf'  # multimodal tokens are inlined with the query text
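

# Illustrative contrast of the two styles above (an editorial sketch, not
# original documentation): with mm_token='<image>\n' and query 'Q',
# MM_ALONE records two user messages, '<image>\n' and 'Q', whereas
# MM_INTERLEAF records the single message '<image>\n Q'.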


@dataclasses.dataclass
class Conversation(EasyDict):
    """A class that keeps all conversation history."""
    system: str
    roles: List[str]
    messages: List[List[str]]
    sep: List[str]
    mm_token: str

    mm_style: MultiModalConvStyle = MultiModalConvStyle.MM_INTERLEAF
    pre_query_prompt: Optional[str] = None
    post_query_prompt: Optional[str] = None
    answer_prompt: Optional[str] = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Allow `sep` to be given as a single string shared by all roles.
        if isinstance(self.sep, str):
            self.sep = [self.sep for _ in self.roles]

    def get_prompt(self):
        sep = [self.sep for _ in self.roles] if isinstance(self.sep, str) else self.sep
        sep = dict(zip(self.roles, sep))
        ret = (self.system + sep[self.roles[0]]) if self.system != "" else ""
        for i, (role, message) in enumerate(self.messages):
            if i + 1 == len(self.messages):
                # Final message: if it is a user turn, append the assistant
                # role tag so the model knows to respond; if it is an
                # assistant turn, leave it open-ended for generation.
                if role != self.roles[-1]:
                    ret += role + message + sep[role] + self.roles[-1]
                else:
                    ret += role + message
            else:
                ret += role + message + sep[role]
        return ret

    def user_query(self, query=None, pre_query_prompt=None, post_query_prompt=None, is_mm=False, num_mm_token=1):
        if post_query_prompt is not None:
            query = f"{query} {post_query_prompt}"
        if pre_query_prompt is not None:
            query = f"{pre_query_prompt} {query}"
        role = self.roles[0]

        if is_mm:
            # Repeat the token body but keep its final character (typically a
            # trailing '\n') only once: '<image>\n' with num_mm_token=3 gives
            # '<image><image><image>\n'. This assumes mm_token ends with a
            # one-character separator.
            mm_str = num_mm_token * self.mm_token[:-1] + self.mm_token[-1]
            if self.mm_style == MultiModalConvStyle.MM_ALONE:
                self._append_message(role, mm_str)
            elif self.mm_style == MultiModalConvStyle.MM_INTERLEAF:
                if self.mm_token not in query:
                    query = f'{mm_str} {query}'
        self._append_message(role, query)

    def assistant_response(self, response, pre_query_prompt=None, post_query_prompt=None):
        if post_query_prompt is not None:
            response = f"{response} {post_query_prompt}"
        if pre_query_prompt is not None:
            response = f"{pre_query_prompt} {response}"
        role = self.roles[1]
        self._append_message(role, response)

    def _append_message(self, role, message):
        message = '' if message is None else message
        self.messages.append([role, message])

    def copy(self):
        return copy.deepcopy(self)
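

# Illustrative usage (an editorial sketch, not part of the original module):
# a one-turn prompt built with the `conv_plain_v1` template defined below.
#
#     conv = conv_plain_v1.copy()
#     conv.user_query("What is happening in the video?", is_mm=True)
#     conv.get_prompt()
#     # -> 'USER:<image> What is happening in the video? ASSISTANT:'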


conv_video_chatgpt_v1 = Conversation(
    system="You are Video-ChatGPT, a large vision-language assistant. "
           "You are able to understand the video content that the user provides, and "
           "assist the user with a variety of tasks using natural language. "
           "Follow the instructions carefully and explain your answers in detail "
           "based on the provided video.",
    roles=("USER:", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>',
    mm_style=MultiModalConvStyle.MM_INTERLEAF,
)


conv_plain_v1 = Conversation(
    system="",
    roles=("USER:", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>'
)


conv_eval_vcg = Conversation(
    system="You are Video-ChatGPT, a large vision-language assistant. "
           "You are able to understand the video content that the user provides, and "
           "assist the user with a variety of tasks using natural language. "
           "Follow the instructions carefully and explain your answers in detail "
           "based on the provided video.",
    roles=("USER: ", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)


conv_eval_vcg_llavanext = Conversation(
    system="You are Video-ChatGPT, a large vision-language assistant. "
           "You are able to understand the video content that the user provides, and "
           "assist the user with a variety of tasks using natural language. "
           "Follow the instructions carefully and explain your answers in detail "
           "based on the provided video.",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    messages=[],
    sep=["<|im_end|>\n", "<|im_end|>\n"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)


SYSTEM_MVBENCH = (
    "Carefully watch the video and pay attention to the cause and sequence of events, "
    "the detail and movement of objects, and the action and pose of persons. "
    "Based on your observations, select the best option that accurately addresses the question.\n"
)

conv_eval_mvbench = Conversation(
    system=SYSTEM_MVBENCH,
    roles=("USER: ", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)

conv_eval_mvbench_llavanext = Conversation(
    system="You are Video-ChatGPT, a large vision-language assistant. "
           "You are able to understand the video content that the user provides, and "
           "assist the user with a variety of tasks using natural language. "
           "Follow the instructions carefully and explain your answers in detail "
           "based on the provided video.",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    messages=[],
    sep=["<|im_end|>\n", "<|im_end|>\n"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)


conv_eval_videoqabench = Conversation(
    system="",
    roles=("USER: ", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_INTERLEAF,
    pre_query_prompt="The input consists of a sequence of key frames from a video. "
                     "Answer the question concisely first, followed by significant events, "
                     "characters, or objects that appear throughout the frames. Question:",
    post_query_prompt="\n",
    answer_prompt='\nAnswer: In the video,'
)


conv_eval_videoqa_llavanext = Conversation(
    system="<|im_start|>system\nAnswer the question.",
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    messages=[],
    sep=["<|im_end|>\n", "<|im_end|>\n"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_INTERLEAF,
    pre_query_prompt="The input consists of a sequence of key frames from a video. "
                     "Answer the question concisely first, followed by significant events, "
                     "characters, or objects that appear throughout the frames. Question:",
    post_query_prompt="\n",
    answer_prompt='\nAnswer: In the video,'
)


SYSTEM_RECAPTION = """You are a powerful Video Magic ChatBot, a large vision-language assistant.
You are able to understand the video content that the user provides and assist the user in a video recaptioning task.
The user will provide you with the video and possibly some extra noisy information to help you out. Make proper use of this information to do the recaption job well.
### INSTRUCTIONS:
1. Follow the user's instruction.
2. Be critical yet believe in yourself.
"""

conv_eval_recaption = Conversation(
    system=SYSTEM_RECAPTION,
    roles=("USER: ", "ASSISTANT:"),
    messages=[],
    sep=[" ", "</s>"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)


conv_eval_recaption_llavanext = Conversation(
    system=SYSTEM_RECAPTION,
    roles=("<|im_start|>user\n", "<|im_start|>assistant\n"),
    messages=[],
    sep=["<|im_end|>\n", "<|im_end|>\n"],
    mm_token='<image>\n',
    mm_style=MultiModalConvStyle.MM_ALONE,
)


conv_templates = {
    "plain": conv_plain_v1,
    "eval_vcgbench": conv_eval_vcg,
    "eval_vcg_llavanext": conv_eval_vcg_llavanext,
    "eval_mvbench": conv_eval_mvbench,
    "eval_mvbench_llavanext": conv_eval_mvbench_llavanext,
    "eval_videoqabench": conv_eval_videoqabench,
    "eval_videoqa_llavanext": conv_eval_videoqa_llavanext,
    "eval_recaption": conv_eval_recaption,
    "eval_recaption_llavanext": conv_eval_recaption_llavanext,
}
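

# A minimal smoke test (an editorial sketch, not part of the original module):
# render one prompt per registered template to eyeball the chat formatting.
# Assumes only that this file's imports resolve (e.g. `util.easydict` is on
# the import path).
if __name__ == "__main__":
    for name, template in conv_templates.items():
        conv = template.copy()  # deep copy so the shared template stays empty
        conv.user_query(
            "Describe the video.",
            pre_query_prompt=conv.pre_query_prompt,
            post_query_prompt=conv.post_query_prompt,
            is_mm=True,
        )
        print(f"--- {name} ---")
        print(conv.get_prompt())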