import base64
import re
import time
from io import BytesIO
from typing import List, Optional, Tuple, Union

import decord
import numpy as np
import torch
from accelerate import Accelerator, DistributedType
from loguru import logger as eval_logger
from PIL import Image
from tqdm import tqdm
from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForImageTextToText,
    AutoProcessor,
    AutoTokenizer,
)

from lmms_eval import utils
from lmms_eval.api.instance import Instance
from lmms_eval.api.model import lmms
from lmms_eval.api.registry import register_model
from lmms_eval.models.model_utils.gen_metrics import log_metrics
from lmms_eval.models.model_utils.reasoning_model_utils import (
    parse_reasoning_model_answer,
)
from lmms_eval.protocol import ChatMessages

try:
    from qwen_vl_utils import process_vision_info
except ImportError:
    eval_logger.warning("Failed to import qwen_vl_utils; please install it via `pip install qwen-vl-utils`")


@register_model("huggingface")
class Huggingface(lmms):
    """
    Generic HuggingFace model wrapper for chat-style multimodal evaluation.

    Defaults to Qwen2.5-VL:
    "https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct"
    """

    is_simple = False

    def __init__(
        self,
        pretrained: str = "Qwen/Qwen2.5-VL-3B-Instruct",
        device: Optional[str] = "cuda",
        device_map: Optional[str] = "auto",
        batch_size: Optional[Union[int, str]] = 1,
        use_cache=True,
        attn_implementation: Optional[str] = None,
        max_num_frames: int = 32,
        use_custom_video_loader: Optional[bool] = False,
        fps: Optional[float] = None,
        max_image_size: Optional[int] = None,
        system_prompt: Optional[str] = "You are a helpful assistant.",
        interleave_visuals: Optional[bool] = False,
        reasoning_prompt: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__()

        # Pop optional overrides before rejecting any remaining unknown kwargs,
        # so that a `max_length` override is actually honored.
        self._max_length = kwargs.pop("max_length", 2048)
        assert kwargs == {}, f"Unexpected kwargs: {kwargs}"

        valid_attn_implementations = [None, "flash_attention_2", "sdpa", "eager"]
        if attn_implementation not in valid_attn_implementations:
            raise ValueError(f"attn_implementation must be one of {valid_attn_implementations}, got {attn_implementation}")

        self.use_custom_video_loader = use_custom_video_loader
        self.fps = fps

        self.max_image_size = max_image_size
        if self.max_image_size and not self.use_custom_video_loader:
            raise ValueError("max_image_size is only applicable if use_custom_video_loader is True")

        accelerator = Accelerator()
        self.accelerator = accelerator
        if accelerator.num_processes > 1:
            self._device = torch.device(f"cuda:{accelerator.local_process_index}")
            self.device_map = f"cuda:{accelerator.local_process_index}"
        else:
            self._device = torch.device(device)
            self.device_map = device_map if device_map else device

        model_kwargs = {
            "torch_dtype": "bfloat16",
            "device_map": self.device_map,
        }

        if attn_implementation is not None:
            model_kwargs["attn_implementation"] = attn_implementation

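        # Pick the auto class that maps this architecture: causal LM,
        # image-text-to-text, or the generic AutoModel fallback.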
        config = AutoConfig.from_pretrained(pretrained)
        if config.model_type in AutoModelForCausalLM._model_mapping.keys():
            model_cls = AutoModelForCausalLM
        elif config.model_type in AutoModelForImageTextToText._model_mapping.keys():
            model_cls = AutoModelForImageTextToText
        else:
            model_cls = AutoModel

        self._model = model_cls.from_pretrained(pretrained, **model_kwargs).eval()
        self.max_num_frames = max_num_frames

        if reasoning_prompt:
            self.reasoning_prompt = reasoning_prompt.replace("\\n", "\n")
        else:
            self.reasoning_prompt = None
        self.processor = AutoProcessor.from_pretrained(pretrained)
        self._tokenizer = AutoTokenizer.from_pretrained(pretrained)
        self.system_prompt = system_prompt
        self.interleave_visuals = interleave_visuals

        self._config = self.model.config
        self.batch_size_per_gpu = int(batch_size)
        self.use_cache = use_cache

        if accelerator.num_processes > 1:
            assert accelerator.distributed_type in [
                DistributedType.FSDP,
                DistributedType.MULTI_GPU,
            ], "Unsupported distributed type provided. Only DDP and FSDP are supported."
            if accelerator.distributed_type == DistributedType.FSDP:
                self._model = accelerator.prepare(self.model)
            else:
                self._model = accelerator.prepare_model(self.model, evaluation_mode=True)
            self.accelerator = accelerator
            if self.accelerator.is_local_main_process:
                eval_logger.info(f"Using {accelerator.num_processes} devices with data parallelism")
            self._rank = self.accelerator.local_process_index
            self._world_size = self.accelerator.num_processes
        else:
            self._rank = 0
            self._world_size = 1

    @property
    def config(self):
        # Return the model's HF config object.
        return self._config

    @property
    def tokenizer(self):
        return self._tokenizer

    @property
    def model(self):
        # Unwrap the model when it has been wrapped by accelerate.
        if hasattr(self, "accelerator"):
            return self.accelerator.unwrap_model(self._model)
        else:
            return self._model

    @property
    def eot_token_id(self):
        return self.tokenizer.eos_token_id

    @property
    def max_length(self):
        return self._max_length

    @property
    def batch_size(self):
        return self.batch_size_per_gpu

    @property
    def device(self):
        return self._device

    @property
    def rank(self):
        return self._rank

    @property
    def world_size(self):
        return self._world_size

    def loglikelihood(self, requests: List[Instance]) -> List[Tuple[float, bool]]:
        raise NotImplementedError("Loglikelihood is not implemented for this model")

    def flatten(self, input):
        """Flatten a list of lists into a single flat list."""
        new_list = []
        for i in input:
            for j in i:
                new_list.append(j)
        return new_list

    def generate_until(self, requests: List[Instance]) -> List[str]:
        res = []

        def _collate(x):
            # x is a request's args tuple; x[2] is its gen_kwargs, used here as
            # both the sort key and the grouping key.
            return x[2], x[2]

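        # Group requests that share gen_kwargs so each batch is generated with a
        # single set of generation parameters; the original order is restored
        # afterwards via get_original.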
        re_ords = utils.Collator([reg.args for reg in requests], _collate, group_fn=lambda x: x[2], grouping=True)
        chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
        num_iters = len(requests) // self.batch_size if len(requests) % self.batch_size == 0 else len(requests) // self.batch_size + 1
        pbar = tqdm(total=num_iters, disable=(self.rank != 0), desc="Model Responding")

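        # Accumulate end-to-end latency and generated-token counts for the
        # throughput metrics logged at the end.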
        e2e_latency = 0
        total_tokens = 0

        for chunk in chunks:
            ctx, doc_to_messages, all_gen_kwargs, doc_id, task, split = zip(*chunk)
            chat_messages = [doc_to_messages[0](self.task_dict[task][split][ids]) for ids, task, split in zip(doc_id, task, split)]
            chat_messages: List[ChatMessages] = [ChatMessages(messages=message) for message in chat_messages]
            # Batches are grouped by gen_kwargs, so the first request's kwargs apply to the whole chunk.
            gen_kwargs = all_gen_kwargs[0]

            batched_messages = [chat_message.to_hf_messages() for chat_message in chat_messages]
            texts = [self.processor.apply_chat_template(msg, tokenize=False, add_generation_prompt=True) for msg in batched_messages]
            images = []
            videos = []
            audios = []
            for messages in chat_messages:
                image, video, audio = messages.extract_media()
                images.append(image)
                videos.append(video)
                audios.append(audio)
            images = self.flatten(images)
            videos = self.flatten(videos)
            audios = self.flatten(audios)
            # Only forward media that are actually present; processors commonly
            # reject empty lists (and not every processor accepts an `audios` kwarg).
            kwargs = {}
            if images:
                kwargs["images"] = images
            if videos:
                kwargs["videos"] = videos
            if audios:
                kwargs["audios"] = audios
            inputs = self.processor(text=texts, padding=True, return_tensors="pt", **kwargs)

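            # With device_map="auto" the weights may be sharded across GPUs, so
            # send inputs to the default CUDA device; otherwise use this rank's device.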
            if self.device_map == "auto":
                inputs = inputs.to("cuda")
            else:
                inputs = inputs.to(self.device)

            default_gen_kwargs = {
                "max_new_tokens": 4096,
                "temperature": 0.0,
                "top_p": None,
                "num_beams": 1,
            }

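            # Per-request gen_kwargs override the defaults; a temperature of 0
            # means greedy decoding, so sampling parameters are cleared.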
            current_gen_kwargs = {**default_gen_kwargs, **gen_kwargs}
            pad_token_id = self.tokenizer.pad_token_id

            if current_gen_kwargs["temperature"] > 0:
                current_gen_kwargs["do_sample"] = True
            else:
                current_gen_kwargs["do_sample"] = False
                current_gen_kwargs["temperature"] = None
                current_gen_kwargs["top_p"] = None

            start_time = time.time()
            cont = self.model.generate(
                **inputs,
                eos_token_id=self.tokenizer.eos_token_id,
                pad_token_id=pad_token_id,
                do_sample=current_gen_kwargs["do_sample"],
                temperature=current_gen_kwargs["temperature"],
                top_p=current_gen_kwargs["top_p"],
                num_beams=current_gen_kwargs["num_beams"],
                max_new_tokens=current_gen_kwargs["max_new_tokens"],
                use_cache=self.use_cache,
            )
            end_time = time.time()

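            # Drop the prompt tokens from each output so only newly generated
            # tokens are decoded and counted.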
            generated_ids_trimmed = [out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, cont)]
            answers = self.processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)

            e2e_latency += end_time - start_time
            total_tokens += sum(len(ids) for ids in generated_ids_trimmed)

            for ans, context in zip(answers, texts):
                clean_ans = parse_reasoning_model_answer(ans)
                res.append(clean_ans)
                self.cache_hook.add_partial("generate_until", (context, gen_kwargs), clean_ans)

                eval_logger.debug(f"Question: {context}")
                eval_logger.debug(f"Model Raw Response: {ans}")
                eval_logger.debug(f"Model Clean Response: {clean_ans}")
            # One progress step per batch, matching the num_iters total above.
            pbar.update(1)

        res = re_ords.get_original(res)

        avg_speed = total_tokens / e2e_latency if e2e_latency > 0 else 0

        metric_dict = {
            "total_tokens": total_tokens,
            "e2e_latency": e2e_latency,
            "avg_speed": avg_speed,
        }
        log_metrics(**metric_dict)

        pbar.close()
        return res

    def generate_until_multi_round(self, requests) -> List[str]:
        raise NotImplementedError("TODO: Implement multi-round generation")