diff --git a/VLMEvalKit-sudoku/llava/__pycache__/__init__.cpython-310.pyc b/VLMEvalKit-sudoku/llava/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a429ff7dcf4862c7dc0e60a4997fa881f52adcb9 Binary files /dev/null and b/VLMEvalKit-sudoku/llava/__pycache__/__init__.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/__pycache__/conversation.cpython-310.pyc b/VLMEvalKit-sudoku/llava/__pycache__/conversation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a5ee40992187968b802b16917ee1c2592f76b8a Binary files /dev/null and b/VLMEvalKit-sudoku/llava/__pycache__/conversation.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/__pycache__/slice_process.cpython-310.pyc b/VLMEvalKit-sudoku/llava/__pycache__/slice_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da17e768747744c28bcc0361bd3dd3b5a784bdfc Binary files /dev/null and b/VLMEvalKit-sudoku/llava/__pycache__/slice_process.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60f43f15969b84357fa0d696c21914e952d2e896 Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mixtral.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mixtral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9e9a58ab4bc80df90fdf22b537be0384f497ccb Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mixtral.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_qwen.cpython-310.pyc 
# llava_gemma.py — LLaVA multimodal wrapper around HuggingFace Gemma.
#
# Copyright 2024 Duc Q. Nguyen, Haotian Liu and Bo Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from transformers import AutoConfig, AutoModelForCausalLM, GemmaConfig, GemmaModel, GemmaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM


class LlavaGemmaConfig(GemmaConfig):
    """Gemma config tagged with the LLaVA model type for Auto* dispatch."""

    model_type = "llava_gemma"


class LlavaGemmaModel(LlavaMetaModel, GemmaModel):
    """Gemma transformer backbone extended with LLaVA's multimodal machinery."""

    config_class = LlavaGemmaConfig

    def __init__(self, config: GemmaConfig):
        super(LlavaGemmaModel, self).__init__(config)


class LlavaGemmaForCausalLM(GemmaForCausalLM, LlavaMetaForCausalLM):
    """Causal-LM head over LlavaGemmaModel with image-aware forward/generate."""

    config_class = LlavaGemmaConfig

    def __init__(self, config):
        # Skip GemmaForCausalLM.__init__ so the multimodal model is installed
        # instead of the plain GemmaModel.
        super(GemmaForCausalLM, self).__init__(config)
        self.model = LlavaGemmaModel(config)

        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_model(self):
        """Return the underlying multimodal backbone."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Splice image features into token embeddings, then run the LM forward."""
        if inputs_embeds is None:
            (
                input_ids,
                position_ids,
                attention_mask,
                past_key_values,
                inputs_embeds,
                labels,
            ) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, image_sizes
            )

        return super().forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            labels=labels,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Embed images (when given) before delegating to HuggingFace generate."""
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (
                inputs,
                position_ids,
                attention_mask,
                _,
                inputs_embeds,
                _,
            ) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the multimodal kwargs through HF's generation loop."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs


AutoConfig.register("llava_gemma", LlavaGemmaConfig)
AutoModelForCausalLM.register(LlavaGemmaConfig, LlavaGemmaForCausalLM)
# llava_mixtral.py — LLaVA multimodal wrapper around HuggingFace Mixtral.
#
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from transformers import AutoConfig, AutoModelForCausalLM, MixtralConfig, MixtralModel, MixtralForCausalLM, GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM

# Default modality tag.  Stored as an immutable module constant so each call
# materialises a fresh list — the original signature used the mutable default
# `modalities=["image"]`, which is shared across calls (classic Python pitfall).
_DEFAULT_MODALITIES = ("image",)


class LlavaMixtralConfig(MixtralConfig):
    """Mixtral config tagged with the LLaVA model type for Auto* dispatch."""

    model_type = "llava_mixtral"


class LlavaMixtralModel(LlavaMetaModel, MixtralModel):
    """Mixtral backbone extended with LLaVA's multimodal machinery."""

    config_class = LlavaMixtralConfig

    def __init__(self, config: MixtralConfig):
        super(LlavaMixtralModel, self).__init__(config)


class LlavaMixtralForCausalLM(MixtralForCausalLM, LlavaMetaForCausalLM):
    """Causal-LM head over LlavaMixtralModel with image-aware forward/generate."""

    config_class = LlavaMixtralConfig

    def __init__(self, config):
        # Skip MixtralForCausalLM.__init__ so the multimodal model is installed.
        super(MixtralForCausalLM, self).__init__(config)

        config.model_type = "llava_mixtral"
        config.rope_scaling = None
        self.model = LlavaMixtralModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_model(self):
        """Return the underlying multimodal backbone."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = None,
        dpo_forward: Optional[bool] = None,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward pass.

        When ``inputs_embeds`` is absent, image features are spliced into the
        token embeddings via ``prepare_inputs_labels_for_multimodal``.  With
        ``dpo_forward`` truthy, returns raw ``(logits, labels)`` for DPO-style
        training instead of the usual ``CausalLMOutputWithPast``.

        ``modalities`` effectively defaults to ``["image"]`` (built per call
        to avoid the shared mutable-default bug in the original signature).
        """
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)

        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes
            )

        if dpo_forward:
            # DPO needs raw logits paired with the (image-expanded) labels.
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels

        else:
            return super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Embed images (when given) before delegating to HuggingFace generate."""
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the multimodal kwargs through HF's generation loop."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs


AutoConfig.register("llava_mixtral", LlavaMixtralConfig)
AutoModelForCausalLM.register(LlavaMixtralConfig, LlavaMixtralForCausalLM)
# llava_qwen3.py — LLaVA multimodal wrapper around HuggingFace Qwen3.

from typing import List, Optional, Tuple, Union, Dict
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

import transformers
from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig, LlamaModel, LlamaForCausalLM

from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
from transformers import Qwen3Config, Qwen3Model, Qwen3ForCausalLM

# Default modality tag.  Immutable module constant so each call materialises a
# fresh list — the original signature used the mutable default
# `modalities=["image"]`, which is shared across calls.
_DEFAULT_MODALITIES = ("image",)


class LlavaQwen3Config(Qwen3Config):
    """Qwen3 config tagged with the LLaVA model type for Auto* dispatch."""

    model_type = "llava_qwen3"


class LlavaQwen3Model(LlavaMetaModel, Qwen3Model):
    """Qwen3 backbone extended with LLaVA's multimodal machinery."""

    config_class = LlavaQwen3Config

    def __init__(self, config: Qwen3Config):
        super(LlavaQwen3Model, self).__init__(config)


class LlavaQwen3ForCausalLM(Qwen3ForCausalLM, LlavaMetaForCausalLM):
    """Causal-LM head over LlavaQwen3Model with image-aware forward/generate."""

    config_class = LlavaQwen3Config

    def __init__(self, config):
        # Run the full parent init, then swap in the multimodal model.
        Qwen3ForCausalLM.__init__(self, config)
        config.model_type = "llava_qwen3"
        config.rope_scaling = None
        self.config = config
        self.model = LlavaQwen3Model(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_model(self):
        """Return the underlying multimodal backbone."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = None,
        dpo_forward: Optional[bool] = False,
        cache_position=None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward pass.

        When ``inputs_embeds`` is absent, image features (plus the optional
        ``patch_images`` / ``ind_tokens`` extras — presumably sliced image
        patches and their indicator tokens; semantics live in
        ``prepare_inputs_labels_for_multimodal``) are spliced into the token
        embeddings.  With ``dpo_forward`` truthy, returns raw
        ``(logits, labels)`` instead of ``CausalLMOutputWithPast``.

        ``modalities`` effectively defaults to ``["image"]`` (built per call
        to avoid the shared mutable-default bug in the original signature).
        """
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)

        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes,
                patch_images=patch_images, ind_tokens=ind_tokens
            )
        if dpo_forward:
            # DPO needs raw logits paired with the (image-expanded) labels.
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels
        else:
            output = super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            return output

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = None,
        patch_images: Optional[torch.FloatTensor] = None,
        ind_tokens: Optional[List[int]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Embed images (when given) before delegating to HuggingFace generate."""
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes,
                patch_images=patch_images, ind_tokens=ind_tokens
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the multimodal kwargs through HF's generation loop."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        patch_images = kwargs.pop("patch_images", None)
        ind_tokens = kwargs.pop("ind_tokens", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        if patch_images is not None:
            inputs['patch_images'] = patch_images
        if ind_tokens is not None:
            inputs['ind_tokens'] = ind_tokens
        return inputs


AutoConfig.register("llava_qwen3", LlavaQwen3Config)
AutoModelForCausalLM.register(LlavaQwen3Config, LlavaQwen3ForCausalLM)
# llava_qwen_moe.py — LLaVA multimodal wrapper around HuggingFace Qwen2-MoE.
#
# Copyright 2024 Hao Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import List, Optional, Tuple, Union, Dict
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

import transformers
from transformers import AutoConfig, AutoModelForCausalLM

from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.generation.utils import GenerateOutput

from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
from transformers import Qwen2MoeConfig, Qwen2MoeModel, Qwen2MoeForCausalLM

# Default modality tag.  Immutable module constant so each call materialises a
# fresh list — the original signature used the mutable default
# `modalities=["image"]`, which is shared across calls.
_DEFAULT_MODALITIES = ("image",)


class LlavaQwenMoeConfig(Qwen2MoeConfig):
    """Qwen2-MoE config tagged with the LLaVA model type for Auto* dispatch."""

    model_type = "llava_qwen_moe"


class LlavaQwenMoeModel(LlavaMetaModel, Qwen2MoeModel):
    """Qwen2-MoE backbone extended with LLaVA's multimodal machinery."""

    config_class = LlavaQwenMoeConfig

    def __init__(self, config: Qwen2MoeConfig):
        super(LlavaQwenMoeModel, self).__init__(config)


class LlavaQwenMoeForCausalLM(Qwen2MoeForCausalLM, LlavaMetaForCausalLM):
    """Causal-LM head over LlavaQwenMoeModel with image-aware forward/generate."""

    config_class = LlavaQwenMoeConfig

    def __init__(self, config):
        # Run the full parent init, then swap in the multimodal model.
        Qwen2MoeForCausalLM.__init__(self, config)
        config.model_type = "llava_qwen_moe"
        config.rope_scaling = None

        self.model = LlavaQwenMoeModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing.
        self.post_init()

    def get_model(self):
        """Return the underlying multimodal backbone."""
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = None,
        dpo_forward: Optional[bool] = False,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        """Multimodal forward pass.

        When ``inputs_embeds`` is absent, image features are spliced into the
        token embeddings via ``prepare_inputs_labels_for_multimodal``.  With
        ``dpo_forward`` truthy, returns raw ``(logits, labels)`` instead of
        ``CausalLMOutputWithPast``.

        ``modalities`` effectively defaults to ``["image"]`` (built per call
        to avoid the shared mutable-default bug in the original signature).
        """
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)

        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes
            )

        if dpo_forward:
            # DPO needs raw logits paired with the (image-expanded) labels.
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels

        else:
            return super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = None,
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        """Embed images (when given) before delegating to HuggingFace generate."""
        if modalities is None:
            modalities = list(_DEFAULT_MODALITIES)
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        """Thread the multimodal kwargs through HF's generation loop."""
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs


AutoConfig.register("llava_qwen_moe", LlavaQwenMoeConfig)
AutoModelForCausalLM.register(LlavaQwenMoeConfig, LlavaQwenMoeForCausalLM)
# modeling_llama.py (preamble) — adapted from EleutherAI GPT-NeoX / HF OPT.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0; you may obtain a copy at
#     http://www.apache.org/licenses/LICENSE-2.0
# Distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for details.
""" PyTorch LLaMA model."""
import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from transformers.models.llama.configuration_llama import LlamaConfig

if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "LlamaConfig"


def _get_unpad_data(attention_mask):
    """Compute flash-attn varlen metadata from a padding mask.

    Returns ``(indices, cu_seqlens, max_seqlen_in_batch)``: the flat indices of
    non-padded positions, the cumulative sequence lengths (0-prefixed), and the
    longest sequence in the batch.
    """
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch


class LlamaRMSNorm(nn.Module):
    """RMS normalization; equivalent to T5LayerNorm."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Normalize in float32 for numerical stability, then cast back.
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)


class LlamaRotaryEmbedding(nn.Module):
    """Rotary position embedding (RoPE) with legacy cached cos/sin buffers."""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        super().__init__()
        self.scaling_factor = scaling_factor
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # For BC we register cos and sin cached
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
        t = t / self.scaling_factor
        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
        self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)

    @property
    def sin_cached(self):
        logger.warning_once("The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class")
        return self._sin_cached

    @property
    def cos_cached(self):
        logger.warning_once("The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class")
        return self._cos_cached

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        if seq_len is not None:
            logger.warning_once("The `seq_len` argument is deprecated and unused. It will be removed in v4.39.")

        # x: [bs, num_attention_heads, seq_len, head_size]
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """RoPE with linear position scaling. Credits to the Reddit user /u/kaiokendev."""

    def forward(self, x, position_ids, seq_len=None):
        # Linear scaling: stretch the position ids by 1/scaling_factor.
        scaled_positions = position_ids.float() / self.scaling_factor
        return super().forward(x, scaled_positions, seq_len)


class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
    """RoPE with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla."""

    def forward(self, x, position_ids, seq_len=None):
        # Recompute inv_freq whenever the sequence grows past the trained length.
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_position_embeddings:
            base = self.base * ((self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: this may break with compilation

        cos, sin = super().forward(x, position_ids, seq_len)
        return cos, sin


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    front, back = x[..., :half], x[..., half:]
    return torch.cat((-back, front), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Apply rotary position embedding to the query and key tensors.

    ``unsqueeze_dim`` picks the broadcast axis for ``cos``/``sin`` (which have
    shape [batch, seq_len, head_dim]): use 1 for [batch, heads, seq, dim]
    layouts, 2 for [batch, seq, heads, dim].  ``position_ids`` is deprecated
    and unused.  Returns the rotated ``(q, k)`` pair.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
class LlamaMLP(nn.Module):
    """Llama gated feed-forward block: `down_proj(act(gate_proj(x)) * up_proj(x))`.

    When `config.pretraining_tp > 1`, the projections are evaluated as explicit
    weight slices along the intermediate dimension so results match the
    tensor-parallel pretraining layout numerically.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # No biases, matching the original Llama checkpoints.
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        """Apply the SwiGLU-style MLP. Input/output shape: (..., hidden_size)."""
        if self.config.pretraining_tp > 1:
            # FIX: renamed `slice` -> `slice_size`; the original shadowed the `slice` builtin.
            slice_size = self.intermediate_size // self.config.pretraining_tp
            gate_proj_slices = self.gate_proj.weight.split(slice_size, dim=0)
            up_proj_slices = self.up_proj.weight.split(slice_size, dim=0)
            down_proj_slices = self.down_proj.weight.split(slice_size, dim=1)

            gate_proj = torch.cat([F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
            up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)

            # Split back along the intermediate dim and contract each slice separately,
            # summing the partial outputs (equivalent to one full down_proj matmul).
            intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice_size, dim=2)
            down_proj = [F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)]
            down_proj = sum(down_proj)
        else:
            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))

        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        # Fast path: no grouped-query expansion needed; return the input unchanged.
        return hidden_states
    # Insert a repeat axis after the KV-head axis, broadcast it, then fold it into the head axis.
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
class LlamaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        # GQA: several query heads share one key/value head; groups = query heads per KV head.
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads}).")

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
        self._init_rope()

    def _init_rope(self):
        # Choose the RoPE implementation based on config.rope_scaling (None, "linear", or "dynamic").
        if self.config.rope_scaling is None:
            self.rotary_emb = LlamaRotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Eager (matmul + softmax) attention.

        Returns `(attn_output, attn_weights or None, past_key_value)`;
        `attn_weights` is only kept when `output_attentions` is True.
        """
        bsz, q_len, _ = hidden_states.size()

        if self.config.pretraining_tp > 1:
            # Tensor-parallel-compatible path: evaluate Q/K/V as explicit weight slices.
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
            query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0)
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)

            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
            query_states = torch.cat(query_states, dim=-1)

            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
            key_states = torch.cat(key_states, dim=-1)

            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
            value_states = torch.cat(value_states, dim=-1)

        else:
            query_states = self.q_proj(hidden_states)
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        # (bsz, q_len, heads*head_dim) -> (bsz, heads, q_len, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        # In case a static cache is used, it is an instance attribute and overrides the argument.
        past_key_value = getattr(self, "past_key_value", past_key_value)
        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Expand KV heads to match the number of query heads (GQA).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask
            if cache_position is not None:
                # Keep only the mask rows for the current positions and columns up to the cached length.
                causal_mask = attention_mask[:, :, cache_position, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}")

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        if self.config.pretraining_tp > 1:
            # Sliced output projection mirroring the tensor-parallel layout.
            attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
        else:
            attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
class LlamaRingFlashAttention2(LlamaAttention):
    """
    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.

    This variant dispatches to the zigzag *ring* flash attention kernels
    (`zigzag_ring_flash_attn_*`), which shard the sequence across ranks.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Flash attention cannot return attention weights, so this is forced off.
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # In case a static cache is used, it is an instance attribute and overrides the argument.
        past_key_value = getattr(self, "past_key_value", past_key_value)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(query_states, key_states, value_states, attention_mask, query_length)

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = zigzag_ring_flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            # Scatter the unpadded rows back into a dense (batch, seq, ...) tensor.
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            # pack qkv
            # query_states: (batch_size, seqlen, nheads, headdim)
            # qkv: (batch_size, seqlen, 3, nheads, headdim)
            # NOTE(review): the packed kernel requires q/k/v to have the same number of heads —
            # presumably GQA expansion happened upstream or num_heads == num_key_value_heads; confirm.
            qkv = torch.stack([query_states, key_states, value_states], dim=2)
            attn_output = zigzag_ring_flash_attn_qkvpacked_func(qkv, dropout, softmax_scale, causal=causal)

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        # Remove padding rows (per the 0/1 attention_mask) and build the cu_seqlens
        # metadata expected by the varlen flash-attention kernels.
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
        if query_length == kv_seq_len:
            # Prefill: queries and keys share the same unpadding indices.
            query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k)
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token decode step: one query per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device)  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
class LlamaFlashAttention2(LlamaAttention):
    """
    Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Flash attention cannot return attention weights, so this is forced off.
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # In case a static cache is used, it is an instance attribute and overrides the argument.
        past_key_value = getattr(self, "past_key_value", past_key_value)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`int`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(query_states, key_states, value_states, attention_mask, query_length)

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            # Scatter the unpadded rows back into a dense (batch, seq, ...) tensor.
            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal)

        return attn_output

    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        # Remove padding rows (per the 0/1 attention_mask) and build the cu_seqlens
        # metadata expected by the varlen flash-attention kernels.
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
        if query_length == kv_seq_len:
            # Prefill: queries and keys share the same unpadding indices.
            query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k)
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            # Single-token decode step: one query per batch element.
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device)  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
class LlamaSdpaAttention(LlamaAttention):
    """
    Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
    SDPA API.
    """

    # Adapted from LlamaAttention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # SDPA cannot return weights; fall back to the eager implementation.
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # (bsz, q_len, heads*head_dim) -> (bsz, heads, q_len, head_dim)
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        # In case static cache is used, it is an instance attribute.
        past_key_value = getattr(self, "past_key_value", past_key_value)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; position_ids needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Expand KV heads to match the number of query heads (GQA).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if attention_mask is not None and cache_position is not None:
            causal_mask = causal_mask[:, :, cache_position, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        # SDPA never exposes attention weights, hence the None.
        return attn_output, None, past_key_value


# NOTE(review): best-effort import — if ring-flash-attn is missing, the zigzag functions stay
# undefined and LlamaRingFlashAttention2 will fail at call time (the `print` below would be
# better as logger.warning; left unchanged to preserve behavior).
try:
    from ring_flash_attn import zigzag_ring_flash_attn_qkvpacked_func, zigzag_ring_flash_attn_varlen_func
except ImportError:
    print("Please install the ring-flash-attn package")

# Maps config._attn_implementation to the attention class used by every decoder layer.
LLAMA_ATTENTION_CLASSES = {
    "eager": LlamaAttention,
    "flash_attention_2": LlamaFlashAttention2,
    "ring_flash_attention_2": LlamaRingFlashAttention2,
    "sdpa": LlamaSdpaAttention,
}


class LlamaDecoderLayer(nn.Module):
    """One Llama transformer block: pre-norm self-attention and pre-norm MLP, each with a residual add."""

    def __init__(self, config: LlamaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)

        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn("Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`")

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class LlamaPreTrainedModel(PreTrainedModel):
    """Base class tying LlamaConfig to the model family.

    Declares the supported attention backends / cache classes and provides
    weight initialization plus static-cache setup and teardown helpers.
    """

    config_class = LlamaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["LlamaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values", "causal_mask"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True

    def _init_weights(self, module):
        """Initialize Linear/Embedding weights with N(0, initializer_range)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero.
                module.weight.data[module.padding_idx].zero_()

    def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
        """Attach a `cache_cls` instance to every attention layer.

        Grows the registered boolean causal mask if `max_cache_len` exceeds the
        current buffer, then allocates one cache object per decoder layer on that
        layer's device/dtype.
        """
        if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
            raise ValueError("`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers")

        # BUG FIX: `max_cache_len` defaults to None, and the original code compared
        # it directly with `>` against an int, raising a TypeError whenever the
        # caller omitted the length. Fall back to the current mask size (a no-op
        # for the growth check) so the per-layer caches still get a concrete length.
        if max_cache_len is None:
            max_cache_len = self.model.causal_mask.shape[-1]

        if max_cache_len > self.model.causal_mask.shape[-1] or self.device != self.model.causal_mask.device:
            causal_mask = torch.full((max_cache_len, max_cache_len), fill_value=True, device=self.device, dtype=torch.bool)
            # NOTE(review): the buffer is registered on `self` while the size check
            # above reads `self.model.causal_mask` — these can diverge; confirm the
            # intended target before relying on mask growth here.
            self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)

        for layer in self.model.layers:
            device = layer.input_layernorm.weight.device
            # Quantized checkpoints record their pre-quantization dtype on the config.
            if hasattr(self.config, "_pre_quantization_dtype"):
                dtype = self.config._pre_quantization_dtype
            else:
                dtype = layer.self_attn.o_proj.weight.dtype
            layer.self_attn.past_key_value = cache_cls(self.config, max_batch_size, max_cache_len, device=device, dtype=dtype)

    def _reset_cache(self):
        """Drop the per-layer cache objects installed by `_setup_cache`."""
        for layer in self.model.layers:
            layer.self_attn.past_key_value = None
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
@add_start_docstrings(
    "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class LlamaModel(LlamaPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]

    Args:
        config: LlamaConfig
    """

    def __init__(self, config: LlamaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.gradient_checkpointing = False

        # Register a causal mask to separate causal and padding mask creation. Merging happens in the attention class.
        # NOTE: This is not friendly with TorchScript, ONNX, ExportedProgram serialization for very large `max_position_embeddings`.
        # A full (max_pos, max_pos) boolean upper-triangular mask: True = masked position.
        causal_mask = torch.full((config.max_position_embeddings, config.max_position_embeddings), fill_value=True, dtype=torch.bool)
        self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the token embedding table."""
        return self.embed_tokens

    def set_input_embeddings(self, value):
        """Replace the token embedding table (used for resizing/tying)."""
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        """Run the decoder stack and return the final hidden states.

        Exactly one of `input_ids` / `inputs_embeds` must be provided; flags
        default to the corresponding config values.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # XOR check: raises when both or neither of input_ids/inputs_embeds are given.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.")
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        past_seen_tokens = 0
        if use_cache:  # kept for BC (cache positions)
            # Legacy tuple caches (and None) are wrapped in a DynamicCache; a
            # StaticCache passes through untouched.
            if not isinstance(past_key_values, StaticCache):
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                past_seen_tokens = past_key_values.get_seq_length()

        if cache_position is None:
            if isinstance(past_key_values, StaticCache):
                raise ValueError("cache_position is a required argument when using StaticCache.")
            # Absolute positions of the new tokens, continuing after the cached prefix.
            cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # 4D additive float mask (or None / 2D mask for flash attention).
        causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)

        # embed positions
        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Positional call through the checkpointing wrapper (kwargs unsupported there).
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                # Cache sits after attn weights when those are returned.
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            # Convert back to the legacy tuple format when a DynamicCache was built above.
            next_cache = next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
    # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
    # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
    # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
    def _update_causal_mask(self, attention_mask, input_tensor):
        """Build the 4D additive attention mask merged with padding information."""
        if self.config._attn_implementation == "flash_attention_2":
            # Flash attention consumes the raw 2D padding mask (or None when no padding).
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        batch_size, seq_length = input_tensor.shape[:2]
        dtype = input_tensor.dtype
        device = input_tensor.device

        # support going beyond cached `max_position_embedding`
        if seq_length > self.causal_mask.shape[-1]:
            # NOTE(review): this growth path builds an int mask without
            # dtype=torch.bool or an explicit device, unlike the constructor's
            # buffer — confirm this is intentional before relying on it.
            causal_mask = torch.full((2 * self.causal_mask.shape[-1], 2 * self.causal_mask.shape[-1]), fill_value=1)
            self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)

        # We use the current dtype to avoid any overflows
        # Masked positions become min_dtype (large negative), attendable ones 0.0.
        min_dtype = torch.finfo(dtype).min
        causal_mask = self.causal_mask[None, None, :, :].repeat(batch_size, 1, 1, 1).to(dtype) * min_dtype

        causal_mask = causal_mask.to(dtype=dtype, device=device)
        if attention_mask is not None and attention_mask.dim() == 2:
            mask_length = attention_mask.shape[-1]
            # Positions still attendable (0.0) but padded in the 2D mask are masked too.
            padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
            causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)

        if self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda":
            # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
            is_tracing = torch.jit.is_tracing() or isinstance(input_tensor, torch.fx.Proxy) or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
            if not is_tracing and torch.any(attention_mask != 1):
                # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
                # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
                # Details: https://github.com/pytorch/pytorch/issues/110213
                causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
class LlamaForCausalLM(LlamaPreTrainedModel):
    """Llama decoder (`LlamaModel`) with a bias-free linear LM head projecting
    hidden states to vocabulary logits; the head weight is tied to the input
    embeddings via `_tied_weights_keys`."""

    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = LlamaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, LlamaForCausalLM

        >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        if self.config.pretraining_tp > 1:
            # Tensor-parallel pretraining path: project against row slices of the
            # LM head and concatenate the partial logits.
            lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
            logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
            logits = torch.cat(logits, dim=-1)
        else:
            logits = self.lm_head(hidden_states)
        # Cast logits to fp32 before the loss.
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
        """Trim `input_ids`/masks against the cache and assemble the kwargs for one
        decoding step (used by `generate`)."""
        past_length = 0
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                # NOTE(review): `seen_tokens` and `get_seq_length()` can diverge for
                # some cache classes — confirm both are intended here.
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                # Legacy tuple cache: length is the key tensor's sequence dim.
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if max_cache_length is not None and attention_mask is not None and cache_length + input_ids.shape[1] > max_cache_length:
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        if self.generation_config.cache_implementation == "static":
            # generation with static cache
            cache_position = kwargs.get("cache_position", None)
            if cache_position is None:
                past_length = 0
            else:
                past_length = cache_position[-1] + 1
            input_ids = input_ids[:, past_length:]
            # NOTE(review): `position_ids` may still be None on this path — verify
            # callers always provide an attention_mask or position_ids with static cache.
            position_ids = position_ids[:, past_length:]

        # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
        # same goes for position ids. Could also help with continued generation.
        input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
        cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
        position_ids = position_ids.contiguous() if position_ids is not None else None

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {"input_ids": input_ids.contiguous()}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorder legacy-format cache tensors along the batch dim for beam search."""
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),)
        return reordered_past
@add_start_docstrings(
    """
    The LLaMa Model transformer with a sequence classification head on top (linear layer).

    [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    LLAMA_START_DOCSTRING,
)
class LlamaForSequenceClassification(LlamaPreTrainedModel):
    """Llama backbone plus a bias-free linear `score` head; classification is
    read off the last non-padding token of each sequence."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = LlamaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        backbone_out = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        token_logits = self.score(backbone_out[0])

        batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

        # Locate the last "real" token per row: the position just before the first
        # pad token, or the final position when no pad id / no input_ids are available.
        if self.config.pad_token_id is not None and input_ids is not None:
            # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
            last_positions = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
            last_positions = last_positions % input_ids.shape[-1]
            last_positions = last_positions.to(token_logits.device)
        else:
            last_positions = -1

        pooled_logits = token_logits[torch.arange(batch_size, device=token_logits.device), last_positions]

        loss = None
        if labels is not None:
            labels = labels.to(token_logits.device)
            # Infer and cache the problem type on the config (transformers convention).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                mse = MSELoss()
                if self.num_labels == 1:
                    loss = mse(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = mse(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + backbone_out[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=backbone_out.past_key_values,
            hidden_states=backbone_out.hidden_states,
            attentions=backbone_out.attentions,
        )
@add_start_docstrings(
    """
The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    LLAMA_START_DOCSTRING,
)
class LlamaForQuestionAnswering(LlamaPreTrainedModel):
    """Llama backbone with a 2-output linear head producing per-token start/end span logits."""

    base_model_prefix = "transformer"

    # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
    def __init__(self, config):
        super().__init__(config)
        self.transformer = LlamaModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.transformer.embed_tokens

    def set_input_embeddings(self, value):
        self.transformer.embed_tokens = value

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_out = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Project every token to a (start, end) pair and split into two logit tensors.
        span_logits = self.qa_outputs(encoder_out[0])
        start_logits, end_logits = (t.squeeze(-1).contiguous() for t in span_logits.split(1, dim=-1))

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # Multi-GPU targets may carry an extra trailing dimension; drop it and
            # move the targets onto the logits' device.
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # Positions beyond the sequence are clamped to seq_len and then ignored by the loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            ce = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = (ce(start_logits, start_positions) + ce(end_logits, end_positions)) / 2

        if not return_dict:
            output = (start_logits, end_logits) + encoder_out[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=encoder_out.hidden_states,
            attentions=encoder_out.attentions,
        )
end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/perceiver.cpython-310.pyc b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/perceiver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95b0810b01f025f57974b6fcfc48277adf269177 Binary files /dev/null and b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/perceiver.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/llava/model/multimodal_resampler/qformer.py b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/qformer.py new file mode 100644 index 0000000000000000000000000000000000000000..b86754c24adbfa5ce34e37ee4726c74e3b7f910f --- /dev/null +++ b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/qformer.py @@ -0,0 +1,1160 @@ +""" + * Copyright (c) 2023, salesforce.com, inc. + * All rights reserved. 
class BertEmbeddings(nn.Module):
    """Construct the embeddings from word and position embeddings.

    Supports an optional block of learned query embeddings (`query_embeds`)
    that is prepended to (or used instead of) the token embeddings, as in
    BLIP-2's Q-Former.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.config = config

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        query_embeds=None,
        past_key_values_length=0,
    ):
        """Embed `input_ids` (plus absolute position embeddings), optionally
        prepending `query_embeds`; apply LayerNorm and dropout.

        Args:
            input_ids: optional `(batch, seq_len)` token ids.
            position_ids: optional explicit position ids; defaults to the slice
                `[past_key_values_length, past_key_values_length + seq_len)`.
            query_embeds: optional `(batch, n_query, hidden)` learned queries.
            past_key_values_length: offset into the position table when a cache
                already holds earlier tokens.

        Raises:
            ValueError: if both `input_ids` and `query_embeds` are None.
        """
        # BUG FIX: previously, calling with neither input produced `embeddings = None`
        # and crashed inside LayerNorm with an opaque TypeError. Fail fast instead.
        if input_ids is None and query_embeds is None:
            raise ValueError("You must provide at least one of `input_ids` or `query_embeds`.")

        if input_ids is not None:
            seq_length = input_ids.size()[1]
        else:
            seq_length = 0

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()

        if input_ids is not None:
            embeddings = self.word_embeddings(input_ids)
            if self.position_embedding_type == "absolute":
                position_embeddings = self.position_embeddings(position_ids)
                embeddings = embeddings + position_embeddings

            # Learned queries are prepended in front of the token embeddings.
            if query_embeds is not None:
                embeddings = torch.cat((query_embeds, embeddings), dim=1)
        else:
            embeddings = query_embeds

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + mixed_query_layer = self.query(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, 
positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + 
return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = 
self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention and layer_num % self.config.cross_attention_freq == 0: + self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) + self.has_cross_attention = True + else: + self.has_cross_attention = False + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + self.intermediate_query = BertIntermediate(config) + self.output_query = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = 
self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)]) + + def forward( + self, + hidden_states, + 
attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if getattr(self.config, "gradient_checkpointing", False) and self.training: + + if use_cache: + logger.warn("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...") + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions, query_length) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + 
return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. 
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = BertConfig + base_model_prefix = "bert" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """ + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in `Attention is + all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
+ argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an + input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=False): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: Tensor, + input_shape: Tuple[int], + device: device, + is_decoder: bool, + has_query: bool = False, + ) -> Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] + + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] + if has_query: # UniLM style attention mask + causal_mask = torch.cat( + [ + torch.zeros( + (batch_size, prefix_seq_len, seq_length), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=1, + ) + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, causal_mask.shape[1], prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError("Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. 
+ use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # use_cache = use_cache if use_cache is not None else self.config.use_cache + + if input_ids is None: + assert query_embeds is not None, "You have to specify query_embeds when input_ids is None" + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 + + query_length = query_embeds.shape[1] if query_embeds is not None else 0 + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + query_embeds=query_embeds, + past_key_values_length=past_key_values_length, + ) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
+ if is_decoder: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, + input_ids.shape, + device, + is_decoder, + has_query=(query_embeds is not None), + ) + else: + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() + else: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + 
output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=True, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + return_logits=False, + is_decoder=True, + reduction="mean", + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + if past_key_values is not None: + query_embeds = None + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + query_embeds=query_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + ) + + sequence_output = outputs[0] + if query_embeds is not None: + sequence_output = outputs[0][:, query_embeds.shape[1] :, :] + + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1), + ) + if reduction == "none": + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else 
output

        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs):
        """Build the step-wise input dict consumed by `generate()`.

        Prepends an all-ones mask for the query embeddings so attention covers
        the query positions as well as the text tokens, and truncates
        `input_ids` to the last token once a `past` cache exists.
        """
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)
        # query_embeds is (batch, num_queries, hidden); mask covers the query positions
        query_mask = input_ids.new_ones(query_embeds.shape[:-1])
        attention_mask = torch.cat([query_mask, attention_mask], dim=-1)

        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]

        return {
            "input_ids": input_ids,
            "query_embeds": query_embeds,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
            "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
            "is_decoder": True,
        }

    def _reorder_cache(self, past, beam_idx):
        # Reorder every layer's cached key/value tensors along the batch axis
        # so the cache follows beam-search hypothesis reordering.
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past


class BertForMaskedLM(BertPreTrainedModel):
    # BERT with a masked-LM prediction head on top (pooler disabled).

    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)

        self.bert = BertModel(config, add_pooling_layer=False)
        self.cls = BertOnlyMLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        query_embeds=None,
encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + return_logits=False, + is_decoder=False, + ): + r""" + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., + config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored + (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + query_embeds=query_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + ) + + if query_embeds is not None: + sequence_output = outputs[0][:, query_embeds.shape[1] :, :] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class Qformer(nn.Module): + def __init__(self, model_args, vision_tower): + super().__init__() + + self.depth = model_args.mm_qformer_depth + self.num_latents = model_args.mm_qformer_latents + self.pretrained = 
model_args.mm_qformer_pretrained

        self.Qformer, self.query_tokens, self.ln_vision = self.build_Qformer(vision_tower.hidden_size, self.depth, self.num_latents)

        if self.pretrained is not None:
            # Load BLIP-style weights, dropping the T5 projection head.
            pretrained_dict = torch.load(self.pretrained, map_location="cpu")["model"]
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if not k.startswith("t5_proj")}
            self.load_state_dict(pretrained_dict)

    def build_Qformer(self, vision_width, cross_attention_freq, num_query_token):
        # NOTE(review): the caller passes (hidden_size, depth, num_latents),
        # so `mm_qformer_depth` lands in `cross_attention_freq` here — verify
        # this argument mapping is intended.
        encoder_config = BertConfig.from_pretrained("bert-base-uncased")
        encoder_config.encoder_width = vision_width
        # insert cross-attention layer every other block
        encoder_config.add_cross_attention = True
        encoder_config.cross_attention_freq = cross_attention_freq
        encoder_config.query_length = num_query_token
        Qformer = BertLMHeadModel(config=encoder_config)
        query_tokens = nn.Parameter(torch.zeros(1, num_query_token, encoder_config.hidden_size))
        query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
        # Only the query path is used: drop the LM head, the word/position
        # embeddings, and each layer's feed-forward blocks.
        Qformer.cls = None
        Qformer.bert.embeddings.word_embeddings = None
        Qformer.bert.embeddings.position_embeddings = None
        for layer in Qformer.bert.encoder.layer:
            layer.output = None
            layer.intermediate = None
        return Qformer, query_tokens, nn.LayerNorm(vision_width)

    def forward(self, image_features, *args, **kwargs):
        # Cross-attend the learned query tokens to the layer-normed image features.
        x = self.ln_vision(image_features)
        image_atts = torch.ones(x.size()[:-1], dtype=torch.long).to(x.device)

        query_tokens = self.query_tokens.expand(x.shape[0], -1, -1)
        query_output = self.Qformer.bert(
            query_embeds=query_tokens,
            encoder_hidden_states=x,
            encoder_attention_mask=image_atts,
            return_dict=True,
        )

        return query_output.last_hidden_state

    @property
    def hidden_size(self):
        # BERT-base hidden size.
        return 768

    @property
    def config(self):
        # Serialized so the resampler can be rebuilt from a checkpoint config.
        return {
            "mm_resampler_type": "qformer",
            "mm_qformer_depth": self.depth,
            "mm_qformer_latents": self.num_latents,
            "mm_qformer_pretrained": self.pretrained,
        }
diff --git 
a/VLMEvalKit-sudoku/llava/model/multimodal_resampler/spatial_pool.py b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/spatial_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdbe3aecc91183341816c800c8ad1fcfba9a169 --- /dev/null +++ b/VLMEvalKit-sudoku/llava/model/multimodal_resampler/spatial_pool.py @@ -0,0 +1,45 @@ +import torch +import torch.nn as nn +import math + + +class SpatialPool(nn.Module): + def __init__(self, model_args, vision_tower): + super().__init__() + + self.mode = model_args.mm_spatial_pool_mode + self.stride = model_args.mm_spatial_pool_stride + self.out_channels = getattr(model_args, "mm_spatial_pool_out_channels", vision_tower.hidden_size) + + if self.mode == "average": + self.pool = nn.AvgPool2d(kernel_size=self.stride, stride=self.stride) + elif self.mode == "max": + self.pool = nn.MaxPool2d(kernel_size=self.stride, stride=self.stride) + elif self.mode == "conv": + self.pool = nn.Conv2d(in_channels=vision_tower.hidden_size, out_channels=self.out_channels, kernel_size=self.stride, stride=self.stride) + else: + raise ValueError(f"Unknown pooling mode: {self.pool}.") + + def forward(self, image_features, images, *args, **kwargs): + ori_W = int(math.sqrt(image_features.shape[1] * images.shape[3] // images.shape[2])) + ori_H = int(ori_W * images.shape[2] // images.shape[3]) + + B, _, F = image_features.shape + + image_features_spatial = image_features.view(B, ori_H, ori_H, F).permute(0, 3, 1, 2) + image_features_spatial_pool = self.pool(image_features_spatial) + + return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous() + + @property + def config(self): + return { + "mm_resampler_type": "spatial_pool", + "mm_spatial_pool_stride": self.stride, + "mm_spatial_pool_mode": self.mode, + "mm_spatial_pool_out_channels": self.out_channels, + } + + @property + def hidden_size(self): + return self.out_channels diff --git a/VLMEvalKit-sudoku/llava/serve/cli.py b/VLMEvalKit-sudoku/llava/serve/cli.py new file 
mode 100644 index 0000000000000000000000000000000000000000..88fbfe85dac4e385962c8c83e074af0fa34c2353 --- /dev/null +++ b/VLMEvalKit-sudoku/llava/serve/cli.py @@ -0,0 +1,111 @@ +import argparse +import torch + +from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN +from llava.conversation import conv_templates, SeparatorStyle +from llava.model.builder import load_pretrained_model +from llava.utils import disable_torch_init +from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria + +from PIL import Image + +import requests +from PIL import Image +from io import BytesIO +from transformers import TextStreamer + + +def load_image(image_file): + if image_file.startswith("http") or image_file.startswith("https"): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert("RGB") + else: + image = Image.open(image_file).convert("RGB") + return image + + +def main(args): + # Model + disable_torch_init() + + model_name = get_model_name_from_path(args.model_path) + tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit) + + if "llama-2" in model_name.lower(): + conv_mode = "llava_llama_2" + elif "v1" in model_name.lower(): + conv_mode = "llava_v1" + elif "mpt" in model_name.lower(): + conv_mode = "mpt" + else: + conv_mode = "llava_v0" + + if args.conv_mode is not None and conv_mode != args.conv_mode: + print("[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}".format(conv_mode, args.conv_mode, args.conv_mode)) + else: + args.conv_mode = conv_mode + + conv = conv_templates[args.conv_mode].copy() + if "mpt" in model_name.lower(): + roles = ("user", "assistant") + else: + roles = conv.roles + + image = load_image(args.image_file) + image_tensor = image_processor.preprocess(image, 
return_tensors="pt")["pixel_values"].half().cuda() + + while True: + try: + inp = input(f"{roles[0]}: ") + except EOFError: + inp = "" + if not inp: + print("exit...") + break + + print(f"{roles[1]}: ", end="") + + if image is not None: + # first message + if model.config.mm_use_im_start_end: + inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + inp + else: + inp = DEFAULT_IMAGE_TOKEN + "\n" + inp + conv.append_message(conv.roles[0], inp) + image = None + else: + # later messages + conv.append_message(conv.roles[0], inp) + conv.append_message(conv.roles[1], None) + prompt = conv.get_prompt() + + input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda() + stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 + keywords = [stop_str] + stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids) + streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) + + with torch.inference_mode(): + output_ids = model.generate(input_ids, images=image_tensor, do_sample=True, temperature=0.2, max_new_tokens=1024, streamer=streamer, use_cache=True, stopping_criteria=[stopping_criteria]) + + outputs = tokenizer.decode(output_ids[0, input_ids.shape[1] :]).strip() + conv.messages[-1][-1] = outputs + + if args.debug: + print("\n", {"prompt": prompt, "outputs": outputs}, "\n") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model-path", type=str, default="facebook/opt-350m") + parser.add_argument("--model-base", type=str, default=None) + parser.add_argument("--image-file", type=str, required=True) + parser.add_argument("--num-gpus", type=int, default=1) + parser.add_argument("--conv-mode", type=str, default=None) + parser.add_argument("--temperature", type=float, default=0.2) + parser.add_argument("--max-new-tokens", type=int, default=512) + parser.add_argument("--load-8bit", action="store_true") + 
parser.add_argument("--load-4bit", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    main(args)
diff --git a/VLMEvalKit-sudoku/llava/serve/controller.py b/VLMEvalKit-sudoku/llava/serve/controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..261f8c6bd4461e723fff4c7a7557fe10d592bca9
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/controller.py
@@ -0,0 +1,287 @@
"""
A controller manages distributed workers.
It sends worker addresses to clients.
"""

import argparse
import asyncio
import dataclasses
from enum import Enum, auto
import json
import logging
import time
from typing import List, Union
import threading

from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
import numpy as np
import requests
import uvicorn

from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION
from llava.utils import build_logger, server_error_msg


logger = build_logger("controller", "controller.log")


class DispatchMethod(Enum):
    """Worker-selection strategy."""

    LOTTERY = auto()
    SHORTEST_QUEUE = auto()

    @classmethod
    def from_str(cls, name):
        if name == "lottery":
            return cls.LOTTERY
        elif name == "shortest_queue":
            return cls.SHORTEST_QUEUE
        else:
            raise ValueError(f"Invalid dispatch method")


@dataclasses.dataclass
class WorkerInfo:
    """Registration record for one worker process."""

    model_names: List[str]
    speed: int
    queue_length: int
    check_heart_beat: bool
    # Annotation corrected: this is assigned time.time() (a float) everywhere.
    last_heart_beat: float


def heart_beat_controller(controller):
    # Background loop: periodically evict workers whose heart beat expired.
    while True:
        time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
        controller.remove_stable_workers_by_expiration()


class Controller:
    def __init__(self, dispatch_method: str):
        # Dict[str -> WorkerInfo]
        self.worker_info = {}
        self.dispatch_method = DispatchMethod.from_str(dispatch_method)

        self.heart_beat_thread = threading.Thread(target=heart_beat_controller, args=(self,))
        self.heart_beat_thread.start()

        logger.info("Init controller")

    def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict):
        """Register (or refresh) a worker; returns True on success."""
        if worker_name not in self.worker_info:
            logger.info(f"Register a new worker: {worker_name}")
        else:
            logger.info(f"Register an existing worker: {worker_name}")

        if not worker_status:
            # No status supplied: ask the worker directly.
            worker_status = self.get_worker_status(worker_name)
            if not worker_status:
                return False

        self.worker_info[worker_name] = WorkerInfo(worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], check_heart_beat, time.time())

        logger.info(f"Register done: {worker_name}, {worker_status}")
        return True

    def get_worker_status(self, worker_name: str):
        # Returns the worker's status dict, or None if unreachable/unhealthy.
        try:
            r = requests.post(worker_name + "/worker_get_status", timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f"Get status fails: {worker_name}, {e}")
            return None

        if r.status_code != 200:
            logger.error(f"Get status fails: {worker_name}, {r}")
            return None

        return r.json()

    def remove_worker(self, worker_name: str):
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        # Re-register every known worker; drop the ones that no longer respond.
        old_info = dict(self.worker_info)
        self.worker_info = {}

        for w_name, w_info in old_info.items():
            if not self.register_worker(w_name, w_info.check_heart_beat, None):
                logger.info(f"Remove stale worker: {w_name}")

    def list_models(self):
        model_names = set()

        for w_name, w_info in self.worker_info.items():
            model_names.update(w_info.model_names)

        return list(model_names)

    def get_worker_address(self, model_name: str):
        """Pick a worker serving `model_name` per the dispatch method ("" if none)."""
        if self.dispatch_method == DispatchMethod.LOTTERY:
            worker_names = []
            worker_speeds = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            # Sample a worker with probability proportional to its speed.
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if norm < 1e-4:
                return ""
            worker_speeds = worker_speeds / norm
            if True:  # Directly return address
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                return worker_name

            # Check status before returning (unreachable while the branch above is kept)
            while True:
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]

                if self.get_worker_status(worker_name):
                    break
                else:
                    self.remove_worker(worker_name)
                    worker_speeds[pt] = 0
                    norm = np.sum(worker_speeds)
                    if norm < 1e-4:
                        return ""
                    worker_speeds = worker_speeds / norm
                    continue
            return worker_name
        elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
            worker_names = []
            worker_qlen = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_qlen.append(w_info.queue_length / w_info.speed)
            if len(worker_names) == 0:
                return ""
            # Choose the worker with the smallest speed-normalized queue.
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            self.worker_info[w_name].queue_length += 1
            logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
            return w_name
        else:
            raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        # Heart beats also refresh the worker's queue length.
        if worker_name not in self.worker_info:
            logger.info(f"Receive unknown heart beat. {worker_name}")
            return False

        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f"Receive heart beat. 
{worker_name}") + return True + + def remove_stable_workers_by_expiration(self): + expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION + to_delete = [] + for worker_name, w_info in self.worker_info.items(): + if w_info.check_heart_beat and w_info.last_heart_beat < expire: + to_delete.append(worker_name) + + for worker_name in to_delete: + self.remove_worker(worker_name) + + def worker_api_generate_stream(self, params): + worker_addr = self.get_worker_address(params["model"]) + if not worker_addr: + logger.info(f"no worker: {params['model']}") + ret = { + "text": server_error_msg, + "error_code": 2, + } + yield json.dumps(ret).encode() + b"\0" + + try: + response = requests.post(worker_addr + "/worker_generate_stream", json=params, stream=True, timeout=5) + for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"): + if chunk: + yield chunk + b"\0" + except requests.exceptions.RequestException as e: + logger.info(f"worker timeout: {worker_addr}") + ret = { + "text": server_error_msg, + "error_code": 3, + } + yield json.dumps(ret).encode() + b"\0" + + # Let the controller act as a worker to achieve hierarchical + # management. This can be used to connect isolated sub networks. 
    def worker_api_get_status(self):
        # Aggregate status across all workers so this controller can itself
        # register as a "worker" with a parent controller (see note above).
        model_names = set()
        speed = 0
        queue_length = 0

        for w_name in self.worker_info:
            worker_status = self.get_worker_status(w_name)
            if worker_status is not None:
                model_names.update(worker_status["model_names"])
                speed += worker_status["speed"]
                queue_length += worker_status["queue_length"]

        return {
            "model_names": list(model_names),
            "speed": speed,
            "queue_length": queue_length,
        }


app = FastAPI()


@app.post("/register_worker")
async def register_worker(request: Request):
    data = await request.json()
    controller.register_worker(data["worker_name"], data["check_heart_beat"], data.get("worker_status", None))


@app.post("/refresh_all_workers")
async def refresh_all_workers():
    models = controller.refresh_all_workers()


@app.post("/list_models")
async def list_models():
    models = controller.list_models()
    return {"models": models}


@app.post("/get_worker_address")
async def get_worker_address(request: Request):
    data = await request.json()
    addr = controller.get_worker_address(data["model"])
    return {"address": addr}


@app.post("/receive_heart_beat")
async def receive_heart_beat(request: Request):
    data = await request.json()
    exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"])
    return {"exist": exist}


@app.post("/worker_generate_stream")
async def worker_api_generate_stream(request: Request):
    params = await request.json()
    generator = controller.worker_api_generate_stream(params)
    return StreamingResponse(generator)


@app.post("/worker_get_status")
async def worker_api_get_status(request: Request):
    return controller.worker_api_get_status()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=21001)
    parser.add_argument("--dispatch-method", type=str, choices=["lottery", "shortest_queue"], 
default="shortest_queue")
    args = parser.parse_args()
    logger.info(f"args: {args}")

    controller = Controller(args.dispatch_method)
    uvicorn.run(app, host=args.host, port=args.port, log_level="info")
diff --git a/VLMEvalKit-sudoku/llava/serve/test_message.py b/VLMEvalKit-sudoku/llava/serve/test_message.py
new file mode 100644
index 0000000000000000000000000000000000000000..45acd534fb23fdc9c85d6dd0575b192cabc0da41
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/serve/test_message.py
@@ -0,0 +1,59 @@
import argparse
import json

import requests

from llava.conversation import default_conversation


def main():
    """Send one prompt to a worker and stream the reply to stdout.

    NOTE(review): reads the module-global `args` set in the __main__ block
    rather than taking parameters.
    """
    if args.worker_address:
        worker_addr = args.worker_address
    else:
        # Resolve a worker through the controller.
        controller_addr = args.controller_address
        ret = requests.post(controller_addr + "/refresh_all_workers")
        ret = requests.post(controller_addr + "/list_models")
        models = ret.json()["models"]
        models.sort()
        print(f"Models: {models}")

        ret = requests.post(controller_addr + "/get_worker_address", json={"model": args.model_name})
        worker_addr = ret.json()["address"]
        print(f"worker_addr: {worker_addr}")

    if worker_addr == "":
        return

    conv = default_conversation.copy()
    conv.append_message(conv.roles[0], args.message)
    prompt = conv.get_prompt()

    headers = {"User-Agent": "LLaVA Client"}
    pload = {
        "model": args.model_name,
        "prompt": prompt,
        "max_new_tokens": args.max_new_tokens,
        "temperature": 0.7,
        "stop": conv.sep,
    }
    response = requests.post(worker_addr + "/worker_generate_stream", headers=headers, json=pload, stream=True)

    print(prompt.replace(conv.sep, "\n"), end="")
    # Chunks are NUL-delimited JSON; "\r" overwrites the line as text streams in.
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            output = data["text"].split(conv.sep)[-1]
            print(output, end="\r")
    print("")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
    parser.add_argument("--worker-address", type=str)
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    parser.add_argument("--max-new-tokens", type=int, default=32)
    parser.add_argument("--message", type=str, default="Tell me a story with more than 1000 words.")
    args = parser.parse_args()

    main()
diff --git a/VLMEvalKit-sudoku/llava/train/__pycache__/llava_trainer.cpython-310.pyc b/VLMEvalKit-sudoku/llava/train/__pycache__/llava_trainer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3417a5983ffb5d2b044227edb3a92793dcd0d901
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/train/__pycache__/llava_trainer.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/train/__pycache__/train.cpython-310.pyc b/VLMEvalKit-sudoku/llava/train/__pycache__/train.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20fc05cb61dcd6f2ad63fdc1e1705c9859e50773
Binary files /dev/null and b/VLMEvalKit-sudoku/llava/train/__pycache__/train.cpython-310.pyc differ
diff --git a/VLMEvalKit-sudoku/llava/train/llama_flash_attn_monkey_patch.py b/VLMEvalKit-sudoku/llava/train/llama_flash_attn_monkey_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c88fe34266d5467cf49ba0ad4fbc5f5eeac5c029
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/train/llama_flash_attn_monkey_patch.py
@@ -0,0 +1,87 @@
from typing import Optional, Tuple
import warnings

import torch

import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv

try:
    from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
except ImportError:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input


def forward(
    self,
    hidden_states: torch.Tensor,
    attention_mask: 
Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    """Drop-in replacement for `LlamaAttention.forward` backed by
    FlashAttention's packed-QKV varlen kernel. Attention is always causal;
    `attention_mask` is treated as a key padding mask (see the patched
    `_prepare_decoder_attention_mask` below).
    """
    if output_attentions:
        warnings.warn("Output attentions is not supported for patched `LlamaAttention`, returning `None` instead.")

    bsz, q_len, _ = hidden_states.size()

    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)  # shape: (b, num_heads, s, head_dim)

    kv_seq_len = key_states.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    # Rotary embeddings must cover the full (cached + new) sequence length.
    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

    if past_key_value is not None:
        # reuse k, v
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)

    past_key_value = (key_states, value_states) if use_cache else None

    # repeat k/v heads if n_kv_heads < n_heads
    key_states = repeat_kv(key_states, self.num_key_value_groups)
    value_states = repeat_kv(value_states, self.num_key_value_groups)

    # Transform the data into the format required by flash attention
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)  # shape: [b, s, 3, num_heads, head_dim]
    key_padding_mask = attention_mask

    if key_padding_mask is None:
        # No padding: flatten the batch and describe rows via cumulative lengths.
        qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim)
        cu_q_lens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device)
        max_s = q_len
        output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output = output.view(bsz, q_len, -1)
    else:
        # Padded batch: strip pad tokens, run varlen attention, then re-pad.
        qkv = qkv.reshape(bsz, q_len, -1)
        qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask)
        qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
        output_unpad = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
        output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim)
        output = pad_input(output_unpad, indices, bsz, q_len)

    return self.o_proj(output), None, past_key_value


# Disable the transformation of the attention mask in LlamaModel as the flash attention
# requires the attention mask to be the same as the key_padding_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
    # [bsz, seq_len]
    return attention_mask


def replace_llama_attn_with_flash_attn():
    # Monkey-patch LlamaModel/LlamaAttention in transformers in-place.
    cuda_major, cuda_minor = torch.cuda.get_device_capability()
    if cuda_major < 8:
        warnings.warn("Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward." "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593")
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
diff --git a/VLMEvalKit-sudoku/llava/train/llava_trainer.py b/VLMEvalKit-sudoku/llava/train/llava_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..7adc347474a3295807b38fdf3b0b61d45f1a18fb
--- /dev/null
+++ b/VLMEvalKit-sudoku/llava/train/llava_trainer.py
@@ -0,0 +1,557 @@
import os
import torch
import torch.nn as nn
import datetime

from accelerate import Accelerator
from accelerate.utils import InitProcessGroupKwargs, GradientAccumulationPlugin, DataLoaderConfiguration
from torch.utils.data import Dataset, Sampler, DataLoader

from trl.trainer import DPOTrainer
from trl.trainer.utils import DPODataCollatorWithPadding

from transformers import Trainer


from transformers.utils import is_sagemaker_mp_enabled, logging, is_accelerate_available, is_datasets_available
logger = logging.get_logger(__name__)
from transformers.trainer_pt_utils import get_parameter_names
from transformers.trainer_utils import has_length
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS

from transformers.trainer_utils import seed_worker
from transformers.trainer_pt_utils import get_length_grouped_indices as get_length_grouped_indices_hf
from transformers.trainer_pt_utils import AcceleratorConfig
from typing import List, Optional
from datetime import timedelta

if is_accelerate_available():
    from accelerate import Accelerator, skip_first_batches, InitProcessGroupKwargs

if is_datasets_available():
    import datasets

from llava.utils import rank0_print


def maybe_zero_3(param, ignore_status=False, name=None):
    """Materialize a (possibly ZeRO-3 partitioned) parameter as a CPU tensor copy."""
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus

    if hasattr(param, 
"ds_id"): + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if not ignore_status: + print(name, "no ignore status") + with zero.GatheredParameters([param]): + param = param.data.detach().cpu().clone() + else: + param = param.detach().cpu().clone() + return param + + +def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match): + to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)} + to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()} + return to_return + + +def split_to_even_chunks(indices, lengths, num_chunks): + """ + Split a list of indices into `chunks` chunks of roughly equal lengths. + """ + + if len(indices) % num_chunks != 0: + return [indices[i::num_chunks] for i in range(num_chunks)] + + num_indices_per_chunk = len(indices) // num_chunks + + chunks = [[] for _ in range(num_chunks)] + chunks_lengths = [0 for _ in range(num_chunks)] + for index in indices: + shortest_chunk = chunks_lengths.index(min(chunks_lengths)) + chunks[shortest_chunk].append(index) + chunks_lengths[shortest_chunk] += lengths[index] + if len(chunks[shortest_chunk]) == num_indices_per_chunk: + chunks_lengths[shortest_chunk] = float("inf") + + return chunks + + +def get_variable_length_grouped_indices(lengths, batch_size, world_size, megabatch_mult=8, generator=None): + # We need to use torch for the random part as a distributed sampler will set the random seed for torch. 
+ indices = torch.randperm(len(lengths), generator=generator) + sorted_indices = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True) + megabatch_size = world_size * batch_size * megabatch_mult + megabatches = [sorted_indices[i : i + megabatch_size] for i in range(0, len(lengths), megabatch_size)] + megabatches = [sorted(megabatch, key=lambda i: indices[i], reverse=True) for megabatch in megabatches] + shuffled_indices = [i for megabatch in megabatches for i in megabatch] + world_batch_size = world_size * batch_size + batches = [shuffled_indices[i : i + world_batch_size] for i in range(0, len(lengths), world_batch_size)] + batch_indices = torch.randperm(len(batches), generator=generator) + batches = [batches[i] for i in batch_indices] + + return [i for batch in batches for i in batch] + + +def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None): + """ + Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar + lengths. To do this, the indices are: + + - randomly permuted + - grouped in mega-batches of size `mega_batch_mult * batch_size` + - reorder by length in each mega-batch + + The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of + maximum length placed first, so that an OOM happens sooner rather than later. + """ + + # We need to use torch for the random part as a distributed sampler will set the random seed for torch. + assert all(l != 0 for l in lengths), "Should not have zero length." 
def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None):
    """
    Return a list of indices so that each slice of `batch_size` consecutive indices correspond to elements of similar
    lengths, while keeping multimodal and language-only samples in separate megabatches.

    Sign convention: a positive length marks a multimodal sample, a negative
    length marks a language-only sample (its magnitude is the real length).
    """
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    assert all(l != 0 for l in lengths), "Should not have zero length."
    if all(l > 0 for l in lengths) or all(l < 0 for l in lengths):
        # Single modality: plain length grouping is enough.
        return get_length_grouped_indices(lengths, batch_size, world_size, generator=generator)

    mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
    lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])

    # NOTE(review): the inner calls pass generator=None (global torch RNG) — kept as-is.
    mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)]
    lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=None)]

    megabatch_size = world_size * batch_size
    mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
    lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]

    # The (possibly partial) last megabatch of each modality is merged into one
    # trailing mixed batch appended after shuffling.
    additional_batch = mm_megabatches[-1] + lang_megabatches[-1]
    megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
    order = torch.randperm(len(megabatches), generator=generator)
    megabatches = [megabatches[i] for i in order]

    if len(additional_batch) > 0:
        megabatches.append(sorted(additional_batch))

    return [i for megabatch in megabatches for i in megabatch]
+ """ + + # We need to use torch for the random part as a distributed sampler will set the random seed for torch. + indices = torch.randperm(len(lengths), generator=generator) + megabatch_size = world_size * batch_size + megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)] + megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] + megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches] + + return [i for megabatch in megabatches for batch in megabatch for i in batch] + + +def get_length_grouped_indices_auto_single(lengths, batch_size, world_size, generator=None): + indices = get_length_grouped_indices_hf(lengths, batch_size * world_size, generator=generator) + + megabatch_size = world_size * batch_size + megabatches = [indices[i : i + megabatch_size] for i in range(0, len(lengths), megabatch_size)] + megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches] + megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches] + + # We need to use torch for the random part as a distributed sampler will set the random seed for torch. + batch_indices = torch.randperm(len(megabatches), generator=generator) + megabatches = [megabatches[i] for i in batch_indices] + + return [i for megabatch in megabatches for batch in megabatch for i in batch] + + +def get_modality_length_grouped_indices_auto(lengths, batch_size, world_size, generator=None): + # We need to use torch for the random part as a distributed sampler will set the random seed for torch. + assert all(l != 0 for l in lengths), "Should not have zero length." 
def get_modality_length_grouped_indices_auto(lengths, batch_size, world_size, generator=None):
    """Modality-aware variant of `get_length_grouped_indices_auto_single`.

    Positive lengths are multimodal samples, negative lengths are language-only
    (magnitude is the true length). The trailing partial megabatch of each
    modality is intentionally dropped (see FIXME below), so some samples may
    not be yielded in an epoch.
    """
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    assert all(l != 0 for l in lengths), "Should not have zero length."
    if all(l > 0 for l in lengths) or all(l < 0 for l in lengths):
        # all samples are in the same modality
        return get_length_grouped_indices_auto_single(lengths, batch_size, world_size, generator=generator)

    mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
    lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])

    # NOTE(review): inner calls use generator=None (global torch RNG) — kept as-is.
    mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices_auto_single(mm_lengths, batch_size, world_size, generator=None)]
    lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices_auto_single(lang_lengths, batch_size, world_size, generator=None)]

    megabatch_size = world_size * batch_size
    mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
    lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]

    additional_batch = mm_megabatches[-1] + lang_megabatches[-1]
    megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
    order = torch.randperm(len(megabatches), generator=generator)
    megabatches = [megabatches[i] for i in order]

    # FIXME: Hard code to avoid last batch mixed with different modalities
    # if len(additional_batch) > 0:
    #     megabatches.append(sorted(additional_batch))

    return [i for megabatch in megabatches for i in megabatch]
+ """ + + def __init__( + self, + batch_size: int, + world_size: int, + lengths: Optional[List[int]] = None, + generator=None, + variable_length: bool = False, + group_by_modality: bool = False, + group_by_modality_auto: bool = False, + ): + if lengths is None: + raise ValueError("Lengths must be provided.") + + self.batch_size = batch_size + self.world_size = world_size + self.lengths = lengths + self.generator = generator + self.variable_length = variable_length + self.group_by_modality = group_by_modality + self.group_by_modality_auto = group_by_modality_auto + + def __len__(self): + return len(self.lengths) + + def __iter__(self): + if self.variable_length: + assert not self.group_by_modality, "Variable length grouping is not supported with modality grouping." + indices = get_variable_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) + else: + if self.group_by_modality: + indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator) + elif self.group_by_modality_auto: + indices = get_modality_length_grouped_indices_auto(self.lengths, self.batch_size, self.world_size, generator=self.generator) + else: + indices = get_length_grouped_indices_auto_single(self.lengths, self.batch_size, self.world_size, generator=self.generator) + return iter(indices) + + +class LLaVATrainer(Trainer): + + def create_accelerator_and_postprocess(self): + grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps} + grad_acc_kwargs["sync_with_dataloader"] = False + gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs) + + accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52)) + rank0_print("Setting NCCL timeout to INF to avoid running errors.") + + dataloader_config = DataLoaderConfiguration() + + # create accelerator object + # self.accelerator = Accelerator(dispatch_batches=self.args.dispatch_batches, 
    def create_accelerator_and_postprocess(self):
        """Build the `accelerate.Accelerator` with a very long NCCL timeout and
        re-derive the deepspeed/FSDP flags the HF Trainer normally sets.

        Overrides the base Trainer method to pin gradient-accumulation behavior
        (`sync_with_dataloader=False`) and avoid NCCL watchdog timeouts on long
        data-loading stalls.
        """
        grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps}
        # Do not sync gradients at dataloader epoch boundaries; accumulation is
        # driven purely by num_steps.
        grad_acc_kwargs["sync_with_dataloader"] = False
        gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs)

        # Effectively-infinite process-group timeout (52 weeks).
        accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
        rank0_print("Setting NCCL timeout to INF to avoid running errors.")

        dataloader_config = DataLoaderConfiguration()

        # create accelerator object
        # self.accelerator = Accelerator(dispatch_batches=self.args.dispatch_batches, split_batches=self.args.split_batches, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, kwargs_handlers=[accelerator_kwargs])
        self.accelerator = Accelerator(dataloader_config=dataloader_config, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, kwargs_handlers=[accelerator_kwargs])
        # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag
        self.gather_function = self.accelerator.gather_for_metrics

        # deepspeed and accelerate flags covering both trainer args and accelerate launcher
        self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
        self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None

        # post accelerator creation setup
        if self.is_fsdp_enabled:
            fsdp_plugin = self.accelerator.state.fsdp_plugin
            # Trainer args may override the plugin's defaults.
            fsdp_plugin.limit_all_gathers = self.args.fsdp_config.get("limit_all_gathers", fsdp_plugin.limit_all_gathers)
            if is_accelerate_available("0.23.0"):
                fsdp_plugin.activation_checkpointing = self.args.fsdp_config.get("activation_checkpointing", fsdp_plugin.activation_checkpointing)
                if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing:
                    raise ValueError("The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " "when using FSDP.")

        if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None:
            self.propagate_args_to_deepspeed()
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        """Pick a length-grouped sampler variant based on training args.

        Returns None for sizeless (iterable) datasets; falls back to the base
        Trainer sampler when no grouping flag is set.
        """
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None

        if self.args.group_by_length:
            lengths = self.train_dataset.lengths
            return LengthGroupedSampler(
                # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
                self.args.train_batch_size,
                # world_size=self.args.world_size,
                world_size=self.args.world_size * self.args.gradient_accumulation_steps,  # TODO: seems that this may work?
                lengths=lengths,
            )
        elif self.args.group_by_modality_length:
            # Signed lengths: positive = multimodal, negative = language-only.
            lengths = self.train_dataset.modality_lengths
            return LengthGroupedSampler(
                # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
                self.args.train_batch_size,
                # world_size=self.args.world_size,
                world_size=self.args.world_size * self.args.gradient_accumulation_steps,  # TODO: seems that this may work?
                lengths=lengths,
                group_by_modality=True,
            )
        elif self.args.group_by_modality_length_auto:
            lengths = self.train_dataset.modality_lengths
            return LengthGroupedSampler(
                # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
                self.args.train_batch_size,
                # world_size=self.args.world_size,
                world_size=self.args.world_size * self.args.gradient_accumulation_steps,  # TODO: seems that this may work?
                lengths=lengths,
                group_by_modality_auto=True,
            )
        elif self.args.group_by_varlen:
            lengths = self.train_dataset.lengths
            return LengthGroupedSampler(
                self.args.train_batch_size * self.args.gradient_accumulation_steps,
                # self.args.train_batch_size, # TODO: seems that we should have gradient_accumulation_steps
                # world_size=self.args.world_size,
                world_size=self.args.world_size * self.args.gradient_accumulation_steps,  # TODO: seems that this may work?
                lengths=lengths,
                variable_length=True,
            )
        else:
            return super()._get_train_sampler()
    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training [`~torch.utils.data.DataLoader`].

        Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
        training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")

        train_dataset = self.train_dataset
        data_collator = self.data_collator
        if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
            train_dataset = self._remove_unused_columns(train_dataset, description="training")
        else:
            data_collator = self._get_collator_with_removed_columns(data_collator, description="training")

        dataloader_params = {
            "batch_size": self._train_batch_size,
            "collate_fn": data_collator,
            "num_workers": self.args.dataloader_num_workers,
            "pin_memory": self.args.dataloader_pin_memory,
            "persistent_workers": self.args.dataloader_persistent_workers,
        }

        if not isinstance(train_dataset, torch.utils.data.IterableDataset):
            dataloader_params["sampler"] = self._get_train_sampler()
            dataloader_params["drop_last"] = self.args.dataloader_drop_last
            dataloader_params["worker_init_fn"] = seed_worker
            # prefetch_factor must be None when num_workers == 0.
            dataloader_params["prefetch_factor"] = self.args.dataloader_num_workers * 2 if self.args.dataloader_num_workers != 0 else None

        dataloader = self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params))

        return dataloader
    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.

        Builds up to six parameter groups: (decay / no-decay) x (base lr /
        projector-or-vision-tower lr / merger lr), honoring `mm_projector_lr`,
        `mm_vision_tower_lr` and `mm_vision_tower_merger_lr` when set.
        """
        if is_sagemaker_mp_enabled():
            return super().create_optimizer()

        opt_model = self.model

        if self.optimizer is None:
            # Params eligible for weight decay: everything except layernorms and biases.
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if "bias" not in name]
            lr_mapper = {}
            lr_mapper_merger = {}
            if self.args.mm_projector_lr is not None:
                lr_mapper["mm_projector"] = self.args.mm_projector_lr
            if self.args.mm_vision_tower_lr is not None:
                lr_mapper["vision_tower"] = self.args.mm_vision_tower_lr
            if self.args.mm_vision_tower_merger_lr is not None:
                lr_mapper_merger["merger"] = self.args.mm_vision_tower_merger_lr
            if len(lr_mapper) > 0:
                special_lr_parameters = [name for name, _ in opt_model.named_parameters() if any(module_keyword in name for module_keyword in lr_mapper)]
                # Base-lr groups exclude any parameter claimed by a special lr.
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in special_lr_parameters and p.requires_grad)],
                        "weight_decay": self.args.weight_decay,
                    },
                    {
                        "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in special_lr_parameters and p.requires_grad)],
                        "weight_decay": 0.0,
                    },
                ]
                for module_keyword, lr in lr_mapper.items():
                    if lr_mapper_merger:
                        # "merger" params get their own lr below — exclude them here
                        # so they are not assigned twice.
                        module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name and "merger" not in name]
                    else:
                        module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name]
                    optimizer_grouped_parameters.extend(
                        [
                            {
                                "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in module_parameters and p.requires_grad)],
                                "weight_decay": self.args.weight_decay,
                                "lr": lr,
                            },
                            {
                                "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in module_parameters and p.requires_grad)],
                                "weight_decay": 0.0,
                                "lr": lr,
                            },
                        ]
                    )
                for module_keyword, lr in lr_mapper_merger.items():
                    module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name]
                    optimizer_grouped_parameters.extend(
                        [
                            {
                                "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in module_parameters and p.requires_grad)],
                                "weight_decay": self.args.weight_decay,
                                "lr": lr,
                            },
                            {
                                "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in module_parameters and p.requires_grad)],
                                "weight_decay": 0.0,
                                "lr": lr,
                            },
                        ]
                    )
            else:
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)],
                        "weight_decay": self.args.weight_decay,
                    },
                    {
                        "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)],
                        "weight_decay": 0.0,
                    },
                ]

            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)

            self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
            if optimizer_cls.__name__ == "Adam8bit":
                import bitsandbytes

                manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                skipped = 0
                for module in opt_model.modules():
                    if isinstance(module, nn.Embedding):
                        # Keep embeddings in fp32 optimizer state to avoid 8-bit
                        # quantization error on the embedding table.
                        skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                        logger.info(f"skipped {module}: {skipped/2**20}M params")
                        manager.register_module_override(module, "weight", {"optim_bits": 32})
                        logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                logger.info(f"skipped: {skipped/2**20}M params")

        return self.optimizer

    def _save_checkpoint(self, model, trial):
        """Save only the multimodal adapter when that is all that is tuned;
        otherwise defer to the standard Trainer checkpointing."""
        if getattr(self.args, "tune_mm_mlp_adapter", False) or (
            hasattr(self.args, "mm_tunable_parts") and (len(self.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in self.args.mm_tunable_parts or "mm_vision_resampler" in self.args.mm_tunable_parts))
        ):
            from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

            checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

            run_dir = self._get_output_dir(trial=trial)
            output_dir = os.path.join(run_dir, checkpoint_folder)

            # Only save Adapter
            keys_to_match = ["mm_projector", "vision_resampler"]
            if getattr(self.args, "use_im_start_end", False):
                keys_to_match.extend(["embed_tokens", "embed_in"])

            weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)

            # Write only on the (global or local) rank-0 process.
            if self.args.local_rank == 0 or self.args.local_rank == -1:
                self.model.config.save_pretrained(output_dir)
                torch.save(weight_to_save, os.path.join(output_dir, f"mm_projector.bin"))
        else:
            super(LLaVATrainer, self)._save_checkpoint(model, trial)

    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        # Adapter-only runs already persist their weights in _save_checkpoint.
        if getattr(self.args, "tune_mm_mlp_adapter", False):
            pass
        else:
            super(LLaVATrainer, self)._save(output_dir, state_dict)
getattr(self.args, "tune_mm_mlp_adapter", False) or ( + hasattr(self.args, "mm_tunable_parts") and (len(self.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in self.args.mm_tunable_parts or "mm_vision_resampler" in self.args.mm_tunable_parts)) + ): + from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR + + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + + # Only save Adapter + keys_to_match = ["mm_projector", "vision_resampler"] + if getattr(self.args, "use_im_start_end", False): + keys_to_match.extend(["embed_tokens", "embed_in"]) + + weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match) + + if self.args.local_rank == 0 or self.args.local_rank == -1: + self.model.config.save_pretrained(output_dir) + torch.save(weight_to_save, os.path.join(output_dir, f"mm_projector.bin")) + else: + # super(LLaVADPOTrainer, self)._save_checkpoint(model, trial) + # print(type(model)) + # from transformers.modeling_utils import unwrap_model + # print(type(unwrap_model(model))) + # print(unwrap_model(model).config) + if self.args.lora_enable: + from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR + + checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" + run_dir = self._get_output_dir(trial=trial) + output_dir = os.path.join(run_dir, checkpoint_folder) + from transformers.modeling_utils import unwrap_model + + unwrapped_model = unwrap_model(model) + self.save_my_lora_ckpt(output_dir, self.args, unwrapped_model) + else: + super(LLaVADPOTrainer, self)._save_checkpoint(model, trial) + + def _save(self, output_dir: Optional[str] = None, state_dict=None): + if getattr(self.args, "tune_mm_mlp_adapter", False): + pass + else: + super(LLaVADPOTrainer, self)._save(output_dir, state_dict) diff --git a/VLMEvalKit-sudoku/llava/train/train.py 
b/VLMEvalKit-sudoku/llava/train/train.py new file mode 100644 index 0000000000000000000000000000000000000000..2ae036817e568e767674bd64183b170167708154 --- /dev/null +++ b/VLMEvalKit-sudoku/llava/train/train.py @@ -0,0 +1,2082 @@ +# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright: +# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright: +# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import ast +import os +import copy +from dataclasses import dataclass, field +import json +import logging +import pathlib +from typing import Dict, Optional, Sequence, List +from PIL import Image, ImageFile +from packaging import version +import numpy as np + +import time +import random +import yaml +import math +import re +import torch + +import transformers +import tokenizers +import deepspeed + +from transformers import AutoConfig +from torch.utils.data import Dataset +from llava.constants import IGNORE_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, IMAGE_TOKEN_INDEX +from llava.train.llava_trainer import LLaVATrainer + +from llava import conversation as conversation_lib +from llava.model import * +from llava.mm_utils import process_highres_image, process_anyres_image, process_highres_image_crop_split, tokenizer_image_token +from llava.utils import rank0_print, process_video_with_pyav, process_video_with_decord +# from llava.model.multimodal_encoder.hubconf import get_featup_state_dict +from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio +import gc +import random + +torch.multiprocessing.set_sharing_strategy("file_system") + +ImageFile.LOAD_TRUNCATED_IMAGES = True +local_rank = None + +IS_TOKENIZER_GREATER_THAN_0_14 = version.parse(tokenizers.__version__) >= version.parse("0.14") + + +@dataclass +class ModelArguments: + model_name_or_path: Optional[str] = field(default="facebook/opt-125m") + model_class_name: Optional[str] = field(default=None, metadata={"help": "Used to init model class, format is XXXXForCausalLM. e.g. 
@dataclass
class ModelArguments:
    """CLI/config arguments controlling model construction and which multimodal
    parts are tuned."""

    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    model_class_name: Optional[str] = field(default=None, metadata={"help": "Used to init model class, format is XXXXForCausalLM. currently XXXX is chosen from LlavaLlama, LlavaMixtral, LlavaMistral, Llama"})

    mm_tunable_parts: Optional[str] = field(
        default=None, metadata={"help": 'Could be "mm_mlp_adapter", "mm_vision_resampler", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_vision_tower,mm_mlp_adapter,mm_language_model", "mm_mlp_adapter,mm_language_model"'}
    )
    # deciding which part of the multimodal model to tune, will overwrite other previous settings

    version: Optional[str] = field(default="v0")
    freeze_backbone: bool = field(default=False)
    tune_mm_mlp_adapter: bool = field(default=False)
    tune_mm_vision_resampler: bool = field(default=False)
    vision_tower: Optional[str] = field(default=None)
    vision_tower_pretrained: Optional[str] = field(default=None)  # default to the last layer

    # Unfreezing / feature-selection knobs for the vision tower and LM.
    unfreeze_mm_vision_tower: bool = field(default=False)
    unfreeze_language_model: bool = field(default=False)
    mm_vision_select_layer: Optional[int] = field(default=-1)  # default to the last layer
    pretrain_mm_mlp_adapter: Optional[str] = field(default=None)
    mm_projector_type: Optional[str] = field(default="linear")
    mm_use_im_start_end: bool = field(default=False)
    mm_use_im_patch_token: bool = field(default=True)
    mm_patch_merge_type: Optional[str] = field(default="flat")
    mm_vision_select_feature: Optional[str] = field(default="patch")
    mm_resampler_type: Optional[str] = field(default=None)
    # Token mask-drop options.
    mm_mask_drop_mode: str = field(default="fixed")
    mm_mask_drop_skip_percentage: float = field(default=0.0)
    mm_mask_drop_ratio: float = field(default=0.25)
    mm_mask_drop_ratio_upper: Optional[float] = field(default=None)
    mm_mask_drop_ratio_lower: Optional[float] = field(default=None)
    # Spatial pooling of vision features.
    mm_spatial_pool_stride: Optional[int] = field(default=None)
    mm_spatial_pool_mode: str = field(default="bilinear")
    mm_spatial_pool_out_channels: Optional[int] = field(default=None)
    # Perceiver / Q-Former resampler hyper-parameters.
    mm_perceiver_depth: Optional[int] = field(default=3)
    mm_perceiver_latents: Optional[int] = field(default=32)
    mm_perceiver_ff_mult: Optional[float] = field(default=4)
    mm_perceiver_pretrained: Optional[str] = field(default=None)
    mm_qformer_depth: Optional[int] = field(default=3)
    mm_qformer_latents: Optional[int] = field(default=32)
    mm_qformer_pretrained: Optional[str] = field(default=None)

    # RoPE scaling for long-context training.
    rope_scaling_factor: Optional[float] = field(default=None)
    rope_scaling_type: Optional[str] = field(default=None)

    s2: Optional[bool] = field(default=False)
    s2_scales: Optional[str] = field(default="336,672,1008")

    use_pos_skipping: Optional[bool] = field(default=False)
    pos_skipping_range: Optional[int] = field(default=4096)

    mm_newline_position: Optional[str] = field(default="grid")
    delay_load: Optional[bool] = field(default=True)
    add_faster_video: Optional[bool] = field(default=False)
    faster_token_stride: Optional[int] = field(default=10)
    model_mode: Optional[str] = field(default="llava")  # llava, uhd_v1, uhd_v2
    # jbu_ckpt: Optional[str] = field(default="/mnt/data/user/tc_agi/zyp/featup/upsampler/0919/checkpoints/clip-large_jbu_4x_stack_cocostuff_attention_crf_0.001_tv_0.0_ent_0.0-0.001-True-30-2-5_2000.ckpt")
    feature_scale_mask: Optional[int] = field(default=7)  # Binary Mask Representation of Feature Scale Combination 1, (11)2, (111)2, (1111)2 means {1}, {1,2,3}, {1,2,3,4}, {1,2,3,4}
    sft_jbu: bool = field(default=False)
    merger_from_prev: bool = field(default=False)
@dataclass
class DataArguments:
    """Arguments describing the training data and image/video preprocessing."""

    data_path: str = field(default=None, metadata={"help": "Path to the training data, in llava's instruction.json format. Supporting multiple json files via /path/to/{a,b,c}.json"})
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    early_mix_text: bool = False
    image_folder: Optional[str] = field(default=None)
    image_aspect_ratio: str = "square"
    image_grid_pinpoints: Optional[str] = field(default=None)
    image_crop_resolution: Optional[int] = field(default=None)
    image_split_resolution: Optional[int] = field(default=None)

    # Video-specific options.
    video_folder: Optional[str] = field(default=None)
    video_fps: Optional[int] = field(default=1)
    frames_upbound: Optional[int] = field(default=0)
    add_time_instruction: Optional[bool] = field(default=False)
    force_sample: Optional[bool] = field(default=False)
    data_mode: Optional[str] = field(default="llava")  # llava, uhd_v1, uhd_v2
    res_mode: Optional[str] = field(default="clip")  # clip, siglip
    single: Optional[bool] = field(default=False, metadata={"help": "if sight is true slicing images will not be processed, which is used in pretrain stage"})
    resolution: Optional[int] = field(default=1024)  # clip, siglip
    split_patch_size: Optional[int] = field(default=32)  # clip, siglip
    any_res: Optional[bool] = field(default=False)
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with quantization, LoRA, per-module
    learning rates, and length-grouping flags."""

    # dispatch_batches: bool = field(default=False)
    # split_batches: bool = field(default=False)
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    remove_unused_columns: bool = field(default=False)
    freeze_mm_mlp_adapter: bool = field(default=False)
    freeze_mm_vision_resampler: bool = field(default=False)
    mpt_attn_impl: Optional[str] = field(default="triton")
    model_max_length: int = field(
        default=4096,
        metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
    )
    # Quantization (bitsandbytes) options.
    double_quant: bool = field(default=True, metadata={"help": "Compress the quantization statistics through double quantization."})
    quant_type: str = field(default="nf4", metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."})
    bits: int = field(default=16, metadata={"help": "How many bits to use."})
    # LoRA options.
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"
    # Optional per-module learning rates (see LLaVATrainer.create_optimizer).
    mm_projector_lr: Optional[float] = None
    mm_vision_tower_lr: Optional[float] = None
    mm_vision_tower_merger_lr: Optional[float] = None
    # Length-grouped sampling flags (see LengthGroupedSampler).
    group_by_varlen: bool = field(default=False)
    group_by_modality_length: bool = field(default=False)
    group_by_modality_length_auto: bool = field(default=False)
    auto_find_batch_size: bool = field(default=False)
    gradient_checkpointing: bool = field(default=True)
    verbose_logging: bool = field(default=False)
    attn_implementation: str = field(default="flash_attention_2", metadata={"help": "Use transformers attention implementation."})
def maybe_zero_3(param, ignore_status=False, name=None):
    """Return a detached CPU clone of `param`, gathering it first if it is a
    ZeRO-3 partitioned parameter (identified by the `ds_id` attribute)."""
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus

    if hasattr(param, "ds_id"):
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param


# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    """Extract LoRA weights (and optionally biases) from `named_params`.

    bias: "none" keeps only lora_* tensors; "all" also keeps every bias;
    "lora_only" keeps each bias only when its layer also has a LoRA weight.
    Raises NotImplementedError for any other value.
    """
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        # BUG FIX: the original iterated the dict directly (`for k, t in
        # maybe_lora_bias:`), which yields only keys and breaks the 2-tuple
        # unpacking, and it tested the stale `bias_name` left over from the
        # previous loop instead of the current key.
        for bias_key, bias_tensor in maybe_lora_bias.items():
            if bias_key in lora_bias_names:
                to_return[bias_key] = bias_tensor
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
    return to_return


def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    """Extract all non-LoRA parameters (optionally only trainable ones) as CPU tensors."""
    to_return = {k: t for k, t in named_params if "lora_" not in k}
    if require_grad_only:
        to_return = {k: t for k, t in to_return.items() if t.requires_grad}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return


def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    """Extract parameters whose names match any of `keys_to_match` as CPU tensors."""
    to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return
["mm_projector", "vision_tower", "vision_resampler"] + for name, module in model.named_modules(): + if any(mm_keyword in name for mm_keyword in multimodal_keywords): + continue + if isinstance(module, cls): + names = name.split(".") + lora_module_names.add(names[0] if len(names) == 1 else names[-1]) + + if "lm_head" in lora_module_names: # needed for 16-bit + lora_module_names.remove("lm_head") + return list(lora_module_names) + + +def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str): + """Collects the state dict and dump to disk.""" + if hasattr(trainer.args, "tune_mm_mlp_adapter") and trainer.args.tune_mm_mlp_adapter: + check_only_save_mm_adapter_tunnable = True + # only has mm_mlp_adapter and mm_vision_resampler in the tuneable parts + elif hasattr(trainer.args, "mm_tunable_parts") and (len(trainer.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in trainer.args.mm_tunable_parts or "mm_vision_resampler" in trainer.args.mm_tunable_parts)): + check_only_save_mm_adapter_tunnable = True + else: + check_only_save_mm_adapter_tunnable = False + + trainer.accelerator.wait_for_everyone() + torch.cuda.synchronize() + rank0_print(f"Only save projectors: {check_only_save_mm_adapter_tunnable}") + if check_only_save_mm_adapter_tunnable: + # Only save Adapter + keys_to_match = ["mm_projector", "vision_resampler"] + if getattr(trainer.args, "use_im_start_end", False): + keys_to_match.extend(["embed_tokens", "embed_in"]) + + weight_to_save = get_mm_adapter_state_maybe_zero_3(trainer.model.named_parameters(), keys_to_match) + trainer.model.config.save_pretrained(output_dir) + + current_folder = output_dir.split("/")[-1] + parent_folder = os.path.dirname(output_dir) + if trainer.args.local_rank == 0 or trainer.args.local_rank == -1: + if current_folder.startswith("checkpoint-"): + mm_projector_folder = os.path.join(parent_folder, "mm_projector") + os.makedirs(mm_projector_folder, exist_ok=True) + torch.save(weight_to_save, 
def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    tokenizer: "transformers.PreTrainedTokenizer",
    model: "transformers.PreTrainedModel",
):
    """Resize tokenizer and embedding.

    New token rows (input and output embeddings) are initialized to the mean
    of the pre-existing embeddings.
    Note: This is the unoptimized version that may make your embedding size
    not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)

        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg


def _tokenize_fn(strings: Sequence[str], tokenizer: "transformers.PreTrainedTokenizer") -> Dict:
    """Tokenize a list of strings, returning ids and unpadded lengths."""
    tokenized_list = [
        tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        )
        for text in strings
    ]
    input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
    # Length excludes padding (counts tokens != pad_token_id).
    input_ids_lens = labels_lens = [tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list]
    return dict(
        input_ids=input_ids,
        labels=labels,
        input_ids_lens=input_ids_lens,
        labels_lens=labels_lens,
    )


def _mask_targets(target, tokenized_lens, speakers):
    """Mask (in place) the shared header and every human turn in ``target``."""
    # tokenized_lens[0] is the header length: always masked.
    cur_idx = tokenized_lens[0]
    tokenized_lens = tokenized_lens[1:]
    target[:cur_idx] = IGNORE_INDEX
    for tokenized_len, speaker in zip(tokenized_lens, speakers):
        if speaker == "human":
            # +2 presumably skips the "### <role>:" signal tokens — TODO confirm.
            target[cur_idx + 2 : cur_idx + tokenized_len] = IGNORE_INDEX
        cur_idx += tokenized_len


def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round."""
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        from_str = sentence["from"]
        if from_str.lower() == "human":
            from_str = conversation_lib.default_conversation.roles[0]
        elif from_str.lower() == "gpt":
            from_str = conversation_lib.default_conversation.roles[1]
        else:
            from_str = "unknown"
        # NOTE: mutates the source sentence in place.
        sentence["value"] = BEGIN_SIGNAL + from_str + ": " + sentence["value"] + END_SIGNAL
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation


def preprocess_multimodal(sources: Sequence[str], data_args: "DataArguments") -> Dict:
    """Normalize image-token placement in every sentence (mutates in place)."""
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            # Only normalize when exactly one image token is embedded mid-text.
            num_im = len(re.findall(DEFAULT_IMAGE_TOKEN, sentence["value"]))
            if num_im == 1 and DEFAULT_IMAGE_TOKEN in sentence["value"] and not sentence["value"].startswith(DEFAULT_IMAGE_TOKEN):
                sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "").strip()
                sentence["value"] = DEFAULT_IMAGE_TOKEN + "\n" + sentence["value"]
                sentence["value"] = sentence["value"].strip()
                if "mmtag" in conversation_lib.default_conversation.version:
                    # BUG FIX(review): the extracted source had the tags
                    # stripped, leaving a no-op '"" + token + ""'; restored the
                    # upstream LLaVA mmtag wrapping — confirm against upstream.
                    sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, "<Image>" + DEFAULT_IMAGE_TOKEN + "</Image>")
            replace_token = DEFAULT_IMAGE_TOKEN
            if data_args.mm_use_im_start_end:
                replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

            # For videoInstruct-100k noisy_data. TODO: Ask Yuanhan to clean the
            # data instead of leaving the noise code here.
            sentence["value"] = sentence["value"].replace("QA_GT_caption_based_noisy", "")

    return sources
+ sentence["value"] = sentence["value"].replace("QA_GT_caption_based_noisy", "") + + return sources + + +def preprocess_llama_2(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict: + conv = conversation_lib.default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + + # Tokenize conversations + + if has_image: + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() + + assert conv.sep_style == conversation_lib.SeparatorStyle.LLAMA_2 + + # Mask targets + sep = "[/INST] " + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep2) + cur_len = 1 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = len(tokenizer(parts[0]).input_ids) - 2 + + target[cur_len : cur_len + instruction_len] = IGNORE_INDEX + + cur_len += round_len + target[cur_len:] = IGNORE_INDEX + + if cur_len < 
def preprocess_gemma(sources: List[List[Dict[str, str]]], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Render conversations with the Gemma template and mask instruction spans."""
    conv = conversation_lib.default_conversation.copy()
    role_map = {"human": conv.roles[0], "gpt": conv.roles[1]}

    conversations = []
    for i, source in enumerate(sources):
        if role_map[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, msg in enumerate(source):
            speaker = role_map[msg["from"]]
            assert speaker == conv.roles[j % 2], f"{i}"
            conv.append_message(speaker, msg["value"])
        conversations.append(conv.get_prompt())

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(p, tokenizer, return_tensors="pt") for p in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.GEMMA

    sep = conv.sep + conv.roles[1]
    for conversation, target in zip(conversations, targets):
        n_valid = int(target.ne(tokenizer.pad_token_id).sum())

        pieces = conversation.split(conv.sep)
        # Re-merge split pieces into (user, gpt) rounds.
        merged = [conv.sep.join(pieces[k : k + 2]) for k in range(0, len(pieces), 2)]

        cursor = 1  # the first position is never supervised
        target[:cursor] = IGNORE_INDEX
        for turn in merged:
            if turn == "":
                break

            halves = turn.split(sep)
            if len(halves) != 2:
                break
            halves[0] += sep  # re-append sep because split consumed it
            # Now "".join(halves) == turn

            if has_image:
                turn_len = len(tokenizer_image_token(turn, tokenizer)) - 1
                instr_len = len(tokenizer_image_token(halves[0], tokenizer)) - 1
            else:
                turn_len = len(tokenizer(turn).input_ids) - 1
                instr_len = len(tokenizer(halves[0]).input_ids) - 1

            turn_len += 2  # sep: \n takes 2 tokens
            target[cursor : cursor + instr_len] = IGNORE_INDEX
            cursor += turn_len

        target[cursor:] = IGNORE_INDEX

        if cursor < tokenizer.model_max_length:
            if cursor != n_valid:
                target[:] = IGNORE_INDEX
                print(f"warning: tokenization mismatch: {cursor} vs. {n_valid}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
def preprocess_qwen(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048, system_message: str = "You are a helpful assistant.") -> Dict:
    """Tokenize Qwen (ChatML) conversations and build loss-masked labels.

    System and user turns are masked with IGNORE_INDEX; assistant turns are
    supervised. Image placeholder tokens are mapped to IMAGE_TOKEN_INDEX.
    """
    roles = {"human": "user", "gpt": "assistant"}

    # Work on a deepcopy so the caller's tokenizer is never mutated.
    tokenizer = copy.deepcopy(tokenizer)
    # When there is actually an image, register the placeholder as a special token.
    if has_image:
        # NOTE(review): this literal was stripped to "" in extraction; restored
        # to the LLaVA placeholder "<image>" — confirm against upstream.
        tokenizer.add_tokens(["<image>"], special_tokens=True)

    image_token_index = tokenizer.convert_tokens_to_ids("<image>")
    im_start = tokenizer.convert_tokens_to_ids("<|im_start|>")
    im_end = tokenizer.convert_tokens_to_ids("<|im_end|>")
    # Structural tokens stay supervised even inside masked spans (198 is "\n").
    unmask_tokens_idx = [198, im_start, im_end]

    # Reset the Qwen chat template so it won't inject a system message on
    # every apply_chat_template call.
    chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    tokenizer.chat_template = chat_template

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            source = source[1:]  # conversations must start with a human turn

        input_id, target = [], []

        # Build the system message once per sample; it is context only (masked).
        input_id += tokenizer.apply_chat_template([{"role": "system", "content": system_message}])
        target += [IGNORE_INDEX] * len(input_id)

        for conv in source:
            # Accept both {"role","content"} and legacy {"from","value"} schemas.
            try:
                role = conv["role"]
                content = conv["content"]
            except KeyError:  # was a bare except; narrowed to the expected error
                role = conv["from"]
                content = conv["value"]

            role = roles.get(role, role)

            encode_id = tokenizer.apply_chat_template([{"role": role, "content": content}])
            input_id += encode_id
            if role in ["user", "system"]:
                target += [IGNORE_INDEX] * len(encode_id)
            else:
                target += encode_id

        assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}"
        for idx, encode_id in enumerate(input_id):
            if encode_id in unmask_tokens_idx:
                target[idx] = encode_id
            if encode_id == image_token_index:
                input_id[idx] = IMAGE_TOKEN_INDEX
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    # for OOM https://github.com/LLaVA-VL/LLaVA-NeXT/issues/352
    del tokenizer

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
    )
def preprocess_qwen3(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False, max_len=2048) -> Dict:
    """Tokenize Qwen-3 chat conversations and build loss-masked labels.

    Each assistant turn gets an empty ``<think></think>`` block spliced in
    right after the role header; the header + think prefix is masked.
    """
    roles = {"human": "user", "gpt": "assistant"}

    # Work on a deepcopy so the caller's tokenizer is never mutated.
    tokenizer = copy.deepcopy(tokenizer)
    if has_image:
        # NOTE(review): literal stripped to "" in extraction; restored "<image>".
        tokenizer.add_tokens(["<image>"], special_tokens=True)
    image_token_index = tokenizer.convert_tokens_to_ids("<image>")
    im_start = tokenizer.convert_tokens_to_ids("<|im_start|>")
    im_end = tokenizer.convert_tokens_to_ids("<|im_end|>")
    object_ref_start = tokenizer.convert_tokens_to_ids("<|object_ref_start|>")
    object_ref_end = tokenizer.convert_tokens_to_ids("<|object_ref_end|>")
    box_start = tokenizer.convert_tokens_to_ids("<|box_start|>")
    box_end = tokenizer.convert_tokens_to_ids("<|box_end|>")
    quad_start = tokenizer.convert_tokens_to_ids("<|quad_start|>")
    quad_end = tokenizer.convert_tokens_to_ids("<|quad_end|>")
    vision_start = tokenizer.convert_tokens_to_ids("<|vision_start|>")
    vision_end = tokenizer.convert_tokens_to_ids("<|vision_end|>")
    vision_pad = tokenizer.convert_tokens_to_ids("<|vision_pad|>")
    image_pad = tokenizer.convert_tokens_to_ids("<|image_pad|>")
    video_pad = tokenizer.convert_tokens_to_ids("<|video_pad|>")
    # NOTE(review): these two literals were stripped as well; restored to the
    # Qwen-3 thinking delimiters — confirm against upstream.
    think_start = tokenizer.convert_tokens_to_ids("<think>")
    think_end = tokenizer.convert_tokens_to_ids("</think>")
    # Structural/special tokens that stay supervised (198 is "\n").
    unmask_tokens_idx = [
        198, im_start, im_end,
        object_ref_start, object_ref_end,
        box_start, box_end,
        quad_start, quad_end,
        vision_start, vision_end,
        vision_pad, image_pad, video_pad,
        think_start, think_end
    ]

    # Reset the Qwen chat template so it won't inject a system message on
    # every apply_chat_template call.
    chat_template = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
    tokenizer.chat_template = chat_template

    # Apply prompt templates
    input_ids, targets = [], []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != roles["human"]:
            source = source[1:]  # conversations must start with a human turn

        input_id, target = [], []

        for conv in source:
            # Accept both {"role","content"} and legacy {"from","value"} schemas.
            try:
                role = conv["role"]
                content = conv["content"]
            except KeyError:  # was a bare except; narrowed to the expected error
                role = conv["from"]
                content = conv["value"]
            role = roles.get(role, role)
            encode_id = tokenizer.apply_chat_template([{"role": role, "content": content}], add_generation_prompt=False, enable_thinking=False)
            if role in ["user", "system"]:
                target += [IGNORE_INDEX] * len(encode_id)
                input_id += encode_id
            elif role in ["assistant"]:
                # The turn starts with 3 header tokens (<|im_start|>assistant\n);
                # splice an empty 4-token think block after them and mask the
                # resulting 7-token prefix (3 + 4).
                think_part_id = tokenizer.encode('<think>\n\n</think>\n\n')
                encode_id = encode_id[:3] + think_part_id + encode_id[3:]
                target += [IGNORE_INDEX] * 7 + encode_id[7:]
                input_id += encode_id
            else:
                # Was a pdb.set_trace() debugging leftover.
                raise ValueError(f"Unexpected role: {role!r}")
        assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}"
        for idx, encode_id in enumerate(input_id):
            if encode_id in unmask_tokens_idx:
                target[idx] = encode_id
            if encode_id == image_token_index:
                input_id[idx] = IMAGE_TOKEN_INDEX
        input_ids.append(input_id)
        targets.append(target)
    input_ids = torch.tensor(input_ids, dtype=torch.long)
    targets = torch.tensor(targets, dtype=torch.long)

    # for OOM https://github.com/LLaVA-VL/LLaVA-NeXT/issues/352
    del tokenizer

    return dict(
        input_ids=input_ids,  # tensor(bs x seq_len)
        labels=targets,  # tensor(bs x seq_len)
    )
roles[source[0]["from"]] != roles["human"]: + source = source[1:] + + input_id, target = [], [] + + for conv in source: + try: + role = conv["role"] + content = conv["content"] + except: + role = conv["from"] + content = conv["value"] + role = roles.get(role, role) + conv = [{"role" : role, "content" : content}] + ret = tokenizer.apply_chat_template(conv, tokenize=False, add_generation_prompt=False, enable_thinking=False) + encode_id = tokenizer.apply_chat_template(conv, add_generation_prompt=False, enable_thinking=False) + if role in ["user", "system"]: + target += [IGNORE_INDEX] * len(encode_id) + input_id += encode_id + elif role in ["assistant"]: + think_part_id = tokenizer.encode('\n\n\n\n') + encode_id = encode_id[:3] + think_part_id + encode_id[3:] + target += [IGNORE_INDEX] * 7 + encode_id[7:] + input_id += encode_id + else: + import pdb; pdb.set_trace() + assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}" + for idx, encode_id in enumerate(input_id): + if encode_id in unmask_tokens_idx: + target[idx] = encode_id + if encode_id == image_token_index: + input_id[idx] = IMAGE_TOKEN_INDEX + assert len(input_id) == len(target), f"{len(input_id)} != {len(target)}" + input_ids.append(input_id) + targets.append(target) + input_ids = torch.tensor(input_ids, dtype=torch.long) + targets = torch.tensor(targets, dtype=torch.long) + + #for OOM https://github.com/LLaVA-VL/LLaVA-NeXT/issues/352 + del tokenizer + + return dict( + input_ids=input_ids, # tensor(bs x seq_len) + labels=targets, # tensor(bs x seq_len) + ) + +def preprocess_llama3( + sources, + tokenizer: transformers.PreTrainedTokenizer, + has_image: bool = False, + max_len=2048, + system_message: str = "You are a helpful language and vision assistant. 
You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.", +) -> Dict: + # roles = {"human": "<|start_header_id|>user<|end_header_id|>", "gpt": "<|start_header_id|>assistant<|end_header_id|>"} + roles = {"human": "user", "gpt": "assistant"} + + # Add image tokens to tokenizer as a special tokens + # Use a deepcopy of tokenizer so that we don't modify on the tokenizer + tokenizer = copy.deepcopy(tokenizer) + # When there is actually an image, we add the image tokens as a special token + if has_image: + tokenizer.add_tokens([""], special_tokens=True) + image_token_index = tokenizer.convert_tokens_to_ids("") + bos_token_id = tokenizer.convert_tokens_to_ids("<|begin_of_text|>") + start_header_id = tokenizer.convert_tokens_to_ids("<|start_header_id|>") + end_header_id = tokenizer.convert_tokens_to_ids("<|end_header_id|>") + eot_id = tokenizer.convert_tokens_to_ids("<|eot_id|>") + + unmask_tokens = ["<|begin_of_text|>", "<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>", "\n\n"] + unmask_tokens_idx = [tokenizer.convert_tokens_to_ids(tok) for tok in unmask_tokens] + + # After update, calling tokenizer of llama3 will + # auto add bos id for the tokens. 
def preprocess_v1(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Render "v1"-style conversations and mask everything but assistant replies."""
    conv = conversation_lib.default_conversation.copy()
    role_map = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Render every source conversation through the template.
    conversations = []
    for i, source in enumerate(sources):
        if role_map[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, msg in enumerate(source):
            speaker = role_map[msg["from"]]
            assert speaker == conv.roles[j % 2], f"{i}"
            conv.append_message(speaker, msg["value"])
        conversations.append(conv.get_prompt())

    # Tokenize (with image placeholders when present).
    if has_image:
        input_ids = torch.stack([tokenizer_image_token(p, tokenizer, return_tensors="pt") for p in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()
    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO

    # Everything before "<sep><role>: " in each round is instruction: mask it.
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        n_valid = int(target.ne(tokenizer.pad_token_id).sum())

        turns = conversation.split(conv.sep2)
        cursor = 1
        target[:cursor] = IGNORE_INDEX
        for turn_idx, turn in enumerate(turns):
            if turn == "":
                break

            pieces = turn.split(sep)
            if len(pieces) != 2:
                break
            pieces[0] += sep

            if has_image:
                turn_len = len(tokenizer_image_token(turn, tokenizer))
                instr_len = len(tokenizer_image_token(pieces[0], tokenizer)) - 2
            else:
                turn_len = len(tokenizer(turn).input_ids)
                instr_len = len(tokenizer(pieces[0]).input_ids) - 2

            # Non-legacy tokenizers (>=0.14) drop one token per later round.
            if turn_idx != 0 and not tokenizer.legacy and IS_TOKENIZER_GREATER_THAN_0_14:
                turn_len -= 1
                instr_len -= 1

            target[cursor : cursor + instr_len] = IGNORE_INDEX
            cursor += turn_len
        target[cursor:] = IGNORE_INDEX

        # Sanity check: masked length must match the unpadded token count.
        if cursor < tokenizer.model_max_length:
            if cursor != n_valid:
                target[:] = IGNORE_INDEX
                print(f"WARNING: tokenization mismatch: {cursor} vs. {n_valid}." f" (ignored)")

    return dict(
        input_ids=input_ids,
        labels=targets,
    )
f" (ignored)") + + return dict( + input_ids=input_ids, + labels=targets, + ) + + +def preprocess_mpt(sources, tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict: + conv = conversation_lib.default_conversation.copy() + roles = {"human": conv.roles[0], "gpt": conv.roles[1]} + + # Apply prompt templates + conversations = [] + for i, source in enumerate(sources): + if roles[source[0]["from"]] != conv.roles[0]: + # Skip the first one if it is not from human + source = source[1:] + + conv.messages = [] + for j, sentence in enumerate(source): + role = roles[sentence["from"]] + assert role == conv.roles[j % 2], f"{i}" + conv.append_message(role, sentence["value"]) + conversations.append(conv.get_prompt()) + + # Tokenize conversations + + if has_image: + input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations], dim=0) + else: + input_ids = tokenizer( + conversations, + return_tensors="pt", + padding="longest", + max_length=tokenizer.model_max_length, + truncation=True, + ).input_ids + + targets = input_ids.clone() + assert conv.sep_style == conversation_lib.SeparatorStyle.MPT + + # Mask targets + sep = conv.sep + conv.roles[1] + for conversation, target in zip(conversations, targets): + total_len = int(target.ne(tokenizer.pad_token_id).sum()) + + rounds = conversation.split(conv.sep) + re_rounds = [conv.sep.join(rounds[:3])] # system + user + gpt + for conv_idx in range(3, len(rounds), 2): + re_rounds.append(conv.sep.join(rounds[conv_idx : conv_idx + 2])) # user + gpt + cur_len = 1 + target[:cur_len] = IGNORE_INDEX + for i, rou in enumerate(re_rounds): + if rou == "": + break + + parts = rou.split(sep) + if len(parts) != 2: + break + parts[0] += sep + + if has_image: + round_len = len(tokenizer_image_token(rou, tokenizer)) + instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 1 + else: + round_len = len(tokenizer(rou).input_ids) + instruction_len = 
def preprocess_plain(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    """Plain captioning format: image token + caption; loss on the caption only."""
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]["value"]
        source[0]["value"] = DEFAULT_IMAGE_TOKEN
        joined = source[0]["value"] + source[1]["value"] + conversation_lib.default_conversation.sep
        conversations.append(joined)

    # tokenize conversations
    input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        prompt_len = len(tokenizer_image_token(source[0]["value"], tokenizer))
        target[:prompt_len] = IGNORE_INDEX

    return dict(input_ids=input_ids, labels=targets)


def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, has_image: bool = False) -> Dict:
    """Dispatch to the template-specific preprocessor for the active conversation.

    Fallback ("### "-signal) path:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
    """
    sep_style = conversation_lib.default_conversation.sep_style
    version = conversation_lib.default_conversation.version
    if sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if sep_style == conversation_lib.SeparatorStyle.LLAMA_2:
        return preprocess_llama_2(sources, tokenizer, has_image=has_image)
    if version.startswith("v1"):
        return preprocess_v1(sources, tokenizer, has_image=has_image)
    if version == "mpt":
        return preprocess_mpt(sources, tokenizer, has_image=has_image)
    if version == "qwen3":
        return preprocess_qwen3(sources, tokenizer, has_image=has_image)
    if version == "qwen":
        return preprocess_qwen(sources, tokenizer, has_image=has_image)
    if version == "gemma":
        return preprocess_gemma(sources, tokenizer, has_image=has_image)
    if version == "llama_v3":
        return preprocess_llama3(sources, tokenizer, has_image=has_image)

    # Legacy path: add end signal and concatenate together.
    conversations = []
    for source in sources:
        header = f"{conversation_lib.default_conversation.system}\n\n"
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)

    def get_tokenize_len(prompts):
        # Token count per prompt, image placeholders included.
        return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]

    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversations]
    else:
        conversations_tokenized = _tokenize_fn(conversations, tokenizer)
        input_ids = conversations_tokenized["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        # NOTE: ``header`` deliberately leaks from the loop above; it is the
        # same string for every source.
        if has_image:
            tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
        else:
            tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)
tokenizer)["input_ids_lens"] + speakers = [sentence["from"] for sentence in source] + _mask_targets(target, tokenized_lens, speakers) + + return dict(input_ids=input_ids, labels=targets) + + +class LazySupervisedDataset(Dataset): + def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer, data_args: DataArguments): + super(LazySupervisedDataset, self).__init__() + self.tokenizer = tokenizer + self.list_data_dict = [] + + # Handle multiple JSON files specified in the data_path + if "{" in data_path and "}" in data_path: + base_path, file_pattern = re.match(r"^(.*)\{(.*)\}\.json$", data_path).groups() + file_names = file_pattern.split(",") + rank0_print(f"Loading {file_names} from {base_path}") + data_args.dataset_paths = [] + for file_name in file_names: + data_args.dataset_paths.append(f"{base_path}{file_name}.json") + full_path = f"{base_path}{file_name}.json" + rank0_print(f"Loading {full_path}") + with open(full_path, "r") as file: + cur_data_dict = json.load(file) + rank0_print(f"Loaded {len(cur_data_dict)} samples from {full_path}") + self.list_data_dict.extend(cur_data_dict) + elif data_path.endswith(".yaml"): + with open(data_path, "r") as file: + yaml_data = yaml.safe_load(file) + datasets = yaml_data.get("datasets") + # file should be in the format of: + # datasets: + # - json_path: xxxx1.json + # sampling_strategy: first:1000 + # - json_path: xxxx2.json + # sampling_strategy: end:3000 + # - json_path: xxxx3.json + # sampling_strategy: random:999 + data_args.dataset_paths = [dataset.get("json_path") for dataset in datasets] + for dataset in datasets: + json_path = dataset.get("json_path") + sampling_strategy = dataset.get("sampling_strategy", "all") + sampling_number = None + + rank0_print(f"Loading {json_path} with {sampling_strategy} sampling strategy") + + if json_path.endswith(".jsonl"): + cur_data_dict = [] + with open(json_path, "r") as json_file: + for line in json_file: + cur_data_dict.append(json.loads(line.strip())) + elif 
json_path.endswith(".json"): + with open(json_path, "r") as json_file: + cur_data_dict = json.load(json_file) + else: + raise ValueError(f"Unsupported file type: {json_path}") + + if ":" in sampling_strategy: + sampling_strategy, sampling_number = sampling_strategy.split(":") + if "%" in sampling_number: + sampling_number = math.ceil(int(sampling_number.split("%")[0]) * len(cur_data_dict) / 100) + else: + sampling_number = int(sampling_number) + + # Apply the sampling strategy + if sampling_strategy == "first" and sampling_number is not None: + cur_data_dict = cur_data_dict[:sampling_number] + elif sampling_strategy == "end" and sampling_number is not None: + cur_data_dict = cur_data_dict[-sampling_number:] + elif sampling_strategy == "random" and sampling_number is not None: + random.shuffle(cur_data_dict) + cur_data_dict = cur_data_dict[:sampling_number] + + rank0_print(f"Loaded {len(cur_data_dict)} samples from {json_path}") + self.list_data_dict.extend(cur_data_dict) + else: + data_args.dataset_paths = [data_path] + rank0_print(f"Loading {data_path}") + with open(data_path, "r") as file: + cur_data_dict = json.load(file) + rank0_print(f"Loaded {len(cur_data_dict)} samples from {data_path}") + self.list_data_dict.extend(cur_data_dict) + + rank0_print(f"Loaded {len(self.list_data_dict)} samples from {data_path}") + rank0_print("Formatting inputs...Skip in lazy mode") + self.tokenizer = tokenizer + self.data_args = data_args + + def __len__(self): + return len(self.list_data_dict) + + @property + def lengths(self): + length_list = [] + for sample in self.list_data_dict: + img_tokens = 128 if "image" in sample else 0 + length_list.append(sum(len(conv["value"].split()) for conv in sample["conversations"]) + img_tokens) + return length_list + + @property + def modality_lengths(self): + length_list = [] + for sample in self.list_data_dict: + cur_len = sum(len(conv["value"].split()) for conv in sample["conversations"]) + assert cur_len > 0, f"Conversation length is 0 
for {sample}" + if "image" in sample or "video" in sample or self.data_args.early_mix_text: + length_list.append(cur_len) + else: + length_list.append(-cur_len) + return length_list + + def resize_as_grid(self, image, slice_num, res): + w, h = image.size + r = math.sqrt(slice_num * res * res / h / w) # 保证是切为slice_num片 + fix_h = int(h * r) + fix_w = int(w * r) + image = image.resize((fix_w, fix_h), Image.Resampling.BICUBIC) + return image + + def process_image(self, image_file, overwrite_image_aspect_ratio=None): + image_folder = self.data_args.image_folder + processor = self.data_args.image_processor + # print(f"\n\nInspecting the image path, folder = {image_folder}, image={image_file}\n\n") + try: + image = Image.open(os.path.join(image_folder, image_file)).convert("RGB") + except Exception as exn: + print(f"Failed to open image {image_file}. Exception:", exn) + raise exn + + image_size = image.size + image_aspect_ratio = self.data_args.image_aspect_ratio + if overwrite_image_aspect_ratio is not None: + image_aspect_ratio = overwrite_image_aspect_ratio + if image_aspect_ratio == "highres": + image = process_highres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints) + elif image_aspect_ratio == "anyres" or "anyres_max" in image_aspect_ratio: + image = process_anyres_image(image, self.data_args.image_processor, self.data_args.image_grid_pinpoints) + elif image_aspect_ratio == "crop_split": + image = process_highres_image_crop_split(image, self.data_args) + elif image_aspect_ratio == "pad": + + def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + + image = 
expand2square(image, tuple(int(x * 255) for x in processor.image_mean)) + image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0] + else: + image = processor.preprocess(image, return_tensors="pt")["pixel_values"][0] + return image, image_size, "image" + + def __getitem__(self, i) -> Dict[str, torch.Tensor]: + # TODO: define number of retries somewhere else + num_base_retries = 3 + num_final_retries = 300 + + # try the current sample first + for attempt_idx in range(num_base_retries): + try: + sample = self._get_item(i) + return sample + except Exception as e: + # sleep 1s in case it is a cloud disk issue + print(f"[Try #{attempt_idx}] Failed to fetch sample {i}. Exception:", e) + time.sleep(1) + + # try other samples, in case it is file corruption issue + for attempt_idx in range(num_base_retries): + try: + next_index = min(i + 1, len(self.list_data_dict) - 1) + # sample_idx = random.choice(range(len(self))) + sample = self._get_item(next_index) + return sample + except Exception as e: + # no need to sleep + print(f"[Try other #{attempt_idx}] Failed to fetch sample {next_index}. 
Exception:", e) + pass + + try: + sample = self._get_item(i) + return sample + except Exception as e: + raise e + + def _get_item(self, i) -> Dict[str, torch.Tensor]: + sources = self.list_data_dict[i] + if isinstance(i, int): + sources = [sources] + assert len(sources) == 1, "Don't know why it is wrapped to a list" # FIXME + + patch_images = None + ind_tokens = None + # LLaVA UHD + if self.data_args.data_mode == 'uhd_v1' or self.data_args.data_mode == 'uhd_v2': + if 'image' in sources[0]: + image_file = self.list_data_dict[i]['image'] + if not isinstance(image_files, list): + image_files = [image_files] # 兼容旧格式:单图 -> 单元素列表 + + image_folder = self.data_args.image_folder + processor = self.data_args.image_processor + crop_size = self.data_args.image_processor.crop_size + res = self.data_args.resolution + patch_size = self.data_args.split_patch_size + + image = Image.open(os.path.join(image_folder, image_file)).convert('RGB') + + #for new sota datasets "TextVQA", old uhd datasets 'textvqa' + if 'TextVQA' in image_file or 'textvqa' in image_file: + image = resize_image_keep_ratio(image, max_size=1024) + # else: + # _, temp_patches, _, _ = slice_image_minicpm(image, max_slice_nums=7, scale_resolution=res, patch_size=14, never_split=False) + # if random.random() < 0.1 and len(temp_patches) != 6: + # image = self.resize_as_grid(image, slice_num=6, res=res) + + source_image, patches, best_grid, ind_tokens = slice_image_minicpm( + image, max_slice_nums=7, scale_resolution=res, patch_size=patch_size, never_split=False, any_res=self.data_args.any_res) + + if self.data_args.single: + patches = [] + best_grid = None + ind_tokens = [] + + if best_grid is None: + source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False, + do_rescale=True, do_normalize=True, + return_tensors='pt')['pixel_values'] # 1, 3, abs_h, abs_w + patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width']) + else: + source_tensors = 
processor.preprocess(source_image, do_resize=False, do_center_crop=False, + do_rescale=True, do_normalize=True, + return_tensors='pt')['pixel_values'] # 1, 3, abs_h, abs_w + patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False, + do_rescale=True, do_normalize=True, + return_tensors='pt')['pixel_values'] # num_slice, 3, s_h, s_w + image = source_tensors[0] # 3, h, w + patch_images = patch_tensors # bs, 3, h, w + + sources = preprocess_multimodal( + copy.deepcopy([e["conversations"] for e in sources]), + self.data_args) + else: + sources = copy.deepcopy([e["conversations"] for e in sources]) + else: + if "image" in sources[0]: + image_file = self.list_data_dict[i]["image"] + if type(image_file) is list: + image = [self.process_image(f) for f in image_file] + # Handling multi images + # overwrite to process with simple pad + if len(image_file) > 1: + image = [self.process_image(f, "pad") for f in image_file] + image = [[im[0], im[1], "image"] for im in image] + else: + image = [self.process_image(image_file)] + sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args) + + elif "video" in sources[0]: + video_file = self.list_data_dict[i]["video"] + video_folder = self.data_args.video_folder + video_file = os.path.join(video_folder, video_file) + suffix = video_file.split(".")[-1] + if not os.path.exists(video_file): + print("File {} not exist!".format(video_file)) + + try: + if "shareVideoGPTV" in video_file: + frame_files = [os.path.join(video_file, f) for f in os.listdir(video_file) if os.path.isfile(os.path.join(video_file, f))] + frame_files.sort() # Ensure the frames are sorted if they are named sequentially + + # TODO: Hard CODE: Determine the indices for uniformly sampling 10 frames + if self.data_args.force_sample: + num_frames_to_sample = self.data_args.frames_upbound + else: + num_frames_to_sample = 10 + + avg_fps = 2 + + total_frames = len(frame_files) + sampled_indices = 
np.linspace(0, total_frames - 1, num_frames_to_sample, dtype=int) + + + frame_time = [i/2 for i in sampled_indices] + frame_time = ",".join([f"{i:.2f}s" for i in frame_time]) + + video_time = total_frames / avg_fps + + # Read and store the sampled frames + video = [] + for idx in sampled_indices: + frame_path = frame_files[idx] + try: + with Image.open(frame_path) as img: + frame = img.convert("RGB") + video.append(frame) + except IOError: + print(f"Failed to read frame at path: {frame_path}") + else: + video, video_time, frame_time, num_frames_to_sample = process_video_with_decord(video_file, self.data_args) + + processor = self.data_args.image_processor + image = processor.preprocess(video, return_tensors="pt")["pixel_values"] + if self.data_args.add_time_instruction: + time_instruciton = f"The video lasts for {video_time:.2f} seconds, and {num_frames_to_sample} frames are uniformly sampled from it. These frames are located at {frame_time}.Please answer the following questions related to this video." 
+ sources[0]["conversations"][0]["value"] = f'{DEFAULT_IMAGE_TOKEN}\n{time_instruciton}\n{sources[0]["conversations"][0]["value"].replace(DEFAULT_IMAGE_TOKEN, "")}' + image = [(image, video[0].size, "video")] + sources = preprocess_multimodal(copy.deepcopy([e["conversations"] for e in sources]), self.data_args) + # print(sources) + except Exception as e: + print(f"Error: {e}") + print(f"Failed to read video file: {video_file}") + return self._get_item(i + 1) + else: + sources = copy.deepcopy([e["conversations"] for e in sources]) + + has_image = ("image" in self.list_data_dict[i]) or ("video" in self.list_data_dict[i]) + data_dict = preprocess(sources, self.tokenizer, has_image=has_image) + + if "prompt" in data_dict: + prompt = data_dict["prompt"] + else: + prompt = None + + if isinstance(i, int): + data_dict = dict(input_ids=data_dict["input_ids"][0], labels=data_dict["labels"][0]) + + # image exist in the data + if "image" in self.list_data_dict[i]: + data_dict["image"] = image + data_dict['patch_images'] = patch_images + data_dict['ind_tokens'] = ind_tokens + + elif "video" in self.list_data_dict[i]: + data_dict["image"] = image + elif self.data_args.is_multimodal: + # image does not exist in the data, but the model is multimodal + crop_size = self.data_args.image_processor.crop_size + if self.data_args.data_mode == 'uhd_v1' or self.data_args.data_mode == 'uhd_v2': + data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width']) + data_dict['patch_images'] = torch.zeros(1, 3, crop_size['height'], crop_size['width']) + data_dict['ind_tokens'] = [] + else: + data_dict["image"] = [ + (torch.zeros(1, 3, crop_size["height"], crop_size["width"]), (crop_size["width"], crop_size["height"]), "text"), + ] + # prompt exist in the data + if prompt is not None: + data_dict["prompt"] = prompt + + data_dict["id"] = self.list_data_dict[i].get("id", i) + + return data_dict + + +@dataclass +class DataCollatorForSupervisedDataset(object): + """Collate examples for 
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""
    # NOTE(review): the @dataclass decorator is vestigial here — there are no
    # annotated class fields and a manual __init__ is defined (dataclasses never
    # overwrite an existing __init__), so the decorator is effectively a no-op.

    def __init__(self, tokenizer: transformers.PreTrainedTokenizer, data_args: DataArguments):
        self.tokenizer = tokenizer
        self.data_args = data_args

    def pad_sequence(self, input_ids, batch_first, padding_value):
        """Pad a list of 1-D tensors to equal length, honoring tokenizer.padding_side.

        torch's pad_sequence only right-pads; left padding is implemented by
        flipping each sequence, right-padding, then flipping the batch back.
        """
        if self.tokenizer.padding_side == "left":
            input_ids = [torch.flip(_input_ids, [0]) for _input_ids in input_ids]
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=batch_first, padding_value=padding_value)
        if self.tokenizer.padding_side == "left":
            input_ids = torch.flip(input_ids, [1])
        return input_ids

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        """Assemble a batch: truncate to model_max_length, pad ids/labels, build the
        attention mask, and gather image/prompt/patch fields when present."""
        input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
        # input_ids, labels, ids = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels", "id"))
        input_ids = [_input_ids[: self.tokenizer.model_max_length] for _input_ids in input_ids]
        labels = [_labels[: self.tokenizer.model_max_length] for _labels in labels]
        if self.tokenizer.pad_token_id is None:
            # self.tokenizer.pad_token_id = self.tokenizer.eos_token_id # FIXME: this could only be triggered for llama3 model.
            self.tokenizer.pad_token_id = 0  # This gets the best result. Don't know why.
        input_ids = self.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        labels = self.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        # attention_mask marks non-pad positions; labels upcast from int32 for loss fns
        batch = dict(input_ids=input_ids, labels=labels.long() if labels.dtype == torch.int32 else labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id))
        # batch = dict(input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ids=ids)

        if "image" in instances[0]:
            images = [instance["image"] for instance in instances]

            # NOTE(review): assumes each instance["image"] is a list of
            # (tensor, size, modality) triples — in uhd mode _get_item stores a bare
            # tensor instead, which would make these comprehensions iterate tensor
            # rows; verify uhd batches against the model's consumption of this field.
            batch["image_sizes"] = [im[1] for im_list in images for im in im_list]
            batch["modalities"] = [im[2] for im_list in images for im in im_list]

            if self.data_args.data_mode == 'uhd_v1' or self.data_args.data_mode == 'uhd_v2':
                batch["images"] = images
            else:
                images = [im[0] for im_list in images for im in im_list]

                # if all(x is not None and x.shape == images[0].shape for x in images):
                # Image: (N, P, C, H, W)
                # Video: (N, F, C, H, W)
                # batch["images"] = torch.stack(images)
                # else:
                batch["images"] = images

        if "prompt" in instances[0]:
            batch["prompts"] = [instance["prompt"] for instance in instances]

        if 'patch_images' in instances[0]:
            batch['patch_images'] = [instance['patch_images'] for instance in instances]
        if 'ind_tokens' in instances[0]:
            batch['ind_tokens'] = [instance['ind_tokens'] for instance in instances]
        return batch


def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    train_dataset = LazySupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path, data_args=data_args)
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer, data_args=data_args)
    return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
def get_model(model_args, training_args, bnb_model_from_pretrained_args):
    """Instantiate the base LM, dispatching on model_name_or_path to the matching
    LLaVA wrapper class (mixtral/mistral/llama/qwen/gemma/...), after applying any
    config overrides (rope scaling, spatial pooling, pos skipping).

    Args:
        model_args: parsed ModelArguments (model path, vision tower, overrides).
        training_args: parsed TrainingArguments (attn impl, dtype, cache dir).
        bnb_model_from_pretrained_args: extra kwargs for 4/8-bit loading.
    Returns:
        The loaded (untrained) causal-LM model instance.
    """
    assert training_args.attn_implementation
    # NOTE(review): lexicographic string comparison of torch.__version__ — works for
    # current 2.x versions but is not a real version compare; consider
    # packaging.version if available.
    if training_args.attn_implementation == "sdpa" and torch.__version__ < "2.1.2":
        raise ValueError("The 'sdpa' attention implementation requires torch version 2.1.2 or higher.")

    customized_kwargs = dict()
    customized_kwargs.update(bnb_model_from_pretrained_args)
    cfg_pretrained = None

    overwrite_config = {}
    # Only fetch the pretrained config when at least one override will need it.
    if any(
        [
            model_args.rope_scaling_factor is not None,
            model_args.rope_scaling_type is not None,
            model_args.mm_spatial_pool_stride is not None,
            model_args.mm_spatial_pool_out_channels is not None,
            model_args.mm_spatial_pool_mode is not None,
            model_args.mm_resampler_type is not None,
        ]
    ):
        cfg_pretrained = AutoConfig.from_pretrained(model_args.model_name_or_path)

    if model_args.use_pos_skipping is not None and model_args.pos_skipping_range is not None:
        overwrite_config["use_pos_skipping"] = model_args.use_pos_skipping
        overwrite_config["pos_skipping_range"] = model_args.pos_skipping_range

    if model_args.rope_scaling_factor is not None and model_args.rope_scaling_type is not None:
        overwrite_config["rope_scaling"] = {
            "factor": model_args.rope_scaling_factor,
            "type": model_args.rope_scaling_type,
        }
        # Derive model_max_length from the scaled context window when unset.
        if training_args.model_max_length is None:
            training_args.model_max_length = cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor
            overwrite_config["max_sequence_length"] = training_args.model_max_length
        assert training_args.model_max_length == int(cfg_pretrained.max_position_embeddings * model_args.rope_scaling_factor), print(
            f"model_max_length: {training_args.model_max_length}, max_position_embeddings: {cfg_pretrained.max_position_embeddings}, rope_scaling_factor: {model_args.rope_scaling_factor}"
        )
        # overwrite_config["max_sequence_length"] = model_args.max_sequence_length
        # overwrite_config["tokenizer_model_max_length"] = model_args.tokenizer_model_max_length

    # Spatial-pool overrides only apply when the full quadruple is provided.
    if model_args.mm_spatial_pool_stride is not None and model_args.mm_spatial_pool_out_channels is not None and model_args.mm_spatial_pool_mode is not None and model_args.mm_resampler_type is not None:
        overwrite_config["mm_resampler_type"] = model_args.mm_resampler_type
        overwrite_config["mm_spatial_pool_stride"] = model_args.mm_spatial_pool_stride
        overwrite_config["mm_spatial_pool_out_channels"] = model_args.mm_spatial_pool_out_channels
        overwrite_config["mm_spatial_pool_mode"] = model_args.mm_spatial_pool_mode

    if model_args.mm_spatial_pool_mode is not None:
        overwrite_config["mm_spatial_pool_mode"] = model_args.mm_spatial_pool_mode

    if overwrite_config:
        assert cfg_pretrained is not None, "cfg_pretrained is None"

        rank0_print(f"Overwriting config with {overwrite_config}")
        for k, v in overwrite_config.items():
            setattr(cfg_pretrained, k, v)

        customized_kwargs["config"] = cfg_pretrained

    if model_args.model_class_name is not None:
        # Explicit class override: look up "<name>ForCausalLM" in transformers.
        actual_model_class_name = f"{model_args.model_class_name}ForCausalLM"
        model_class = getattr(transformers, actual_model_class_name)
        rank0_print(f"Using model class {model_class} from {model_args.model_class_name}")
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            attn_implementation=training_args.attn_implementation,
            torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
            low_cpu_mem_usage=False,
            **customized_kwargs,
        )
    elif model_args.vision_tower is not None:
        # Multimodal path: pick the LLaVA wrapper by substring of the model name.
        if "mixtral" in model_args.model_name_or_path.lower():
            model = LlavaMixtralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
            from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock

            # Keep MoE blocks whole under ZeRO-3 partitioning.
            deepspeed.utils.set_z3_leaf_modules(model, [MixtralSparseMoeBlock])
        elif "mistral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower():
            model = LlavaMistralForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        elif (
            "wizardlm-2" in model_args.model_name_or_path.lower()
            or "vicuna" in model_args.model_name_or_path.lower()
            or "llama" in model_args.model_name_or_path.lower()
            # or "yi" in model_args.model_name_or_path.lower()
            or "nous-hermes" in model_args.model_name_or_path.lower()
            and "wizard-2" in model_args.model_name_or_path.lower()
            # NOTE(review): `and` binds tighter than `or`, so the last two terms
            # require BOTH "nous-hermes" and "wizard-2" — confirm this is intended.
        ):
            model = LlavaLlamaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        elif "qwen3" in model_args.model_name_or_path.lower():
            # Checked before the generic "qwen" branch below.
            model = LlavaQwen3ForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        elif "qwen" in model_args.model_name_or_path.lower():
            if "moe" in model_args.model_name_or_path.lower() or "A14B" in model_args.model_name_or_path:
                model = LlavaQwenMoeForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )
                from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock

                deepspeed.utils.set_z3_leaf_modules(model, [Qwen2MoeSparseMoeBlock])
            else:
                model = LlavaQwenForCausalLM.from_pretrained(
                    model_args.model_name_or_path,
                    cache_dir=training_args.cache_dir,
                    attn_implementation=training_args.attn_implementation,
                    torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                    low_cpu_mem_usage=False,
                    **customized_kwargs,
                )
        elif "gemma" in model_args.model_name_or_path.lower():
            model = LlavaGemmaForCausalLM.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=training_args.cache_dir,
                attn_implementation=training_args.attn_implementation,
                torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
                low_cpu_mem_usage=False,
                **customized_kwargs,
            )
        else:
            raise ValueError(f"Unknown model class {model_args}")
    else:
        # Text-only fallback.
        model = transformers.LlamaForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=training_args.cache_dir,
            attn_implementation=training_args.attn_implementation,
            torch_dtype=(torch.bfloat16 if training_args.bf16 else None),
            low_cpu_mem_usage=False,
            **customized_kwargs,
        )
    return model
if training_args.verbose_logging: + rank0_print(f"Inspecting experiment hyperparameters:\n") + rank0_print(f"model_args = {vars(model_args)}\n\n") + rank0_print(f"data_args = {vars(data_args)}\n\n") + rank0_print(f"training_args = {vars(training_args)}\n\n") + # rank0_print(f"evaluation_args = {vars(evaluation_args)}\n\n") + + local_rank = training_args.local_rank + compute_dtype = torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32) + + bnb_model_from_pretrained_args = {} + if training_args.bits in [4, 8]: + from transformers import BitsAndBytesConfig + + bnb_model_from_pretrained_args.update( + dict( + device_map={"": training_args.device}, + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + quantization_config=BitsAndBytesConfig( + load_in_4bit=training_args.bits == 4, + load_in_8bit=training_args.bits == 8, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=compute_dtype, + bnb_4bit_use_double_quant=training_args.double_quant, + bnb_4bit_quant_type=training_args.quant_type, # {'fp4', 'nf4'} + ), + ) + ) + + model = get_model(model_args, training_args, bnb_model_from_pretrained_args) + model.config.use_cache = False + if model_args.rope_scaling_factor is not None and model_args.rope_scaling_type is not None: + model.config.rope_scaling = { + "factor": model_args.rope_scaling_factor, + "type": model_args.rope_scaling_type, + } + + if model_args.freeze_backbone: + model.model.requires_grad_(False) + + if training_args.bits in [4, 8]: + from peft import prepare_model_for_kbit_training + + model.config.torch_dtype = torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32) + model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing) + + if training_args.gradient_checkpointing: + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + 
else: + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) + + if training_args.lora_enable: + from peft import LoraConfig, get_peft_model + + lora_config = LoraConfig( + r=training_args.lora_r, + lora_alpha=training_args.lora_alpha, + target_modules=find_all_linear_names(model), + lora_dropout=training_args.lora_dropout, + bias=training_args.lora_bias, + task_type="CAUSAL_LM", + ) + if training_args.bits == 16: + if training_args.bf16: + model.to(torch.bfloat16) + if training_args.fp16: + model.to(torch.float16) + rank0_print("Adding LoRA adapters...") + model = get_peft_model(model, lora_config) + + if "mistral" in model_args.model_name_or_path.lower() or "mixtral" in model_args.model_name_or_path.lower() or "zephyr" in model_args.model_name_or_path.lower(): + tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="left") + elif "qwen" in model_args.model_name_or_path.lower(): + tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side="right") + elif ( + "wizardlm-2" in model_args.model_name_or_path.lower() + or "vicuna" in model_args.model_name_or_path.lower() + or "llama" in model_args.model_name_or_path.lower() + or "nous-hermes" in model_args.model_name_or_path.lower() + and "wizard-2" in model_args.model_name_or_path.lower() + ): + tokenizer = transformers.AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + cache_dir=training_args.cache_dir, + model_max_length=training_args.model_max_length, + padding_side="right", + use_fast=False, + ) + + rank0_print(f"Prompt version: {model_args.version}") + if model_args.version == "v0": + if tokenizer.pad_token is None: + 
smart_tokenizer_and_embedding_resize( + special_tokens_dict=dict(pad_token="[PAD]"), + tokenizer=tokenizer, + model=model, + ) + elif model_args.version == "v0.5": + tokenizer.pad_token = tokenizer.unk_token + else: + if tokenizer.unk_token is not None: + tokenizer.pad_token = tokenizer.unk_token + if model_args.version in conversation_lib.conv_templates: + conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version] + else: + conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"] + + if model_args.vision_tower is not None: + model.config.model_mode = model_args.model_mode + model.config.feature_scale_mask = model_args.feature_scale_mask + # model.config.jbu_ckpt = model_args.jbu_ckpt + model.get_model().initialize_vision_modules(model_args=model_args, fsdp=training_args.fsdp) + # if model_args.model_mode == 'uhd_v2': + # state_dict = get_featup_state_dict(model_args.jbu_ckpt) + # model.get_model().mm_projector.upsampler.load_state_dict(state_dict, strict=False) + # model.get_model().mm_projector.upsampler.to(training_args.device) + + vision_tower = model.get_vision_tower() + vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device) + data_args.image_processor = vision_tower.image_processor + data_args.is_multimodal = True + + model.config.image_aspect_ratio = data_args.image_aspect_ratio + if data_args.image_grid_pinpoints is not None: + if isinstance(data_args.image_grid_pinpoints, str) and "x" in data_args.image_grid_pinpoints: + try: + patch_size = data_args.image_processor.size[0] + except Exception as e: + patch_size = data_args.image_processor.size["shortest_edge"] + + assert patch_size in [224, 336, 384, 448, 512], "patch_size should be in [224, 336, 384, 448, 512]" + # Use regex to extract the range from the input string + matches = re.findall(r"\((\d+)x(\d+)\)", data_args.image_grid_pinpoints) + range_start = tuple(map(int, matches[0])) + 
range_end = tuple(map(int, matches[-1])) + # Generate a matrix of tuples from (range_start[0], range_start[1]) to (range_end[0], range_end[1]) + grid_pinpoints = [(i, j) for i in range(range_start[0], range_end[0] + 1) for j in range(range_start[1], range_end[1] + 1)] + # Multiply all elements by patch_size + data_args.image_grid_pinpoints = [[dim * patch_size for dim in pair] for pair in grid_pinpoints] + elif isinstance(data_args.image_grid_pinpoints, str): + data_args.image_grid_pinpoints = ast.literal_eval(data_args.image_grid_pinpoints) + + model.config.image_grid_pinpoints = data_args.image_grid_pinpoints + model.config.image_crop_resolution = data_args.image_crop_resolution + model.config.image_split_resolution = data_args.image_split_resolution + model.config.tokenizer_padding_side = tokenizer.padding_side + model.config.tokenizer_model_max_length = tokenizer.model_max_length + model.config.mm_newline_position = model_args.mm_newline_position + model.config.add_faster_video = model_args.add_faster_video + model.config.faster_token_stride = model_args.faster_token_stride + model.config.add_time_instruction = data_args.add_time_instruction + model.config.force_sample = data_args.force_sample + model.config.mm_spatial_pool_stride = model_args.mm_spatial_pool_stride + + ### Deciding train which part of the model + if model_args.mm_tunable_parts is None: # traditional way of deciding which part to train + model.config.tune_mm_mlp_adapter = training_args.tune_mm_mlp_adapter = model_args.tune_mm_mlp_adapter + model.config.tune_mm_vision_resampler = training_args.tune_mm_vision_resampler = model_args.tune_mm_vision_resampler + + if model_args.tune_mm_mlp_adapter or model_args.tune_mm_vision_resampler: + model.requires_grad_(False) + if model_args.tune_mm_mlp_adapter: + for p in model.get_model().mm_projector.parameters(): + p.requires_grad = True + # if model_args.model_mode == 'uhd_v2': + # # for p in 
model.get_model().mm_projector.upsampler.upsampler.parameters(): + # # p.requires_grad = model_args.sft_jbu + # for p in model.get_model().mm_projector.upsampler.upsampler.fixup_proj.parameters(): + # p.requires_grad = model_args.sft_jbu + if model_args.tune_mm_vision_resampler: + for p in model.get_model().vision_resampler.parameters(): + p.requires_grad = True + + model.config.freeze_mm_mlp_adapter = training_args.freeze_mm_mlp_adapter + if training_args.freeze_mm_mlp_adapter: + for p in model.get_model().mm_projector.parameters(): + p.requires_grad = False + + model.config.freeze_mm_vision_resampler = training_args.freeze_mm_vision_resampler + if training_args.freeze_mm_vision_resampler: + for p in model.get_model().vision_resampler.parameters(): + p.requires_grad = False + + model.config.unfreeze_mm_vision_tower = model_args.unfreeze_mm_vision_tower + if model_args.unfreeze_mm_vision_tower: + vision_tower.requires_grad_(True) + else: + vision_tower.requires_grad_(False) + + else: + rank0_print(f"Using mm_tunable_parts: {model_args.mm_tunable_parts}") + model.config.mm_tunable_parts = training_args.mm_tunable_parts = model_args.mm_tunable_parts + # Set the entire model to not require gradients by default + model.requires_grad_(False) + vision_tower.requires_grad_(False) + model.get_model().mm_projector.requires_grad_(False) + model.get_model().vision_resampler.requires_grad_(False) + # Parse the mm_tunable_parts to decide which parts to unfreeze + tunable_parts = model_args.mm_tunable_parts.split(",") + if "mm_mlp_adapter" in tunable_parts: + for p in model.get_model().mm_projector.parameters(): + p.requires_grad = True + # if model_args.model_mode == 'uhd_v2': + # for p in model.get_model().mm_projector.upsampler.upsampler.parameters(): + # p.requires_grad = False + # for p in model.get_model().mm_projector.upsampler.upsampler.fixup_proj.parameters(): + # p.requires_grad = model_args.sft_jbu + if "mm_vision_adapter" in tunable_parts: + for name, param in 
model.named_parameters(): + if "vision_tower" in name: + if 'merger' in name: + param.requires_grad_(True) + # if 'merger' in name or 'embeddings' in name: + # param.requires_grad_(True) + if "mm_vision_resampler" in tunable_parts: + for p in model.get_model().vision_resampler.parameters(): + p.requires_grad = True + if "mm_vision_tower" in tunable_parts: + for name, param in model.named_parameters(): + if "vision_tower" in name: + param.requires_grad_(True) + if "mm_language_model" in tunable_parts: + for name, param in model.named_parameters(): + if "vision_tower" not in name and "mm_projector" not in name and "vision_resampler" not in name: + param.requires_grad_(True) + if "vision_tower_position_embedding" in tunable_parts: + for name, param in model.named_parameters(): + if "vision_tower" in name and "position_embedding" in name: + param.requires_grad_(True) + if "vision_tower_embedding" in tunable_parts: + for name, param in model.named_parameters(): + if "vision_tower" in name and "embedding" in name: + param.requires_grad_(True) + # import pdb; pdb.set_trace() + rank0_print('----------tunner part------------') + for name, param in model.named_parameters(): + if param.requires_grad: + rank0_print(name) + # import pdb; pdb.set_trace() + + total_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters()) + trainable_params = sum(p.ds_numel if hasattr(p, "ds_numel") else p.numel() for p in model.parameters() if p.requires_grad) + rank0_print(f"Total parameters: ~{total_params/1e6:.2f} MB)") + rank0_print(f"Trainable parameters: ~{trainable_params/1e6:.2f} MB)") + if training_args.bits in [4, 8]: + model.get_model().mm_projector.to(dtype=compute_dtype, device=training_args.device) + + model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = model_args.mm_use_im_start_end + model.config.mm_projector_lr = training_args.mm_projector_lr + model.config.mm_vision_tower_lr = training_args.mm_vision_tower_lr + 
model.config.mm_vision_tower_merger_lr = training_args.mm_vision_tower_merger_lr + training_args.use_im_start_end = model_args.mm_use_im_start_end + model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token + model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer) + + if training_args.bits in [4, 8]: + from peft.tuners.lora import LoraLayer + + for name, module in model.named_modules(): + if isinstance(module, LoraLayer): + if training_args.bf16: + module = module.to(torch.bfloat16) + if "norm" in name: + module = module.to(torch.float32) + if "lm_head" in name or "embed_tokens" in name: + if hasattr(module, "weight"): + if training_args.bf16 and module.weight.dtype == torch.float32: + module = module.to(torch.bfloat16) + + data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args) + # merger_list = [ module.weight for name, module in model.named_modules() if "zero_init_fc" in name] + # print(merger_list) + + ### Callback ### + from transformers import TrainerCallback + import wandb + + class DoneFlagCallback(TrainerCallback): + def on_train_end(self, args, state, control, **kwargs): + flag_path = os.path.join(training_args.output_dir, "done.flag") + with open(flag_path, "w") as f: + f.write("done\n") + print(f"✅ 写入完成标志: {flag_path}") + + class MergerCallback(TrainerCallback): + def __init__(self, target_step=50): + self.target_step = target_step + + def on_step_end(self, args, state, control, **kwargs): + if state.global_step % self.target_step != 0: + return + + model = kwargs['model'] + optimizer = kwargs['optimizer'] + + rank0_print(f"\n🔍 Step {state.global_step}: merger layer snapshot:") + + # 显示权重均值和方差 + for name, param in model.named_parameters(): + if "merger" in name and "zero" in name: + rank0_print(f" {name}: mean={param.data.mean().item():.6f}, std={param.data.std().item():.6f}") + + + class LayerIOStatsCallback(TrainerCallback): + def __init__(self, target_step=50, layer_names=None, project="auto-wandb", 
run_name=None): + self.target_step = target_step + self.layer_names = layer_names or [] + self.project = project + self.run_name = run_name + self._wandb_initialized = False + self.hook_handles = [] + self.io_stats = {} + + def _get_module_by_name(self, model, name): + module = model + for attr in name.split("."): + module = getattr(module, attr) + return module + + def _register_hooks(self, model): + for name in self.layer_names: + module = self._get_module_by_name(model, name) + + def make_hook(layer_name): + def hook_fn(mod, inp, outp): + inp_tensor = inp[0][0].detach().cpu() if isinstance(inp[0], torch.Tensor) else None + out_tensor = outp[0].detach().cpu() if isinstance(outp, tuple) else outp.detach().cpu() + self.io_stats[layer_name] = { + "input": inp_tensor, + "output": out_tensor + } + return hook_fn + + handle = module.register_forward_hook(make_hook(name)) + self.hook_handles.append(handle) + + def _remove_hooks(self): + for h in self.hook_handles: + h.remove() + self.hook_handles = [] + + def _maybe_init_wandb(self): + if not self._wandb_initialized and wandb.run is None: + wandb.init(project=self.project, name=self.run_name) + self._wandb_initialized = True + + def on_step_begin(self, args, state, control, **kwargs): + self.io_stats = {} + self._register_hooks(kwargs["model"]) + + def on_step_end(self, args, state, control, **kwargs): + if state.global_step % self.target_step != 0: + self._remove_hooks() + return + + self._maybe_init_wandb() + + wandb_dict = {} + rank0_print(f"\n🔍 [Step {state.global_step}] Layer I/O stats (per-sample 0):") + for name, data in self.io_stats.items(): + x_in, x_out = data.get("input"), data.get("output") + if x_in is not None and x_out is not None: + rank0_print(f" 📌 {name}:") + rank0_print(f" input : shape={x_in.shape}, mean={x_in.mean():.4f}, std={x_in.std():.4f}") + rank0_print(f" output: shape={x_out.shape}, mean={x_out.mean():.4f}, std={x_out.std():.4f}") + wandb_dict[f"{name}/input_mean"] = x_in.mean().item() + 
wandb_dict[f"{name}/input_std"] = x_in.std().item() + wandb_dict[f"{name}/output_mean"] = x_out.mean().item() + wandb_dict[f"{name}/output_std"] = x_out.std().item() + else: + rank0_print(f" ⚠️ {name}: missing input/output") + if wandb_dict: + wandb.log(wandb_dict, step=state.global_step) + self._remove_hooks() + merger_layer_names = ['model.vision_tower.vision_tower.vision_model.encoder.layers.4.merger', 'model.vision_tower.vision_tower.vision_model.encoder.layers.18.merger'] + # trainer = LLaVATrainer(model=model, tokenizer=tokenizer, args=training_args, callbacks=[LayerIOStatsCallback(target_step=2, layer_names=merger_layer_names)], **data_module) + trainer = LLaVATrainer(model=model, processing_class=tokenizer, args=training_args, callbacks=[DoneFlagCallback()], **data_module) + if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")): + trainer.train(resume_from_checkpoint=True) + else: + trainer.train() + trainer.save_state() + + model.config.use_cache = True + + if training_args.lora_enable: + state_dict = get_peft_state_maybe_zero_3(model.named_parameters(), training_args.lora_bias) + non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(model.named_parameters()) + if training_args.local_rank == 0 or training_args.local_rank == -1: + if hasattr(model, "config"): + model.config.save_pretrained(training_args.output_dir) + if hasattr(model, "generation_config"): + model.generation_config.save_pretrained(training_args.output_dir) + model.save_pretrained(training_args.output_dir, state_dict=state_dict) + torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, "non_lora_trainables.bin")) + else: + safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir) + + rank0_print(f"Model saved to {training_args.output_dir}") + + +if __name__ == "__main__": + train() diff --git a/VLMEvalKit-sudoku/vlmeval/__init__.py b/VLMEvalKit-sudoku/vlmeval/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..ba138025775b0d79b5da9ba82e2fcd546d044a5d --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/__init__.py @@ -0,0 +1,21 @@ +import ssl +ssl._create_default_https_context = ssl._create_unverified_context +# Temporarily bypass SSL certificate verification to download files from oss. + +try: + import torch +except ImportError: + pass + +from .smp import * +load_env() + +from .api import * +from .dataset import * +from .utils import * +from .vlm import * +from .config import * +from .tools import cli + + +__version__ = '0.2rc1' diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/base.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4e00d55ac7ab3e2f88ba5653c3dd2e767650694 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/base.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/bluelm_api.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/bluelm_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b22fed088cdc4f4b72fafa43759b911d526689b5 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/bluelm_api.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/cloudwalk.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/cloudwalk.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9db53b349dcbf226aa91e3d6d3178b64441a244 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/cloudwalk.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/doubao_vl_api.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/doubao_vl_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf2112d7d6b2141ccd65e67d22ea0e3e24689e30 Binary files /dev/null and 
b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/doubao_vl_api.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gpt.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2dc556281afbe1fc8e8db525873e865ed2eb2203 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/gpt.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/mug_u.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/mug_u.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6721252db914b24e711938bb864549612655172b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/mug_u.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_api.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a553138f1fc49383a58e85f2fe4971c6a4c40eaf Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_api.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/sensechat_vision.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/sensechat_vision.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..934f3b23d06b50714ea17a07ad32a0b5971aecdc Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/sensechat_vision.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/siliconflow.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/siliconflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdfbe4b014b173edd85cf81e58b2aec460074357 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/siliconflow.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/api/__pycache__/taiyi.cpython-310.pyc 
b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/taiyi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a1aea72b5cc32ab40f959b18b7fe342aaacf080 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/api/__pycache__/taiyi.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/config.py b/VLMEvalKit-sudoku/vlmeval/config.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef3549588f8987d1225299558b23a15d6498245 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/config.py @@ -0,0 +1,1659 @@ +from vlmeval.vlm import * +from vlmeval.api import * +from functools import partial +import os + +PandaGPT_ROOT = None +MiniGPT4_ROOT = None +TransCore_ROOT = None +Yi_ROOT = None +OmniLMM_ROOT = None +Mini_Gemini_ROOT = None +VXVERSE_ROOT = None +VideoChat2_ROOT = None +VideoChatGPT_ROOT = None +PLLaVA_ROOT = None +RBDash_ROOT = None +VITA_ROOT = None +LLAVA_V1_7B_MODEL_PTH = "Please set your local path to LLaVA-7B-v1.1 here, the model weight is obtained by merging LLaVA delta weight based on vicuna-7b-v1.1 in https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md with vicuna-7b-v1.1. 
" + +video_models = { + "Video-LLaVA-7B": partial(VideoLLaVA, model_path="LanguageBind/Video-LLaVA-7B"), + "Video-LLaVA-7B-HF": partial( + VideoLLaVA_HF, model_path="LanguageBind/Video-LLaVA-7B-hf" + ), + "VideoChat2-HD": partial( + VideoChat2_HD, + model_path="OpenGVLab/VideoChat2_HD_stage4_Mistral_7B", + root=VideoChat2_ROOT, + config_file="./vlmeval/vlm/video_llm/configs/videochat2_hd.json", + ), + "Chat-UniVi-7B": partial(Chatunivi, model_path="Chat-UniVi/Chat-UniVi"), + "Chat-UniVi-7B-v1.5": partial( + Chatunivi, model_path="Chat-UniVi/Chat-UniVi-7B-v1.5" + ), + "LLaMA-VID-7B": partial( + LLaMAVID, model_path="YanweiLi/llama-vid-7b-full-224-video-fps-1" + ), + "Video-ChatGPT": partial( + VideoChatGPT, model_path="MBZUAI/Video-ChatGPT-7B", dir_root=VideoChatGPT_ROOT + ), + "PLLaVA-7B": partial(PLLaVA, model_path="ermu2001/pllava-7b", dir_root=PLLaVA_ROOT), + "PLLaVA-13B": partial( + PLLaVA, model_path="ermu2001/pllava-13b", dir_root=PLLaVA_ROOT + ), + "PLLaVA-34B": partial( + PLLaVA, model_path="ermu2001/pllava-34b", dir_root=PLLaVA_ROOT + ), +} + +ungrouped = { + 'llava_uhd_resampler_query_49': partial(LLaVA_UHD_SIGLIP2_SLICE, model_path='https://huggingface.co/ZzzHelloWorld/llava-uhd-final/tree/main'), + 'llava_uhd_final': partial(LLaVA_UHD_SIGLIP2, model_path='https://huggingface.co/ZzzHelloWorld/llava_uhd_resampler_query_49'), +} + +o1_key = os.environ.get('O1_API_KEY', None) +o1_base = os.environ.get('O1_API_BASE', None) +o1_apis = { + 'o1': partial( + GPT4V, + model="o1-2024-12-17", + key=o1_key, + api_base=o1_base, + temperature=0, + img_detail='high', + retry=3, + timeout=1800, + max_tokens=16384, + verbose=False, + + ), + 'o3': partial( + GPT4V, + model="o3-2025-04-16", + key=o1_key, + api_base=o1_base, + temperature=0, + img_detail='high', + retry=3, + timeout=1800, + max_tokens=16384, + verbose=False, + ), + 'o4-mini': partial( + GPT4V, + model="o4-mini-2025-04-16", + key=o1_key, + api_base=o1_base, + temperature=0, + img_detail='high', + retry=3, + 
timeout=1800, + max_tokens=16384, + verbose=False, + ), +} + +api_models = { + # GPT + "GPT4V": partial( + GPT4V, + model="gpt-4-1106-vision-preview", + temperature=0, + img_size=512, + img_detail="low", + retry=10, + verbose=False, + ), + "GPT4V_HIGH": partial( + GPT4V, + model="gpt-4-1106-vision-preview", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4V_20240409": partial( + GPT4V, + model="gpt-4-turbo-2024-04-09", + temperature=0, + img_size=512, + img_detail="low", + retry=10, + verbose=False, + ), + "GPT4V_20240409_HIGH": partial( + GPT4V, + model="gpt-4-turbo-2024-04-09", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o": partial( + GPT4V, + model="gpt-4o-2024-05-13", + temperature=0, + img_size=512, + img_detail="low", + retry=10, + verbose=False, + ), + "GPT4o_HIGH": partial( + GPT4V, + model="gpt-4o-2024-05-13", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_20240806": partial( + GPT4V, + model="gpt-4o-2024-08-06", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_20241120": partial( + GPT4V, + model="gpt-4o-2024-11-20", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "ChatGPT4o": partial( + GPT4V, + model="chatgpt-4o-latest", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4o_MINI": partial( + GPT4V, + model="gpt-4o-mini-2024-07-18", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "GPT4.5": partial( + GPT4V, + model='gpt-4.5-preview-2025-02-27', + temperature=0, + timeout=600, + img_size=-1, + img_detail='high', + retry=10, + verbose=False, + ), + "gpt-4.1-2025-04-14": partial( + GPT4V, + model="gpt-4.1-2025-04-14", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "gpt-4.1-mini-2025-04-14": partial( + 
GPT4V, + model="gpt-4.1-mini-2025-04-14", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "gpt-4.1-nano-2025-04-14": partial( + GPT4V, + model="gpt-4.1-nano-2025-04-14", + temperature=0, + img_size=-1, + img_detail="high", + retry=10, + verbose=False, + ), + "gpt-5-2025-08-07": partial( + GPT4V, + model="gpt-5-2025-08-07", + img_detail="high", + retry=3, + verbose=False, + max_tokens=2**14, + timeout=300, + ), + "gpt-5-mini-2025-08-07": partial( + GPT4V, + model="gpt-5-mini-2025-08-07", + img_detail="high", + retry=3, + verbose=False, + max_tokens=2**14, + timeout=300, + ), + "gpt-5-nano-2025-08-07": partial( + GPT4V, + model="gpt-5-nano-2025-08-07", + img_detail="high", + retry=3, + verbose=False, + max_tokens=2**14, + timeout=300, + ), + # Gemini + "GeminiPro1-0": partial( + Gemini, model="gemini-1.0-pro", temperature=0, retry=10 + ), # now GeminiPro1-0 is only supported by vertex backend + "GeminiPro1-5": partial( + Gemini, model="gemini-1.5-pro", temperature=0, retry=10 + ), + "GeminiFlash1-5": partial( + Gemini, model="gemini-1.5-flash", temperature=0, retry=10 + ), + "GeminiPro1-5-002": partial( + GPT4V, model="gemini-1.5-pro-002", temperature=0, retry=10 + ), # Internal Use Only + "GeminiFlash1-5-002": partial( + GPT4V, model="gemini-1.5-flash-002", temperature=0, retry=10 + ), # Internal Use Only + "GeminiFlash2-0": partial( + Gemini, model="gemini-2.0-flash", temperature=0, retry=10 + ), + "GeminiFlashLite2-0": partial( + Gemini, model="gemini-2.0-flash-lite", temperature=0, retry=10 + ), + "GeminiFlash2-5": partial( + Gemini, model="gemini-2.5-flash", temperature=0, retry=10 + ), + "GeminiPro2-5": partial( + Gemini, model="gemini-2.5-pro", temperature=0, retry=10 + ), + + # Qwen-VL + "QwenVLPlus": partial(QwenVLAPI, model="qwen-vl-plus", temperature=0, retry=10), + "QwenVLMax": partial(QwenVLAPI, model="qwen-vl-max", temperature=0, retry=10), + "QwenVLMax-250408": partial(QwenVLAPI, model="qwen-vl-max-2025-04-08", 
temperature=0, retry=10), + + # Reka + "RekaEdge": partial(Reka, model="reka-edge-20240208"), + "RekaFlash": partial(Reka, model="reka-flash-20240226"), + "RekaCore": partial(Reka, model="reka-core-20240415"), + # Step1V + "Step1V": partial( + GPT4V, + model="step-1v-32k", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + "Step1.5V-mini": partial( + GPT4V, + model="step-1.5v-mini", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + "Step1o": partial( + GPT4V, + model="step-1o-vision-32k", + api_base="https://api.stepfun.com/v1/chat/completions", + temperature=0, + retry=10, + img_size=-1, + img_detail="high", + ), + # Yi-Vision + "Yi-Vision": partial( + GPT4V, + model="yi-vision", + api_base="https://api.lingyiwanwu.com/v1/chat/completions", + temperature=0, + retry=10, + ), + # Claude + "Claude3V_Opus": partial( + Claude3V, model="claude-3-opus-20240229", temperature=0, retry=10, verbose=False + ), + "Claude3V_Sonnet": partial( + Claude3V, + model="claude-3-sonnet-20240229", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3V_Haiku": partial( + Claude3V, + model="claude-3-haiku-20240307", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-5V_Sonnet": partial( + Claude3V, + model="claude-3-5-sonnet-20240620", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-5V_Sonnet_20241022": partial( + Claude3V, + model="claude-3-5-sonnet-20241022", + temperature=0, + retry=10, + verbose=False, + ), + "Claude3-7V_Sonnet": partial( + Claude3V, + model="claude-3-7-sonnet-20250219", + temperature=0, + retry=10, + verbose=False, + ), + "Claude4_Opus": partial( + Claude3V, + model="claude-4-opus-20250514", + temperature=0, + retry=10, + verbose=False, + timeout=1800 + ), + "Claude4_Sonnet": partial( + Claude3V, + model="claude-4-sonnet-20250514", + temperature=0, + retry=10, + verbose=False, + 
timeout=1800 + ), + # GLM4V + "GLM4V": partial(GLMVisionAPI, model="glm4v-biz-eval", temperature=0, retry=10), + "GLM4V_PLUS": partial(GLMVisionAPI, model="glm-4v-plus", temperature=0, retry=10), + "GLM4V_PLUS_20250111": partial( + GLMVisionAPI, model="glm-4v-plus-0111", temperature=0, retry=10 + ), + # MiniMax abab + "abab6.5s": partial( + GPT4V, + model="abab6.5s-chat", + api_base="https://api.minimax.chat/v1/chat/completions", + temperature=0, + retry=10, + ), + "abab7-preview": partial( + GPT4V, + model="abab7-chat-preview", + api_base="https://api.minimax.chat/v1/chat/completions", + temperature=0, + retry=10, + ), + # CongRong + "CongRong-v1.5": partial(CWWrapper, model="cw-congrong-v1.5", temperature=0, retry=10), + "CongRong-v2.0": partial(CWWrapper, model="cw-congrong-v2.0", temperature=0, retry=10), + # SenseNova + "SenseNova-V6-Pro": partial( + SenseChatVisionAPI, model="SenseNova-V6-Pro", temperature=0, retry=10 + ), + "SenseNova-V6-Reasoner": partial( + SenseChatVisionAPI, model="SenseNova-V6-Reasoner", temperature=0, retry=10 + ), + "SenseNova-V6-5-Pro": partial( + SenseChatVisionAPI, model="SenseNova-V6-5-Pro", retry=10 + ), + "HunYuan-Vision": partial( + HunyuanVision, model="hunyuan-vision", temperature=0, retry=10 + ), + "HunYuan-Standard-Vision": partial( + HunyuanVision, model="hunyuan-standard-vision", temperature=0, retry=10 + ), + "HunYuan-Large-Vision": partial( + HunyuanVision, model="hunyuan-large-vision", temperature=0, retry=10 + ), + "BailingMM-Lite-1203": partial( + bailingMMAPI, model="BailingMM-Lite-1203", temperature=0, retry=10 + ), + "BailingMM-Pro-0120": partial( + bailingMMAPI, model="BailingMM-Pro-0120", temperature=0, retry=10 + ), + # BlueLM-2.5 + "BlueLM-2.5-3B": partial(BlueLM_API, model="BlueLM-2.5-3B", temperature=0, retry=3), + # JiuTian-VL + "JTVL": partial(JTVLChatAPI, model="jt-vl-chat", temperature=0, retry=10), + "Taiyi": partial(TaiyiAPI, model="taiyi", temperature=0, retry=10), + # TeleMM + "TeleMM": 
partial(TeleMMAPI, model="TeleAI/TeleMM", temperature=0, retry=10), + "Qwen2.5-VL-32B-Instruct-SiliconFlow": partial( + SiliconFlowAPI, model="Qwen/Qwen2.5-VL-32B-Instruct", temperature=0, retry=10), + # lmdeploy api + "lmdeploy": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + ), + "lmdeploy_internvl_78B_MPO": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + timeout=100, + ), + "lmdeploy_qvq_72B_preview": partial( + LMDeployAPI, + api_base="http://0.0.0.0:23333/v1/chat/completions", + temperature=0, + retry=10, + timeout=300, + ), + 'Taichu-VLR-3B': partial( + TaichuVLRAPI, + model='taichu_vlr_3b', + url="https://platform.wair.ac.cn/maas/v1/chat/completions" + ), + 'Taichu-VLR-7B': partial( + TaichuVLRAPI, + model='taichu_vlr_7b', + url="https://platform.wair.ac.cn/maas/v1/chat/completions" + ), + # doubao_vl + "DoubaoVL": partial( + DoubaoVL, model="Doubao-1.5-vision-pro", temperature=0, retry=3, verbose=False + ), + "Seed1.5-VL": partial( + DoubaoVL, + model="doubao-1-5-thinking-vision-pro-250428", + temperature=0, + retry=3, + verbose=False, + max_tokens=16384, + ), + "Seed1.6": partial( + DoubaoVL, + model="doubao-seed-1.6-250615", + temperature=0, + retry=3, + verbose=False, + max_tokens=16384, + ), + "Seed1.6-Flash": partial( + DoubaoVL, + model="doubao-seed-1.6-flash-250615", + temperature=0, + retry=3, + verbose=False, + max_tokens=16384, + ), + "Seed1.6-Thinking": partial( + DoubaoVL, + model="doubao-seed-1.6-thinking-250615", + temperature=0, + retry=3, + verbose=False, + max_tokens=16384, + ), + # Shopee MUG-U + 'MUG-U-7B': partial( + MUGUAPI, + model='MUG-U', + temperature=0, + retry=10, + verbose=False, + timeout=300), + # grok + "grok-vision-beta": partial( + GPT4V, + model="grok-vision-beta", + api_base="https://api.x.ai/v1/chat/completions", + temperature=0, + retry=10, + ), + "grok-2-vision-1212": partial( + GPT4V, + 
model="grok-2-vision", + api_base="https://api.x.ai/v1/chat/completions", + temperature=0, + retry=10, + ), + "grok-4-0709": partial( + GPT4V, + model="grok-4-0709", + api_base="https://api.x.ai/v1/chat/completions", + temperature=0, + retry=3, + timeout=1200, + max_tokens=16384 + ), + # kimi + "moonshot-v1-8k": partial( + GPT4V, + model="moonshot-v1-8k-vision-preview", + api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), + "moonshot-v1-32k": partial( + GPT4V, + model="moonshot-v1-32k-vision-preview", + api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), + "moonshot-v1-128k": partial( + GPT4V, + model="moonshot-v1-128k-vision-preview", + api_base="https://api.moonshot.cn/v1/chat/completions", + temperature=0, + retry=10, + ), + 'ernie4.5-turbo': partial( + GPT4V, + model='ernie-4.5-turbo-vl-32k', + temperature=0, + retry=3, + max_tokens=12000, + ), + 'ernie4.5-a3b': partial( + GPT4V, + model='ernie-4.5-vl-28b-a3b', + temperature=0, + retry=3, + max_tokens=8000, + ) +} + +import copy as cp +api_models['gpt-5'] = cp.deepcopy(api_models['gpt-5-2025-08-07']) +api_models['gpt-5-mini'] = cp.deepcopy(api_models['gpt-5-mini-2025-08-07']) +api_models['gpt-5-nano'] = cp.deepcopy(api_models['gpt-5-nano-2025-08-07']) + +emu_series = { + "emu2_chat": partial(Emu, model_path="BAAI/Emu2-Chat"), + "emu3_chat": partial(Emu3_chat, model_path="BAAI/Emu3-Chat"), + "emu3_gen": partial(Emu3_gen, model_path="BAAI/Emu3-Gen"), +} + +granite_vision_series = { + 'granite_vision_3.1_2b_preview': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.1-2b-preview"), + 'granite_vision_3.2_2b': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.2-2b"), + 'granite_vision_3.3_2b': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.3-2b"), +} + +mmalaya_series = { + "MMAlaya": partial(MMAlaya, model_path="DataCanvas/MMAlaya"), + "MMAlaya2": partial(MMAlaya2, 
model_path="DataCanvas/MMAlaya2"), +} + +minicpm_series = { + "MiniCPM-V": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V"), + "MiniCPM-V-2": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V-2"), + "MiniCPM-Llama3-V-2_5": partial( + MiniCPM_Llama3_V, model_path="openbmb/MiniCPM-Llama3-V-2_5" + ), + "MiniCPM-V-2_6": partial(MiniCPM_V_2_6, model_path="openbmb/MiniCPM-V-2_6"), + "MiniCPM-o-2_6": partial(MiniCPM_o_2_6, model_path="openbmb/MiniCPM-o-2_6"), + "MiniCPM-V-4": partial(MiniCPM_V_4, model_path="openbmb/MiniCPM-V-4"), + "MiniCPM-V-4_5": partial(MiniCPM_V_4_5, model_path="openbmb/MiniCPM-V-4_5"), +} + +xtuner_series = { + "llava-internlm2-7b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm2-chat-7b", + llava_path="xtuner/llava-internlm2-7b", + visual_select_layer=-2, + prompt_template="internlm2_chat", + ), + "llava-internlm2-20b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm2-chat-20b", + llava_path="xtuner/llava-internlm2-20b", + visual_select_layer=-2, + prompt_template="internlm2_chat", + ), + "llava-internlm-7b": partial( + LLaVA_XTuner, + llm_path="internlm/internlm-chat-7b", + llava_path="xtuner/llava-internlm-7b", + visual_select_layer=-2, + prompt_template="internlm_chat", + ), + "llava-v1.5-7b-xtuner": partial( + LLaVA_XTuner, + llm_path="lmsys/vicuna-7b-v1.5", + llava_path="xtuner/llava-v1.5-7b-xtuner", + visual_select_layer=-2, + prompt_template="vicuna", + ), + "llava-v1.5-13b-xtuner": partial( + LLaVA_XTuner, + llm_path="lmsys/vicuna-13b-v1.5", + llava_path="xtuner/llava-v1.5-13b-xtuner", + visual_select_layer=-2, + prompt_template="vicuna", + ), + "llava-llama-3-8b": partial( + LLaVA_XTuner, + llm_path="xtuner/llava-llama-3-8b-v1_1", + llava_path="xtuner/llava-llama-3-8b-v1_1", + visual_select_layer=-2, + prompt_template="llama3_chat", + ), +} + +qwen_series = { + "qwen_base": partial(QwenVL, model_path="Qwen/Qwen-VL"), + "qwen_chat": partial(QwenVLChat, model_path="Qwen/Qwen-VL-Chat"), + "monkey": partial(Monkey, 
model_path="echo840/Monkey"), + "monkey-chat": partial(MonkeyChat, model_path="echo840/Monkey-Chat"), + "minimonkey": partial(MiniMonkey, model_path="mx262/MiniMonkey"), +} + +thyme_series = { + "Thyme-7B": partial(Thyme, model_path="Kwai-Keye/Thyme-RL") +} + +llava_series = { + "llava_v1.5_7b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-7b"), + "llava_v1.5_13b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-13b"), + "llava_v1_7b": partial(LLaVA, model_path=LLAVA_V1_7B_MODEL_PTH), + "sharegpt4v_7b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-7B"), + "sharegpt4v_13b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-13B"), + "llava_next_vicuna_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-7b-hf" + ), + "llava_next_vicuna_13b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-13b-hf" + ), + "llava_next_mistral_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-v1.6-mistral-7b-hf" + ), + "llava_next_yi_34b": partial(LLaVA_Next, model_path="llava-hf/llava-v1.6-34b-hf"), + "llava_next_llama3": partial( + LLaVA_Next, model_path="llava-hf/llama3-llava-next-8b-hf" + ), + "llava_next_72b": partial(LLaVA_Next, model_path="llava-hf/llava-next-72b-hf"), + "llava_next_110b": partial(LLaVA_Next, model_path="llava-hf/llava-next-110b-hf"), + "llava_next_qwen_32b": partial( + LLaVA_Next2, model_path="lmms-lab/llava-next-qwen-32b" + ), + "llava_next_interleave_7b": partial( + LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-hf" + ), + "llava_next_interleave_7b_dpo": partial( + LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-dpo-hf" + ), + "llava-onevision-qwen2-0.5b-ov-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-ov-hf" + ), + "llava-onevision-qwen2-0.5b-si-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-si-hf" + ), + "llava-onevision-qwen2-7b-ov-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-ov-hf" + ), + 
"llava-onevision-qwen2-7b-si-hf": partial( + LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-si-hf" + ), + "llava_onevision_qwen2_0.5b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-si" + ), + "llava_onevision_qwen2_7b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-si" + ), + "llava_onevision_qwen2_72b_si": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-si" + ), + "llava_onevision_qwen2_0.5b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-ov" + ), + "llava_onevision_qwen2_7b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-ov" + ), + "llava_onevision_qwen2_72b_ov": partial( + LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-ov-sft" + ), + "Aquila-VL-2B": partial(LLaVA_OneVision, model_path="BAAI/Aquila-VL-2B-llava-qwen"), + "llava_video_qwen2_7b": partial( + LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-7B-Qwen2" + ), + "llava_video_qwen2_72b": partial( + LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-72B-Qwen2" + ), +} + +varco_vision_series = { + "varco-vision-hf": partial( + LLaVA_OneVision_HF, model_path="NCSOFT/VARCO-VISION-14B-HF" + ), + "varco-vision-2-1.7b": partial( + VarcoVision, model_path="NCSOFT/VARCO-VISION-2.0-1.7B" + ), + "varco-vision-2-14b": partial( + VarcoVision, model_path="NCSOFT/VARCO-VISION-2.0-14B" + ), +} + +vita_series = { + "vita": partial(VITA, model_path="VITA-MLLM/VITA", root=VITA_ROOT), + "vita_qwen2": partial(VITAQwen2, model_path="VITA-MLLM/VITA-1.5", root=VITA_ROOT), +} + +long_vita_series = { + "Long-VITA-16K": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-16K_HF", max_num_frame=128 + ), + "Long-VITA-128K": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-128K_HF", max_num_frame=256 + ), + "Long-VITA-1M": partial( + LongVITA, model_path="VITA-MLLM/Long-VITA-1M_HF", max_num_frame=256 + ), +} + +internvl = { + 
"InternVL-Chat-V1-1": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-1", version="V1.1" + ), + "InternVL-Chat-V1-2": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2", version="V1.2" + ), + "InternVL-Chat-V1-2-Plus": partial( + InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2-Plus", version="V1.2" + ), + "InternVL-Chat-V1-5": partial( + InternVLChat, + model_path="OpenGVLab/InternVL-Chat-V1-5", + version="V1.5", + ) +} + +mini_internvl = { + "Mini-InternVL-Chat-2B-V1-5": partial( + InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-2B-V1-5", version="V1.5" + ), + "Mini-InternVL-Chat-4B-V1-5": partial( + InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-4B-V1-5", version="V1.5" + ), +} + +internvl2 = { + "InternVL2-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-1B", version="V2.0" + ), + "InternVL2-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-2B", version="V2.0" + ), + "InternVL2-4B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-4B", version="V2.0" + ), + "InternVL2-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-8B", version="V2.0" + ), + "InternVL2-26B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-26B", version="V2.0" + ), + "InternVL2-40B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-40B", version="V2.0" + ), + "InternVL2-76B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-Llama3-76B", version="V2.0" + ), + "InternVL2-8B-MPO": partial( + InternVLChat, model_path="OpenGVLab/InternVL2-8B-MPO", version="V2.0" + ), + "InternVL2-8B-MPO-CoT": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2-8B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), +} + +internvl2_5 = { + "InternVL2_5-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-1B", version="V2.0" + ), + "InternVL2_5-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-2B", version="V2.0" + ), + "QTuneVL1-2B": partial( + InternVLChat, 
model_path="hanchaow/QTuneVL1-2B", version="V2.0" + ), + "InternVL2_5-4B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-4B", version="V2.0" + ), + "InternVL2_5-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0" + ), + "InternVL2_5-26B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-26B", version="V2.0" + ), + "InternVL2_5-38B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-38B", version="V2.0" + ), + "InternVL2_5-78B": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-78B", version="V2.0" + ), + # InternVL2.5 series with Best-of-N evaluation + "InternVL2_5-8B-BoN-8": partial( + InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0", + best_of_n=8, reward_model_path="OpenGVLab/VisualPRM-8B", + ), +} + +internvl2_5_mpo = { + "InternVL2_5-1B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-1B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-2B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-2B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-4B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-4B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-8B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-8B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-26B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-26B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-38B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-38B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-78B-MPO": partial( + InternVLChat, + model_path="OpenGVLab/InternVL2_5-78B-MPO", + version="V2.0", + use_mpo_prompt=True, + ), + "InternVL2_5-8B-GUI": partial( + InternVLChat, + model_path="/fs-computility/mllm1/shared/zhaoxiangyu/models/internvl2_5_8b_internlm2_5_7b_dynamic_res_stage1", + version="V2.0", + 
max_new_tokens=512, + screen_parse=False, + ), + "InternVL3-7B-GUI": partial( + InternVLChat, + model_path="/fs-computility/mllm1/shared/zhaoxiangyu/GUI/checkpoints/internvl3_7b_dynamic_res_stage1_56/", + version="V2.0", + max_new_tokens=512, + screen_parse=False, + ), +} + +internvl3 = { + "InternVL3-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-1B", version="V2.0" + ), + "InternVL3-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-2B", version="V2.0" + ), + "InternVL3-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-8B", version="V2.0", + ), + "InternVL3-9B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-9B", version="V2.0" + ), + "InternVL3-14B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-14B", version="V2.0" + ), + "InternVL3-38B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-38B", version="V2.0" + ), + "InternVL3-78B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3-78B", version="V2.0" + ), +} + +internvl3_5 = { + "InternVL3_5-1B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-1B", version="V2.0" + ), + "InternVL3_5-2B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-2B", version="V2.0" + ), + "InternVL3_5-4B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-4B", version="V2.0" + ), + "InternVL3_5-8B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-8B", version="V2.0" + ), + "InternVL3_5-14B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-14B", version="V2.0" + ), + "InternVL3_5-GPT-OSS-20B-A4B-Preview": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview", version="V2.0" + ), + "InternVL3_5-30B-A3B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-30B-A3B", version="V2.0" + ), + "InternVL3_5-38B": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-38B", version="V2.0" + ), + "InternVL3_5-241B-A28B": partial( + InternVLChat, 
model_path="OpenGVLab/InternVL3_5-241B-A28B", version="V2.0" + ), + + "InternVL3_5-1B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-1B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-2B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-2B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-4B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-4B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-8B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-8B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-14B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-14B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-GPT-OSS-20B-A4B-Preview-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-30B-A3B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-30B-A3B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-38B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-38B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), + "InternVL3_5-241B-A28B-Thinking": partial( + InternVLChat, model_path="OpenGVLab/InternVL3_5-241B-A28B", use_lmdeploy=True, + max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0" + ), +} + +sail_series = { + "SAIL-VL-2B": partial(SailVL, 
model_path="BytedanceDouyinContent/SAIL-VL-2B"), + "SAIL-VL-1.5-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d5-2B", use_msac = True), + "SAIL-VL-1.5-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d5-8B", use_msac = True), + "SAIL-VL-1.6-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d6-8B", use_msac = True), + "SAIL-VL-1.7-Thinking-2B-2507": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d7-Thinking-2B-2507", use_msac = True, use_cot=True, max_new_tokens=4096), + "SAIL-VL-1.7-Thinking-8B-2507": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d7-Thinking-8B-2507", use_msac = True, use_cot=True, max_new_tokens=4096), + "SAIL-VL2-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL2-2B", use_msac = True), + "SAIL-VL2-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL2-8B", use_msac = True), +} + +ristretto_series = { + "Ristretto-3B": partial(Ristretto, model_path="LiAutoAD/Ristretto-3B"), +} + +yivl_series = { + "Yi_VL_6B": partial(Yi_VL, model_path="01-ai/Yi-VL-6B", root=Yi_ROOT), + "Yi_VL_34B": partial(Yi_VL, model_path="01-ai/Yi-VL-34B", root=Yi_ROOT), +} + +xcomposer_series = { + "XComposer": partial(XComposer, model_path="internlm/internlm-xcomposer-vl-7b"), + "sharecaptioner": partial(ShareCaptioner, model_path="Lin-Chen/ShareCaptioner"), + "XComposer2": partial(XComposer2, model_path="internlm/internlm-xcomposer2-vl-7b"), + "XComposer2_1.8b": partial( + XComposer2, model_path="internlm/internlm-xcomposer2-vl-1_8b" + ), + "XComposer2_4KHD": partial( + XComposer2_4KHD, model_path="internlm/internlm-xcomposer2-4khd-7b" + ), + "XComposer2d5": partial( + XComposer2d5, model_path="internlm/internlm-xcomposer2d5-7b" + ), +} + +minigpt4_series = { + "MiniGPT-4-v2": partial(MiniGPT4, mode="v2", root=MiniGPT4_ROOT), + "MiniGPT-4-v1-7B": partial(MiniGPT4, mode="v1_7b", root=MiniGPT4_ROOT), + "MiniGPT-4-v1-13B": partial(MiniGPT4, mode="v1_13b", root=MiniGPT4_ROOT), +} 
+ +idefics_series = { + "idefics_9b_instruct": partial( + IDEFICS, model_path="HuggingFaceM4/idefics-9b-instruct" + ), + "idefics_80b_instruct": partial( + IDEFICS, model_path="HuggingFaceM4/idefics-80b-instruct" + ), + "idefics2_8b": partial(IDEFICS2, model_path="HuggingFaceM4/idefics2-8b"), + # Idefics3 follows Idefics2 Pattern + "Idefics3-8B-Llama3": partial( + IDEFICS2, model_path="HuggingFaceM4/Idefics3-8B-Llama3" + ), +} + +smolvlm_series = { + "SmolVLM-256M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-256M-Instruct"), + "SmolVLM-500M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-500M-Instruct"), + "SmolVLM": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct"), + "SmolVLM-DPO": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct-DPO"), + "SmolVLM-Synthetic": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Synthetic"), + "SmolVLM2-256M": partial( + SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-256M-Video-Instruct" + ), + "SmolVLM2-500M": partial( + SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-500M-Video-Instruct" + ), + "SmolVLM2": partial(SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-2.2B-Instruct"), +} + +instructblip_series = { + "instructblip_7b": partial(InstructBLIP, name="instructblip_7b"), + "instructblip_13b": partial(InstructBLIP, name="instructblip_13b"), +} + +deepseekvl_series = { + "deepseek_vl_7b": partial(DeepSeekVL, model_path="deepseek-ai/deepseek-vl-7b-chat"), + "deepseek_vl_1.3b": partial( + DeepSeekVL, model_path="deepseek-ai/deepseek-vl-1.3b-chat" + ), +} + +deepseekvl2_series = { + "deepseek_vl2_tiny": partial( + DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-tiny" + ), + "deepseek_vl2_small": partial( + DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-small" + ), + "deepseek_vl2": partial(DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2"), +} + +janus_series = { + "Janus-1.3B": partial(Janus, model_path="deepseek-ai/Janus-1.3B"), + "Janus-Pro-1B": partial(Janus, 
model_path="deepseek-ai/Janus-Pro-1B"), + "Janus-Pro-7B": partial(Janus, model_path="deepseek-ai/Janus-Pro-7B"), +} + +cogvlm_series = { + "cogvlm-grounding-generalist": partial( + CogVlm, + model_path="THUDM/cogvlm-grounding-generalist-hf", + tokenizer_name="lmsys/vicuna-7b-v1.5", + ), + "cogvlm-chat": partial( + CogVlm, model_path="THUDM/cogvlm-chat-hf", tokenizer_name="lmsys/vicuna-7b-v1.5" + ), + "cogvlm2-llama3-chat-19B": partial( + CogVlm, model_path="THUDM/cogvlm2-llama3-chat-19B" + ), + "glm-4v-9b": partial(GLM4v, model_path="THUDM/glm-4v-9b"), + "GLM4_1VThinking-9b": partial(GLMThinking, model_path="THUDM/GLM-4.1V-9B-Thinking"), + "GLM4_5V": partial(GLMThinking, model_path="THUDM/GLM-4.5V"), +} + +wemm_series = { + "WeMM": partial(WeMM, model_path="feipengma/WeMM"), +} + +cambrian_series = { + "cambrian_8b": partial(Cambrian, model_path="nyu-visionx/cambrian-8b"), + "cambrian_13b": partial(Cambrian, model_path="nyu-visionx/cambrian-13b"), + "cambrian_34b": partial(Cambrian, model_path="nyu-visionx/cambrian-34b"), +} + +chameleon_series = { + "chameleon_7b": partial(Chameleon, model_path="facebook/chameleon-7b"), + "chameleon_30b": partial(Chameleon, model_path="facebook/chameleon-30b"), +} + +vila_series = { + "VILA1.5-3b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-3b"), + "Llama-3-VILA1.5-8b": partial( + VILA, model_path="Efficient-Large-Model/Llama-3-VILA1.5-8b" + ), + "VILA1.5-13b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-13b"), + "VILA1.5-40b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-40b"), + "NVILA-8B": partial(NVILA, model_path="Efficient-Large-Model/NVILA-8B"), + "NVILA-15B": partial(NVILA, model_path="Efficient-Large-Model/NVILA-15B"), +} + +ovis_series = { + "Ovis1.5-Llama3-8B": partial(Ovis, model_path="AIDC-AI/Ovis1.5-Llama3-8B"), + "Ovis1.5-Gemma2-9B": partial(Ovis, model_path="AIDC-AI/Ovis1.5-Gemma2-9B"), + "Ovis1.6-Gemma2-9B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Gemma2-9B"), + 
"Ovis1.6-Llama3.2-3B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Llama3.2-3B"), + "Ovis1.6-Gemma2-27B": partial( + Ovis1_6_Plus, model_path="AIDC-AI/Ovis1.6-Gemma2-27B" + ), + "Ovis2-1B": partial(Ovis2, model_path="AIDC-AI/Ovis2-1B"), + "Ovis2-2B": partial(Ovis2, model_path="AIDC-AI/Ovis2-2B"), + "Ovis2-4B": partial(Ovis2, model_path="AIDC-AI/Ovis2-4B"), + "Ovis2-8B": partial(Ovis2, model_path="AIDC-AI/Ovis2-8B"), + "Ovis2-16B": partial(Ovis2, model_path="AIDC-AI/Ovis2-16B"), + "Ovis2-34B": partial(Ovis2, model_path="AIDC-AI/Ovis2-34B"), + "Ovis-U1-3B": partial(OvisU1, model_path="AIDC-AI/Ovis-U1-3B"), +} + +mantis_series = { + "Mantis-8B-siglip-llama3": partial( + Mantis, model_path="TIGER-Lab/Mantis-8B-siglip-llama3" + ), + "Mantis-8B-clip-llama3": partial( + Mantis, model_path="TIGER-Lab/Mantis-8B-clip-llama3" + ), + "Mantis-8B-Idefics2": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Idefics2"), + "Mantis-8B-Fuyu": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Fuyu"), +} + +phi3_series = { + "Phi-3-Vision": partial( + Phi3Vision, model_path="microsoft/Phi-3-vision-128k-instruct" + ), + "Phi-3.5-Vision": partial( + Phi3_5Vision, model_path="microsoft/Phi-3.5-vision-instruct" + ), +} + +phi4_series = { + 'Phi-4-Vision': partial(Phi4Multimodal, model_path='microsoft/Phi-4-multimodal-instruct'), +} + +xgen_mm_series = { + "xgen-mm-phi3-interleave-r-v1.5": partial( + XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-interleave-r-v1.5" + ), + "xgen-mm-phi3-dpo-r-v1.5": partial( + XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-dpo-r-v1.5" + ), +} + +hawkvl_series = { + "HawkVL-2B": partial( + HawkVL, + model_path="xjtupanda/HawkVL-2B", + min_pixels=4 * 28 * 28, + max_pixels=6800 * 28 * 28, + use_custom_prompt=True + ) +} + +qwen2vl_series = { + "Qwen-VL-Max-20250813": partial( + Qwen2VLAPI, + model="qwen-vl-max-2025-08-13", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + max_length=8192, + ), + "Qwen-VL-Max-0809": partial( + 
Qwen2VLAPI, + model="qwen-vl-max-0809", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen-VL-Plus-0809": partial( + Qwen2VLAPI, + model="qwen-vl-plus-0809", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "QVQ-72B-Preview": partial( + Qwen2VLChat, + model_path="Qwen/QVQ-72B-Preview", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + system_prompt="You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.", + max_new_tokens=8192, + post_process=False, + ), + "Qwen2-VL-72B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-72B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-GPTQ-Int4": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-7B-Instruct-GPTQ-Int8": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-GPTQ-Int4": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2-VL-2B-Instruct-GPTQ-Int8": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8", 
+ min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "XinYuan-VL-2B-Instruct": partial( + Qwen2VLChat, + model_path="Cylingo/Xinyuan-VL-2B", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + ), + "Qwen2.5-VL-3B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-3B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-3B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-3B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-7B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-7B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-7B-Instruct-ForVideo": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-7B-Instruct", + min_pixels=128 * 28 * 28, + max_pixels=768 * 28 * 28, + total_pixels=24576 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-7B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-7B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-32B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-32B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-72B-Instruct": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-72B-Instruct", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "MiMo-VL-7B-SFT": partial( + Qwen2VLChat, + model_path="XiaomiMiMo/MiMo-VL-7B-SFT", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_lmdeploy=True + ), + "MiMo-VL-7B-RL": partial( + Qwen2VLChat, + model_path="XiaomiMiMo/MiMo-VL-7B-RL", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + use_lmdeploy=True + ), + 
"Qwen2.5-VL-72B-Instruct-ForVideo": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-72B-Instruct", + min_pixels=128 * 28 * 28, + max_pixels=768 * 28 * 28, + total_pixels=24576 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-VL-72B-Instruct-AWQ": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-VL-72B-Instruct-AWQ", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + "Qwen2.5-Omni-7B-ForVideo": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-Omni-7B", + min_pixels=128 * 28 * 28, + max_pixels=768 * 28 * 28, + total_pixels=24576 * 28 * 28, + use_custom_prompt=False, + use_audio_in_video=True, # set use audio in video + ), + "Qwen2.5-Omni-7B": partial( + Qwen2VLChat, + model_path="Qwen/Qwen2.5-Omni-7B", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=False, + ), + 'VLM-R1': partial( + VLMR1Chat, + model_path='omlab/VLM-R1-Qwen2.5VL-3B-Math-0305', + min_pixels=1280*28*28, + max_pixels=16384*28*28, + use_custom_prompt=False), + 'VLAA-Thinker-Qwen2.5VL-3B': partial( + VLAAThinkerChat, + model_path='UCSC-VLAA/VLAA-Thinker-Qwen2.5VL-3B', + min_pixels=1280*28*28, + max_pixels=16384*28*28, + use_custom_prompt=False, + post_process=True, # post processing for evaluation + system_prompt=('' + "You are VL-Thinking🤔, a helpful assistant with excellent reasoning ability." + " A user asks you a question, and you should try to solve it." + " You should first think about the reasoning process in the mind and then provides the user with the answer." 
+ " The reasoning process and answer are enclosed within and" + " tags, respectively, i.e., reasoning process here " + " answer here " + ), + ), + 'VLAA-Thinker-Qwen2.5VL-7B': partial( + VLAAThinkerChat, + model_path='UCSC-VLAA/VLAA-Thinker-Qwen2.5VL-7B', + min_pixels=1280*28*28, + max_pixels=16384*28*28, + use_custom_prompt=False, + post_process=True, # post processing for evaluation + system_prompt=('' + "You are VL-Thinking🤔, a helpful assistant with excellent reasoning ability." + " A user asks you a question, and you should try to solve it." + " You should first think about the reasoning process in the mind and then provides the user with the answer." + " The reasoning process and answer are enclosed within and" + " tags, respectively, i.e., reasoning process here " + " answer here " + ), + ), + 'WeThink-Qwen2.5VL-7B': partial( + WeThinkVL, + model_path='yangjie-cv/WeThink-Qwen2.5VL-7B', + min_pixels=1280*28*28, + max_pixels=16384*28*28, + use_custom_prompt=False, + system_prompt=("You FIRST think about the reasoning process as an internal monologue and then provide the final answer.\nThe reasoning process MUST BE enclosed within tags. The final answer MUST BE enclosed within tags." 
+ ), + ), +} + +slime_series = { + "Slime-7B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-7B"), + "Slime-8B": partial(SliME, model_path="yifanzhang114/SliME-Llama3-8B"), + "Slime-13B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-13B"), +} + +eagle_series = { + "Eagle-X4-8B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-8B-Plus"), + "Eagle-X4-13B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-13B-Plus"), + "Eagle-X5-7B": partial(Eagle, model_path="NVEagle/Eagle-X5-7B"), + "Eagle-X5-13B": partial(Eagle, model_path="NVEagle/Eagle-X5-13B"), + "Eagle-X5-13B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-13B-Chat"), + "Eagle-X5-34B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Chat"), + "Eagle-X5-34B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Plus"), +} + +moondream_series = { + "Moondream1": partial(Moondream1, model_path="vikhyatk/moondream1"), + "Moondream2": partial(Moondream2, model_path="vikhyatk/moondream2"), +} + +llama_series = { + "Llama-3.2-11B-Vision-Instruct": partial( + llama_vision, model_path="meta-llama/Llama-3.2-11B-Vision-Instruct" + ), + "LLaVA-CoT": partial(llama_vision, model_path="Xkev/Llama-3.2V-11B-cot"), + "Llama-3.2-90B-Vision-Instruct": partial( + llama_vision, model_path="meta-llama/Llama-3.2-90B-Vision-Instruct" + ), + "Llama-4-Scout-17B-16E-Instruct": partial( + llama4, model_path="meta-llama/Llama-4-Scout-17B-16E-Instruct", use_vllm=True + ), +} + +molmo_series = { + "molmoE-1B-0924": partial(molmo, model_path="allenai/MolmoE-1B-0924"), + "molmo-7B-D-0924": partial(molmo, model_path="allenai/Molmo-7B-D-0924"), + "molmo-7B-O-0924": partial(molmo, model_path="allenai/Molmo-7B-O-0924"), + "molmo-72B-0924": partial(molmo, model_path="allenai/Molmo-72B-0924"), +} + +kosmos_series = { + "Kosmos2": partial(Kosmos2, model_path="microsoft/kosmos-2-patch14-224") +} + +points_series = { + "POINTS-Yi-1.5-9B-Chat": partial( + POINTS, model_path="WePOINTS/POINTS-Yi-1-5-9B-Chat" + ), + 
"POINTS-Qwen-2.5-7B-Chat": partial( + POINTS, model_path="WePOINTS/POINTS-Qwen-2-5-7B-Chat" + ), + "POINTSV15-Qwen-2.5-7B-Chat": partial( + POINTSV15, model_path="WePOINTS/POINTS-1-5-Qwen-2-5-7B-Chat" + ), +} + +nvlm_series = { + "NVLM": partial(NVLM, model_path="nvidia/NVLM-D-72B"), +} + +vintern_series = { + "Vintern-3B-beta": partial(VinternChat, model_path="5CD-AI/Vintern-3B-beta"), + "Vintern-1B-v2": partial(VinternChat, model_path="5CD-AI/Vintern-1B-v2"), +} + +aria_series = {"Aria": partial(Aria, model_path="rhymes-ai/Aria")} + +h2ovl_series = { + "h2ovl-mississippi-2b": partial(H2OVLChat, model_path="h2oai/h2ovl-mississippi-2b"), + "h2ovl-mississippi-1b": partial( + H2OVLChat, model_path="h2oai/h2ovl-mississippi-800m" + ), +} + +valley_series = { + "valley2": partial( + Valley2Chat, model_path="bytedance-research/Valley-Eagle-7B" + ), + "valley2_dpo": partial( + Valley2Chat, model_path="bytedance-research/Valley2-DPO" + ), +} + +ola_series = { + "ola": partial(Ola, model_path="THUdyh/Ola-7b"), +} + +xvl_series = { + "X-VL-4B": partial(X_VL_HF, model_path="YannQi/X-VL-4B", temperature=0, retry=10), +} + +ross_series = { + "ross-qwen2-7b": partial(Ross, model_path="HaochenWang/ross-qwen2-7b"), +} + +ursa_series = { + "URSA-8B": partial(UrsaChat, model_path="URSA-MATH/URSA-8B"), + "URSA-8B-PS-GRPO": partial(UrsaChat, model_path="URSA-MATH/URSA-8B-PS-GRPO") +} + +gemma_series = { + "paligemma-3b-mix-448": partial( + PaliGemma, model_path="google/paligemma-3b-mix-448" + ), + 'Gemma3-4B': partial(Gemma3, model_path='google/gemma-3-4b-it'), + 'Gemma3-12B': partial(Gemma3, model_path='google/gemma-3-12b-it'), + 'Gemma3-27B': partial(Gemma3, model_path='google/gemma-3-27b-it') +} + +aguvis_series = { + "aguvis_7b": partial( + Qwen2VLChatAguvis, + model_path=os.getenv( + "EVAL_MODEL", + "xlangai/Aguvis-7B-720P", + ), + min_pixels=256 * 28 * 28, + max_pixels=46 * 26 * 28 * 28, + use_custom_prompt=False, + mode='grounding', + ) +} + +kimi_series = { + 
'Kimi-VL-A3B-Thinking': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Thinking'), + 'Kimi-VL-A3B-Instruct': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Instruct'), + 'Kimi-VL-A3B-Thinking-2506': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Thinking-2506', temperature=0.8, max_tokens=32768, extract_summary=True) +} + +flash_vl = { + 'Flash-VL-2B-Dynamic-ISS': partial(FlashVL, model_path='FlashVL/FlashVL-2B-Dynamic-ISS') +} + + +oryx_series = { + 'oryx': partial(Oryx, model_path="THUdyh/Oryx-1.5-7B"), +} + +# recommend: vllm serve moonshotai/Kimi-VL-A3B-Thinking-2506 +# --served-model-name api-kimi-vl-thinking-2506 --trust-remote-code +# --tensor-parallel-size 2 --max-num-batched-tokens 131072 +# --max-model-len 131072 --limit-mm-per-prompt image=256 +kimi_vllm_series = { + "api-kimi-vl-thinking-2506": partial( + KimiVLAPI, + model="api-kimi-vl-thinking-2506", + ), + "api-kimi-vl-thinking": partial( + KimiVLAPI, + model="api-kimi-vl-thinking", + ), + "api-kimi-vl": partial( + KimiVLAPI, + model="api-kimi-vl", + max_new_tokens=2048, + temperature=0, + ), +} + + +treevgr_series = { + 'TreeVGR-7B': partial( + TreeVGR, + model_path='HaochenWang/TreeVGR-7B', + min_pixels=1280*28*28, max_pixels=16384*28*28, + ), +} + +# QTuneVL series +qtunevl_series = { + "QTuneVL1_5-2B": partial( + QTuneVLChat, model_path="hanchaow/QTuneVL1_5-2B", version="V1.5" + ), + + "QTuneVL1_5-3B": partial( + QTuneVL, + model_path="hanchaow/QTuneVL1_5-3B", + min_pixels=1280 * 28 * 28, + max_pixels=16384 * 28 * 28, + use_custom_prompt=True, + post_process=True + ), +} + +logics_series = { + "Logics-Thinking": partial(Logics_Thinking,model_path='Logics-MLLM/Logics-Thinking'), +} + + +internvl_groups = [ + internvl, internvl2, internvl2_5, mini_internvl, internvl2_5_mpo, + internvl3, internvl3_5 +] +internvl_series = {} +for group in internvl_groups: + internvl_series.update(group) + +supported_VLM = {} + +model_groups = [ + ungrouped, o1_apis, api_models, xtuner_series, qwen_series, 
llava_series, granite_vision_series, + internvl_series, yivl_series, xcomposer_series, minigpt4_series, + idefics_series, instructblip_series, deepseekvl_series, deepseekvl2_series, + janus_series, minicpm_series, cogvlm_series, wemm_series, cambrian_series, + chameleon_series, video_models, ovis_series, vila_series, mantis_series, + mmalaya_series, phi3_series, phi4_series, xgen_mm_series, qwen2vl_series, + slime_series, eagle_series, moondream_series, llama_series, molmo_series, + kosmos_series, points_series, nvlm_series, vintern_series, h2ovl_series, + aria_series, smolvlm_series, sail_series, valley_series, vita_series, + ross_series, emu_series, ola_series, ursa_series, gemma_series, + long_vita_series, ristretto_series, kimi_series, aguvis_series, hawkvl_series, + flash_vl, kimi_vllm_series, oryx_series, treevgr_series, varco_vision_series, qtunevl_series, xvl_series, thyme_series,logics_series +] + +for grp in model_groups: + supported_VLM.update(grp) diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cgbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cgbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b4657884d7221a64c119872460d1dd4b3cf202b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cgbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dude.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dude.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95c7e910bdf0edc0f6b943e63e225f166c609468 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dude.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dynamath.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dynamath.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4aea68b4851a95a60e4ee6d60289ff533e9f7c9c Binary files /dev/null 
and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dynamath.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_base.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0ff0381bf065a6a0e2df2b0deeaab04a6a455ac Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_base.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_ccocr.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_ccocr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f79186d1ffab30dceee98cb61b18ba4c985e83c3 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_ccocr.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmgenbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmgenbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..082267780fac00cb7486d26acc7ee1609ba34d32 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmgenbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmifeval.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmifeval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af258703dcb2e391c3d0d6520b4688f36294bb3b Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmifeval.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmlongbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmlongbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..001d65770b1471e9a50664b7159ad53243b43551 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmlongbench.cpython-310.pyc differ diff --git 
a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/moat.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/moat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0fc83ac32d8af816099b81d058b5d4899688c302 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/moat.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mvbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mvbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa0082f8dd01f9a57a6319b77fd23e05ebe08282 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mvbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/text_base.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/text_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d671e1a7d00a1836f7d7d603fba572546b90e127 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/text_base.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vl_rewardbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vl_rewardbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3471afbfddae402aa2b4305545fa929205854552 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vl_rewardbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/wildvision.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/wildvision.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c534d44aa512cc5d171d0a0e9523d6c7ecc6fff Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/wildvision.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/worldsense.cpython-310.pyc 
b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/worldsense.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe17e8eafcab001948c3f7f2c5e37e74dd4b58e6 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/worldsense.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/image_ccocr.py b/VLMEvalKit-sudoku/vlmeval/dataset/image_ccocr.py new file mode 100644 index 0000000000000000000000000000000000000000..e70403d645a3fcabb8cd09862861d461e4de440c --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/image_ccocr.py @@ -0,0 +1,303 @@ +# flake8: noqa + +import os +import re +import tempfile +import json +from functools import partial +import pandas as pd + +from .image_base import ImageBaseDataset +from ..smp import * +from ..smp.file import get_intermediate_file_path + +# should be the same as FAIL_MSG definded in vlmeval/inference.py +FAIL_MSG = 'Failed to obtain answer via API.' + + +class CCOCRDataset(ImageBaseDataset): + TYPE = 'VQA' + DATASET_URL_MODELSCOPE = { + "CCOCR_DocParsing_DocPhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_chn_75.tsv", + "CCOCR_DocParsing_DocPhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_eng_75.tsv", + "CCOCR_DocParsing_DocScanChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_chn_75.tsv", + "CCOCR_DocParsing_DocScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_eng_75.tsv", + "CCOCR_DocParsing_TablePhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_chn_75.tsv", + "CCOCR_DocParsing_TablePhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_eng_75.tsv", + "CCOCR_DocParsing_TableScanChn": 
"https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_chn_75.tsv", + "CCOCR_DocParsing_TableScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_eng_75.tsv", + "CCOCR_DocParsing_MolecularHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/molecular/molecular_handwriting_100.tsv", + "CCOCR_DocParsing_FormulaHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/formula/formula_handwriting_100.tsv", + "CCOCR_Kie_Sroie2019Word": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/sroie2019_word_347.tsv", + "CCOCR_Kie_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/CORD_100.tsv", + "CCOCR_Kie_EphoieScut": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/EPHOIE_SCUT_311.tsv", + "CCOCR_Kie_Poie": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/POIE_250.tsv", + "CCOCR_Kie_ColdSibr": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_SIBR_400.tsv", + "CCOCR_Kie_ColdCell": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_CELL_600.tsv", + "CCOCR_MultiLanOcr_Arabic": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Arabic/Arabic_150.tsv", + "CCOCR_MultiLanOcr_French": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/French/French_150.tsv", + "CCOCR_MultiLanOcr_German": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/German/German_150.tsv", + "CCOCR_MultiLanOcr_Italian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Italian/Italian_150.tsv", + "CCOCR_MultiLanOcr_Japanese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Japanese/Japanese_150.tsv", + 
"CCOCR_MultiLanOcr_Korean": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Korean/Korean_150.tsv", + "CCOCR_MultiLanOcr_Portuguese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Portuguese/Portuguese_150.tsv", + "CCOCR_MultiLanOcr_Russian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Russian/Russian_150.tsv", + "CCOCR_MultiLanOcr_Spanish": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Spanish/Spanish_150.tsv", + "CCOCR_MultiLanOcr_Vietnamese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv", + "CCOCR_MultiSceneOcr_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/CORD_100.tsv", + "CCOCR_MultiSceneOcr_Funsd": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/FUNSD_50.tsv", + "CCOCR_MultiSceneOcr_Iam": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/IAM_50.tsv", + "CCOCR_MultiSceneOcr_ZhDoc": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_doc_100.tsv", + "CCOCR_MultiSceneOcr_ZhHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_handwriting_50.tsv", + "CCOCR_MultiSceneOcr_Hieragent": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/Hieragent_100.tsv", + "CCOCR_MultiSceneOcr_Ic15": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/IC15_500.tsv", + "CCOCR_MultiSceneOcr_Inversetext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/InverseText_500.tsv", + "CCOCR_MultiSceneOcr_Totaltext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/TotalText_300.tsv", + "CCOCR_MultiSceneOcr_ZhScene": 
"https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/zh_scene_450.tsv", + "CCOCR_MultiSceneOcr_UgcLaion": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/ugc_laion_400.tsv", + "CCOCR_MultiSceneOcr_ZhDense": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_dense_50.tsv", + "CCOCR_MultiSceneOcr_ZhVertical": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_vertical_100.tsv", + "CCOCR": "http://opencompass.openxlab.space/utils/VLMEval/CCOCR.tsv" + } + + DATASET_URL_HUGGINGFACE = { + "CCOCR_DocParsing_DocPhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_chn_75.tsv", + "CCOCR_DocParsing_DocPhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_eng_75.tsv", + "CCOCR_DocParsing_DocScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_chn_75.tsv", + "CCOCR_DocParsing_DocScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_eng_75.tsv", + "CCOCR_DocParsing_TablePhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_chn_75.tsv", + "CCOCR_DocParsing_TablePhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_eng_75.tsv", + "CCOCR_DocParsing_TableScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_chn_75.tsv", + "CCOCR_DocParsing_TableScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_eng_75.tsv", + "CCOCR_DocParsing_MolecularHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/molecular/molecular_handwriting_100.tsv", + "CCOCR_DocParsing_FormulaHandwriting": 
"https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/formula/formula_handwriting_100.tsv", + "CCOCR_Kie_Sroie2019Word": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/sroie2019_word_347.tsv", + "CCOCR_Kie_Cord": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/CORD_100.tsv", + "CCOCR_Kie_EphoieScut": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/EPHOIE_SCUT_311.tsv", + "CCOCR_Kie_Poie": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/POIE_250.tsv", + "CCOCR_Kie_ColdSibr": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_SIBR_400.tsv", + "CCOCR_Kie_ColdCell": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_CELL_600.tsv", + "CCOCR_MultiLanOcr_Arabic": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Arabic/Arabic_150.tsv", + "CCOCR_MultiLanOcr_French": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/French/French_150.tsv", + "CCOCR_MultiLanOcr_German": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/German/German_150.tsv", + "CCOCR_MultiLanOcr_Italian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Italian/Italian_150.tsv", + "CCOCR_MultiLanOcr_Japanese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Japanese/Japanese_150.tsv", + "CCOCR_MultiLanOcr_Korean": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Korean/Korean_150.tsv", + "CCOCR_MultiLanOcr_Portuguese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Portuguese/Portuguese_150.tsv", + "CCOCR_MultiLanOcr_Russian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Russian/Russian_150.tsv", + "CCOCR_MultiLanOcr_Spanish": 
"https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Spanish/Spanish_150.tsv", + "CCOCR_MultiLanOcr_Vietnamese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv", + "CCOCR_MultiSceneOcr_Cord": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/CORD_100.tsv", + "CCOCR_MultiSceneOcr_Funsd": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/FUNSD_50.tsv", + "CCOCR_MultiSceneOcr_Iam": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/IAM_50.tsv", + "CCOCR_MultiSceneOcr_ZhDoc": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_doc_100.tsv", + "CCOCR_MultiSceneOcr_ZhHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_handwriting_50.tsv", + "CCOCR_MultiSceneOcr_Hieragent": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/Hieragent_100.tsv", + "CCOCR_MultiSceneOcr_Ic15": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/IC15_500.tsv", + "CCOCR_MultiSceneOcr_Inversetext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/InverseText_500.tsv", + "CCOCR_MultiSceneOcr_Totaltext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/TotalText_300.tsv", + "CCOCR_MultiSceneOcr_ZhScene": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/zh_scene_450.tsv", + "CCOCR_MultiSceneOcr_UgcLaion": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/ugc_laion_400.tsv", + "CCOCR_MultiSceneOcr_ZhDense": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_dense_50.tsv", + "CCOCR_MultiSceneOcr_ZhVertical": 
"https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_vertical_100.tsv", + "CCOCR": "http://opencompass.openxlab.space/utils/VLMEval/CCOCR.tsv" + } + + # define data path + DATASET_URL = DATASET_URL_MODELSCOPE + DATASET_MD5 = { + "CCOCR_DocParsing_DocPhotoChn": "9039dcbb31830d413261a95cfa29d97f", + "CCOCR_DocParsing_DocPhotoEng": "2ca0824881e1d7317626f2a19d902989", + "CCOCR_DocParsing_DocScanChn": "9e265c8aa760ebdf5c3bf9e892d55492", + "CCOCR_DocParsing_DocScanEng": "77d04637be3def86dbc2ce37ba64a704", + "CCOCR_DocParsing_TablePhotoChn": "c4dc85252ddad2b43a03a67b1d1ae983", + "CCOCR_DocParsing_TablePhotoEng": "02ab75d6169da0cd2ece9ce0ae14a479", + "CCOCR_DocParsing_TableScanChn": "f1f79959fdd01127df7377c9d46722f2", + "CCOCR_DocParsing_TableScanEng": "794903c7acf52bfe956eefba2166d14b", + "CCOCR_DocParsing_MolecularHandwriting": "30b7f7679b713ce000a939eca7b4078f", + "CCOCR_DocParsing_FormulaHandwriting": "e03047776ce5e79a61ae1c057e2a348e", + "CCOCR_Kie_Sroie2019Word": "3287d99a8e86a99b74171fa5a70f9acb", + "CCOCR_Kie_Cord": "ab297cadcbc7158884a301c366f3330a", + "CCOCR_Kie_EphoieScut": "bb8fa3ba7ea91cbf17be0904956ad3f3", + "CCOCR_Kie_Poie": "882b64317989ecbfed6518051cdffb14", + "CCOCR_Kie_ColdSibr": "109d5dad8b7081fb6a2f088e963196d4", + "CCOCR_Kie_ColdCell": "7b44c45b4d7d768d1dbdc08872fe7d3a", + "CCOCR_MultiLanOcr_Arabic": "e9a3f2bb9298d0b882ebc7a98980c3f3", + "CCOCR_MultiLanOcr_French": "729407ed2036c22e602eff645eddd40c", + "CCOCR_MultiLanOcr_German": "96fc2edae747f0ec95b0a6f9bf723022", + "CCOCR_MultiLanOcr_Italian": "29a508fa5d5a5e767497dd69e2430ebb", + "CCOCR_MultiLanOcr_Japanese": "bbcca96ccf25fff63597c2ab4f3ebb1f", + "CCOCR_MultiLanOcr_Korean": "0f55dbd24eba5edc189c91e124411641", + "CCOCR_MultiLanOcr_Portuguese": "a6fcf8831775a61aa631c0cf1c422ae7", + "CCOCR_MultiLanOcr_Russian": "19d2f84062a1699d3e9333912bd6b303", + "CCOCR_MultiLanOcr_Spanish": "f5a0cfa9f2ae4115c91c7b362034e591", + "CCOCR_MultiLanOcr_Vietnamese": 
"bf1cd4e83d91767f4906f81550cec8b9", + "CCOCR_MultiSceneOcr_Cord": "92943f0ccb4c5a196c574222e76759a0", + "CCOCR_MultiSceneOcr_Funsd": "229cc38d193edd00f4383610e98ee873", + "CCOCR_MultiSceneOcr_Iam": "d897a6d6c3880c65e752ec11b211204c", + "CCOCR_MultiSceneOcr_ZhDoc": "303682cc16c8bb51b2b896f8ceb8bd38", + "CCOCR_MultiSceneOcr_ZhHandwriting": "faa298d366bc05e5cfb39e334afb8eff", + "CCOCR_MultiSceneOcr_Hieragent": "6f132cdd0473d7cc145c3e3a08957dd6", + "CCOCR_MultiSceneOcr_Ic15": "3d94869f312a41d53d0578a06a2fb1f2", + "CCOCR_MultiSceneOcr_Inversetext": "e141d424a0c4cf9579064428a270f13d", + "CCOCR_MultiSceneOcr_Totaltext": "ca1daf81d49eeb57ef844b72a23c2e62", + "CCOCR_MultiSceneOcr_ZhScene": "9295152a66e6f117db8bfbb20a9013e6", + "CCOCR_MultiSceneOcr_UgcLaion": "8e9ea1fbf9d56532157e807eabf39b21", + "CCOCR_MultiSceneOcr_ZhDense": "de8f48ee0c8a2cf8ed7f2b3a81e6322d", + "CCOCR_MultiSceneOcr_ZhVertical": "4892b4aec6e7fd11e39aaea23712709b", + "CCOCR": "f8927b76510ffe04e59d45e3f8e8b620" + } + + def _evaluate_single_dataset(self, sub_df, data_name, **judge_kwargs): + """ + Evaluate a single sub-dataset from the combined CCOCR tsv + """ + dict_list = sub_df.to_dict(orient='records') + + gt_info, ptd_info = {}, {} + for data_info in dict_list: + image_name = data_info['image_name'] + gt_info[image_name] = data_info['answer'] + + # warning the FAIL samples + if data_info['prediction'] != FAIL_MSG: + ptd_info[image_name] = data_info['prediction'] + + # Extract metadata from the sub-dataset + group_name = str(sub_df['category'].iloc[0]) + op_name = str(sub_df['l2-category'].iloc[0]) + + data_info = {"op": op_name, "group": group_name, "dataset": data_name, "num": len(gt_info)} + + try: + from .utils.ccocr_evaluator import evaluator_map_info as ccocr_evaluator_map + except ImportError as err: + import warnings + warnings.warn('The dependency of CCOCR evaluator is not properly installed') + warnings.warn(f'{type(err)}: {err}') + return None, None + + eval_func = 
ccocr_evaluator_map.get(group_name, None) + if eval_func is None: + print(f"Warning: evaluator not defined for: {group_name}") + return None, None + + meta_info, eval_info = eval_func(ptd_info, gt_info, **data_info) + + return {"meta": meta_info, "evaluation": eval_info, "config": data_info}, eval_info.get("summary") + + # It returns a DataFrame + def evaluate(self, eval_file, **judge_kwargs): + """ + Evaluate the combined CCOCR dataset containing all sub-datasets + """ + df = load(eval_file) + df['prediction'] = [str(x) for x in df['prediction']] + required_colume_list = ['answer', 'prediction', "category", "image_name", "l2-category", "split"] + for required_colume in required_colume_list: + assert required_colume in df, "required_colume: {} NOT found".format(required_colume) + + # Create unique sub-dataset identifiers using category, l2-category, and split + df['sub_dataset_id'] = df['category'].astype(str) + '_' + df['l2-category'].astype(str) + '_' + df['split'].astype(str) + + # Get all unique sub-datasets from the combined identifier + unique_sub_datasets = df['sub_dataset_id'].unique() + + all_results = {} + all_summaries = {} + + # Process each sub-dataset separately + for sub_dataset_id in tqdm(unique_sub_datasets, desc="Processing sub-datasets"): + print(f"Processing sub-dataset: {sub_dataset_id}") + + # Filter data for this specific sub-dataset + sub_df = df[df['sub_dataset_id'] == sub_dataset_id].copy() + + if len(sub_df) == 0: + print(f"Warning: No data found for sub-dataset: {sub_dataset_id}") + continue + + # Get the original split name for compatibility (use the split value) + split_name = sub_df['split'].iloc[0] + + # Evaluate this sub-dataset + result_info, summary = self._evaluate_single_dataset(sub_df, split_name, **judge_kwargs) + + if result_info is not None: + all_results[sub_dataset_id] = result_info + all_summaries[sub_dataset_id] = summary + print(f"Completed evaluation for {sub_dataset_id}: {summary}") + else: + print(f"Failed to 
evaluate {sub_dataset_id}") + + # Save comprehensive results + result_file = get_intermediate_file_path(eval_file, '_comprehensive_eval', 'json') + comprehensive_result = { + "meta": {"total_datasets": len(all_results), "datasets": list(all_results.keys())}, + "results": all_results, + "summaries": all_summaries + } + dump(comprehensive_result, result_file) + print(f"Comprehensive results saved to: {result_file}") + + # Final Aggregation Logic + lan_ocr_scores = [] + scene_ocr_scores = [] + kie_scores = [] + doc_parsing_scores = [] + + for key, summary in all_summaries.items(): + if not isinstance(summary, dict): + continue + + if 'lan_ocr' in key: + if 'macro_f1_score' in summary: + lan_ocr_scores.append(summary['macro_f1_score']) + elif 'scene_ocr' in key: + if 'macro_f1_score' in summary: + scene_ocr_scores.append(summary['macro_f1_score']) + elif 'kie' in key: + if 'acc' in summary: + kie_scores.append(summary['acc']) + elif 'doc_parsing' in key: + if 'score' in summary: + doc_parsing_scores.append(summary['score']) + + res = {} + category_averages = [] + + if lan_ocr_scores: + avg = sum(lan_ocr_scores) / len(lan_ocr_scores) + res['lan_ocr'] = avg + category_averages.append(avg) + + if scene_ocr_scores: + avg = sum(scene_ocr_scores) / len(scene_ocr_scores) + res['scene_ocr'] = avg + category_averages.append(avg) + + if kie_scores: + avg = sum(kie_scores) / len(kie_scores) + res['kie'] = avg + category_averages.append(avg) + + if doc_parsing_scores: + avg = sum(doc_parsing_scores) / len(doc_parsing_scores) + res['doc_parsing'] = avg + category_averages.append(avg) + + if category_averages: + res['total'] = sum(category_averages) / len(category_averages) + else: + res['total'] = 0 + + print("\n" + "="*80) + print("Final Aggregated Results:") + print("="*80) + for k, v in res.items(): + print(f" {k.upper():<20}: {v:.4f}") + print("="*80) + df = d2df(res) + score_file = get_intermediate_file_path(eval_file, '_acc', 'csv') + dump(df, score_file) + return res diff 
--git a/VLMEvalKit-sudoku/vlmeval/dataset/mmgenbench.py b/VLMEvalKit-sudoku/vlmeval/dataset/mmgenbench.py new file mode 100644 index 0000000000000000000000000000000000000000..7dec7946383f7dc6119ba3d6aebd6eabe0daa850 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/mmgenbench.py @@ -0,0 +1,69 @@ +import warnings +import pandas as pd +from abc import abstractmethod +from ..smp import * +from .image_base import ImageBaseDataset + + +class MMGenBench(ImageBaseDataset): + + prompt_list = [ + """ +# Role +You are an expert in the field of image understanding, focusing on the \ +understanding of images and generating the image caption-prompt. + +# Definition Explanation +image caption-prompt: Refers to the caption or description of an image, \ +used to provide to a Text-to-Image model to generate a new image. +Text-to-Image model: Can generate a new image based on the provided image \ +caption-prompt, such as stable diffusion 3, flux, and other image generation models. + +# Task Description +Generate an image caption-prompt based on the input image. + +# Key Points and Requirements +1. Accurately understand the input image and precisely generate an image caption-prompt. +2. The generated image caption-prompt, when provided to the Text-to-Image model, requires the \ +Text-to-Image model to generate a new image that is as consistent as possible with the input image. +3. The generated image caption-prompt must conform to the preferences of the Text-to-Image model. +4. The generated image caption-prompt should describe the input image in as much \ +detail as possible, and it should be between 20 to 60 words. + +# Output Format +A string, that is the image caption-prompt. No extra output needed. 
+""" + ] + TYPE = 'GenerateImgPrompt' + DATASET_URL = { + 'MMGenBench-Test': 'https://huggingface.co/datasets/lerogo/MMGenBench/resolve/main/MMGenBench-Test.tsv', + 'MMGenBench-Domain': 'https://huggingface.co/datasets/lerogo/MMGenBench/resolve/main/MMGenBench-Domain.tsv', + } + PROMPT_MAP = { + 'MMGenBench-Test': prompt_list[0], + 'MMGenBench-Domain': prompt_list[0], + } + DATASET_MD5 = { + 'MMGenBench-Test': "94f8dac6bbf7c20be403f99adeaa73da", + 'MMGenBench-Domain': "5c10daf6e2c5f08bdfb0701aa6db86bb", + } + + def __init__(self, dataset='MMGenBench', **kwargs): + super().__init__(dataset, **kwargs) + warnings.warn('This dataset is for inference only and does not support direct output of evaluation results.\n') + warnings.warn('Please refer to "https://github.com/lerogo/MMGenBench" for more evaluation information.\n') + + def load_data(self, dataset): + data = super().load_data(dataset) + if 'question' not in data: + data['question'] = [( + self.PROMPT_MAP[dataset] + )] * len(data) + return data + + # Given the prediction file, return the evaluation results in the format of a dictionary or pandas dataframe + @abstractmethod + def evaluate(self, eval_file, **judge_kwargs): + warnings.warn('This evaluation method is not supported.\n') + warnings.warn('Please refer to "https://github.com/lerogo/MMGenBench" for more evaluation information.\n') + return None diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/__init__.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f280383b86e8e3afbd66e586120c3d5aa958425 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/cgbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/cgbench.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c24ac524672b7d1a939e5cc6fcbc2b446e907e36 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/cgbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/judge_util.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/judge_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b3ea6a23d5cb3ac00b30f8b43ac33154232e3f9 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/judge_util.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mvbench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mvbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f466642c439f6c520a93837e9cfdb8cc6666936f Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/mvbench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/qbench_video.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/qbench_video.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff8df29e6f128e6f7c4fa1f80f32d00821f0c455 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/qbench_video.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tablevqabench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tablevqabench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96b3c37433956ad3a2ebfc98deda24e11c8515ae Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/tablevqabench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vlm2bench.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vlm2bench.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45b155e5c4322d876f2b3a3ad46c3be49bff3d0c Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vlm2bench.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vqa_eval.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vqa_eval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd996b7ba43e10e4bcfa82190a1cd31fdd0227b7 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/__pycache__/vqa_eval.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/README.md b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..99572ef587eb9e5689199ba965f399a16eeb4b1a --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/README.md @@ -0,0 +1,59 @@ +# CC-OCR: A Comprehensive and Challenging OCR Benchmark for Evaluating Large Multimodal Models in Literacy + +## Introduction + +Please refer to our [GitHub](https://github.com/AlibabaResearch/AdvancedLiterateMachinery/tree/main/Benchmarks/CC-OCR) for more information. + +## Running Scripts + +Once the environment is ready, execute the following script from the root directory of VLMEvalKit +to perform inference and evaluation tasks in batch. 
+ +```shell +MODEL_NAME="QwenVLMax" +OUTPUT_DIR="/your/path/to/output_dir" + +SUB_OUTPUT_DIR=${OUTPUT_DIR}/multi_scene_ocr +python run.py --data CCOCR_MultiSceneOcr_Cord CCOCR_MultiSceneOcr_Funsd CCOCR_MultiSceneOcr_Iam CCOCR_MultiSceneOcr_ZhDoc CCOCR_MultiSceneOcr_ZhHandwriting CCOCR_MultiSceneOcr_Hieragent CCOCR_MultiSceneOcr_Ic15 CCOCR_MultiSceneOcr_Inversetext CCOCR_MultiSceneOcr_Totaltext CCOCR_MultiSceneOcr_ZhScene CCOCR_MultiSceneOcr_UgcLaion CCOCR_MultiSceneOcr_ZhDense CCOCR_MultiSceneOcr_ZhVertical --model ${MODEL_NAME} --work-dir ${SUB_OUTPUT_DIR} --verbose +python vlmeval/dataset/utils/ccocr_evaluator/common.py ${SUB_OUTPUT_DIR} + +SUB_OUTPUT_DIR=${OUTPUT_DIR}/multi_lan_ocr +python run.py --data CCOCR_MultiLanOcr_Arabic CCOCR_MultiLanOcr_French CCOCR_MultiLanOcr_German CCOCR_MultiLanOcr_Italian CCOCR_MultiLanOcr_Japanese CCOCR_MultiLanOcr_Korean CCOCR_MultiLanOcr_Portuguese CCOCR_MultiLanOcr_Russian CCOCR_MultiLanOcr_Spanish CCOCR_MultiLanOcr_Vietnamese --model ${MODEL_NAME} --work-dir ${SUB_OUTPUT_DIR} --verbose +python vlmeval/dataset/utils/ccocr_evaluator/common.py ${SUB_OUTPUT_DIR} + +SUB_OUTPUT_DIR=${OUTPUT_DIR}/doc_parsing +python run.py --data CCOCR_DocParsing_DocPhotoChn CCOCR_DocParsing_DocPhotoEng CCOCR_DocParsing_DocScanChn CCOCR_DocParsing_DocScanEng CCOCR_DocParsing_TablePhotoChn CCOCR_DocParsing_TablePhotoEng CCOCR_DocParsing_TableScanChn CCOCR_DocParsing_TableScanEng CCOCR_DocParsing_MolecularHandwriting CCOCR_DocParsing_FormulaHandwriting --model ${MODEL_NAME} --work-dir ${SUB_OUTPUT_DIR} --verbose +python vlmeval/dataset/utils/ccocr_evaluator/common.py ${SUB_OUTPUT_DIR} + +SUB_OUTPUT_DIR=${OUTPUT_DIR}/kie +python run.py --data CCOCR_Kie_Sroie2019Word CCOCR_Kie_Cord CCOCR_Kie_EphoieScut CCOCR_Kie_Poie CCOCR_Kie_ColdSibr CCOCR_Kie_ColdCell --model ${MODEL_NAME} --work-dir ${SUB_OUTPUT_DIR} --verbose +python vlmeval/dataset/utils/ccocr_evaluator/common.py ${SUB_OUTPUT_DIR} +``` + +## Example Output +The evaluation results will be saved in 
`${SUB_OUTPUT_DIR}/summary.md`. For example, for the KIE subset, +the output is as follows: + +| exp_name(f1_score) | COLD_CELL | COLD_SIBR | CORD | EPHOIE_SCUT | POIE | sroie2019_word | summary | +|:-------------------|------------:|------------:|-------:|--------------:|-------:|-----------------:|----------:| +| QwenVLMax | 81.01 | 72.46 | 69.33 | 71.2 | 60.85 | 76.37 | 71.87 | + + +## Citation +If you find our work helpful, feel free to give us a cite. + +``` +@misc{yang2024ccocr, + title={CC-OCR: A Comprehensive and Challenging OCR Benchmark for Evaluating Large Multimodal Models in Literacy}, + author={Zhibo Yang and Jun Tang and Zhaohai Li and Pengfei Wang and Jianqiang Wan and Humen Zhong and Xuejing Liu and Mingkun Yang and Peng Wang and Shuai Bai and LianWen Jin and Junyang Lin}, + year={2024}, + eprint={2412.02210}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2412.02210}, +} +``` + +## Contact Us + +If you have any questions, feel free to send an email to: wpf272043@alibaba-inc.com or xixing.tj@alibaba-inc.com diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/doc_parsing_evaluator.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/doc_parsing_evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..d059adc0935e1b5c3370ea6fd1c21df3fd9bffc2 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/ccocr_evaluator/doc_parsing_evaluator.py @@ -0,0 +1,256 @@ +import nltk +import re +from tqdm import tqdm +from collections import deque +from apted.helpers import Tree +from apted import APTED, Config + +# local import +from .common import BaseMetric + + +# 移除指定的LaTeX命令 +patterns = [ + r'\\documentclass\{.*?\}', + r'\\usepackage\[.*?\]\{.*?\}', + r'\\usepackage\{.*?\}', + r'\\geometry\{.*?\}', + r'\\begin\{document\}', + r'\\end\{document\}', + r'\\noindent' +] + + +class TableTree(Tree): + """ + # Copyright 2020 IBM + # Author: peter.zhong@au1.ibm.com + # License: Apache 
2.0 License. + """ + def __init__(self, tag, colspan=None, rowspan=None, content=None, *children): + self.tag = tag + self.colspan = colspan + self.rowspan = rowspan + self.content = content + self.children = list(children) + + def bracket(self): + """Show tree using brackets notation""" + if self.tag == "td": + result = '"tag": %s, "colspan": %d, "rowspan": %d, "text": %s' % ( + self.tag, + self.colspan, + self.rowspan, + self.content, + ) + else: + result = '"tag": %s' % self.tag + for child in self.children: + result += child.bracket() + return "{{{}}}".format(result) + + +class CustomConfig(Config): + """ + # Copyright 2020 IBM + # Author: peter.zhong@au1.ibm.com + # License: Apache 2.0 License. + """ + def rename(self, node1, node2): + """Compares attributes of trees""" + # print(node1.tag) + if ( + (node1.tag != node2.tag) + or (node1.colspan != node2.colspan) + or (node1.rowspan != node2.rowspan) + ): + return 1.0 + if node1.tag == "td": + if node1.content or node2.content: + return nltk.edit_distance(node1.content, node2.content) / max(len(node1.content), len(node2.content)) + return 0.0 + + +class TEDS(object): + """Tree Edit Distance basead Similarity + # Copyright 2020 IBM + # Author: peter.zhong@au1.ibm.com + # License: Apache 2.0 License. 
+ """ + def __init__(self, structure_only=False, n_jobs=1, ignore_nodes=None): + assert isinstance(n_jobs, int) and ( + n_jobs >= 1 + ), "n_jobs must be an integer greather than 1" + self.structure_only = structure_only + self.n_jobs = n_jobs + self.ignore_nodes = ignore_nodes + self.__tokens__ = [] + + def tokenize(self, node): + """Tokenizes table cells""" + self.__tokens__.append("<%s>" % node.tag) + if node.text is not None: + self.__tokens__ += list(node.text) + for n in node.getchildren(): + self.tokenize(n) + if node.tag != "unk": + self.__tokens__.append("" % node.tag) + if node.tag != "td" and node.tail is not None: + self.__tokens__ += list(node.tail) + + def load_html_tree(self, node, parent=None): + """Converts HTML tree to the format required by apted""" + global __tokens__ + if node.tag == "td": + if self.structure_only: + cell = [] + else: + self.__tokens__ = [] + self.tokenize(node) + cell = self.__tokens__[1:-1].copy() + new_node = TableTree( + node.tag, + int(node.attrib.get("colspan", "1")), + int(node.attrib.get("rowspan", "1")), + cell, + *deque(), + ) + else: + new_node = TableTree(node.tag, None, None, None, *deque()) + if parent is not None: + parent.children.append(new_node) + if node.tag != "td": + for n in node.getchildren(): + self.load_html_tree(n, new_node) + if parent is None: + return new_node + + def evaluate(self, pred, true): + """Computes TEDS score between the prediction and the ground truth of a + given sample + """ + # try_import("lxml") + from lxml import etree, html + if (not pred) or (not true): + return 0.0 + + parser = html.HTMLParser(remove_comments=True, encoding="utf-8") + pred = html.fromstring(pred, parser=parser) + true = html.fromstring(true, parser=parser) + if pred.xpath("body/table") and true.xpath("body/table"): + pred = pred.xpath("body/table")[0] + true = true.xpath("body/table")[0] + if self.ignore_nodes: + etree.strip_tags(pred, *self.ignore_nodes) + etree.strip_tags(true, *self.ignore_nodes) + n_nodes_pred 
= len(pred.xpath(".//*")) + n_nodes_true = len(true.xpath(".//*")) + n_nodes = max(n_nodes_pred, n_nodes_true) + tree_pred = self.load_html_tree(pred) + tree_true = self.load_html_tree(true) + distance = APTED( + tree_pred, tree_true, CustomConfig() + ).compute_edit_distance() + return 1.0 - (float(distance) / n_nodes) + else: + return 0.0 + + +class ParsingEvaluator(BaseMetric): + def response_post_func(self, response_text, **kwargs): + return response_text + + def evaluate(self, response_info, gt_info, **kwargs): + op = kwargs['op'] + if op == 'doc': + score = self.eval_doc(response_info, gt_info) + elif op == 'table': + score = self.eval_table(response_info, gt_info) + elif op in ['molecular', "formula"]: + score = self.eval_formula(response_info, gt_info, op_name=op) + else: + raise ValueError(f'doc parsing unsupported op: {op}') + + # summary info + eval_info = {"summary": {"score": score}} + return eval_info + + def eval_doc(self, response_info, gt_info): + results = [] + for img_name, gt in tqdm(gt_info.items()): + if img_name not in response_info: + results.append(0) + continue + + pred = response_info[img_name] + for pattern in patterns: + pred = re.sub(pattern, '', pred) + + try: + pred = pred.split('```')[1] + except: + pass + + pred = pred.replace('```latex', '') + pred = pred.replace('```', '') + + pred = pred.replace(' ', '').replace('\n', '') + gt = gt.replace(' ', '').replace('\n', '') + + edit_dist = nltk.edit_distance(pred, gt) / max(len(pred), len(gt)) + results.append(1 - edit_dist) + + score = sum(results) / len(results) + return score + + def eval_table(self, response_info, gt_info): + teds = TEDS(structure_only=False, n_jobs=1) + results = [] + for img_name, gt in tqdm(gt_info.items()): + if img_name not in response_info: + results.append(0) + continue + + pred = response_info[img_name] + for pattern in patterns: + pred = re.sub(pattern, '', pred) + + try: + pred = pred.split('```html')[1] + except: + pass + + pred = pred.replace('```', '') + 
pred = pred.replace(' ', '').replace('\n', '').replace(',', ',') + gt = gt.replace(' ', '').replace('\n', '') + + pred_html = '{}'.format(pred) + gt_html = '{}'.format(gt) + results.append(teds.evaluate(pred_html, gt_html)) + + score = sum(results) / len(results) + return score + + def eval_formula(self, response_info, gt_info, op_name='formula'): + results = [] + for img_name, gt in tqdm(gt_info.items()): + if img_name not in response_info: + results.append(0) + continue + + pred = response_info[img_name] + + if op_name == 'formula': + pred = pred.replace("\n", " ").replace("```latex", "").replace("```", "").replace("\t", " ").replace(" ", "") # noqa: E501 + gt = gt.replace(" ", "") + elif op_name == 'molecular': + pred = pred.replace("\n", "").replace(" ", "").replace("", "").replace("", "") + gt = gt.replace(" ", "") + edit_dist = nltk.edit_distance(pred, gt) / max(len(pred), len(gt)) + results.append(1 - edit_dist) + score = sum(results) / len(results) + return score + + +if __name__ == '__main__': + pass diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/text_evaluator.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/text_evaluator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db73f942b9b3d428ab9edc95cabbfdc1356675ca Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/dataset/utils/chartmimic/evaluator/__pycache__/text_evaluator.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/longvideobench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/longvideobench.py new file mode 100644 index 0000000000000000000000000000000000000000..ca814bd2117b169e38ab4796b21b5685ce497e4a --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/longvideobench.py @@ -0,0 +1,80 @@ +from ...smp import * +from .multiple_choice import extract_answer_from_item +import numpy as np +import re + +FAIL_MSG = 'Failed to obtain answer via API.' 
+ +DURATIONS = [15, 60, 600, 3600] +TASK_CATEGORIES = [ + "S2E", "S2O", "S2A", + "E2O", "O2E", "T2E", + "T2O", "T2A", "E3E", + "O3O", "SSS", "SOS", + "SAA", "T3E", "T3O", + "TOS", "TAA" +] + + +def get_dimension_rating(data_path): + data = load(data_path) + print(data.iloc[0]) + + duration_rating = {k: {} for k in DURATIONS} + for duration in DURATIONS + ['overall']: + duration_rating[duration] = { + 'overall': '', + 'question_category': {k: [] for k in TASK_CATEGORIES} + } + + for i in range(len(data)): + + task_ctg = data.iloc[i]['question_category'] + + duration = data.iloc[i]['duration_group'] + duration_rating[duration]['question_category'][task_ctg].append(data.iloc[i]['score']) + + duration_rating['overall']['question_category'][task_ctg].append(data.iloc[i]['score']) + + for duration in DURATIONS + ['overall']: + overall_res_dur = f'{np.mean([x for x in sum(duration_rating[duration]["question_category"].values(), []) if x >= 0]):.3f}' # noqa: E501 + duration_rating[duration]['overall'] = overall_res_dur + for task_ctg in TASK_CATEGORIES: + task_res_dur = f'{np.mean([x for x in duration_rating[duration]["question_category"][task_ctg] if x >= 0]):.3f}' # noqa: E501 + duration_rating[duration]['question_category'][task_ctg] = task_res_dur + + return duration_rating + + +def extract_option(model, input_item, dataset_name): + options = input_item['question'].split('\n')[1:] + for id, option in enumerate(options): + option_id = chr(ord('A') + id) + '.' + if option.find(option_id) >= 0: + input_item[chr(ord('A') + id)] = option[option.find(option_id) + len(option_id):].strip('. 
def extract_characters_regex(s):
    """Extract the first option letter (A-E) from a model response.

    Known answer prefixes (e.g. "The correct option is") are stripped first
    so capital letters inside those phrases are not mistaken for the answer.

    Args:
        s (str): raw model response.

    Returns:
        str: a single character in 'ABCDE', or '' when no option letter is
            found (or the response is long free-form text without one).
    """
    s = s.strip()
    # BUG FIX: the original list relied on implicit string concatenation
    # ('The best option is' 'The correct option is' and
    #  'Best answer:' 'Best option:'), fusing adjacent prefixes into single
    # never-matching strings; commas restore the intended four prefixes.
    answer_prefixes = [
        'The best answer is',
        'The correct answer is',
        'The answer is',
        'The answer',
        'The best option is',
        'The correct option is',
        'Best answer:',
        'Best option:',
        'Answer:',
        'Option:',
    ]
    for answer_prefix in answer_prefixes:
        s = s.replace(answer_prefix, '')

    # A long response with no option letter is treated as unanswerable.
    if len(s.split()) > 10 and not re.search('[ABCDE]', s):
        return ''
    matches = re.search(r'[ABCDE]', s)
    if matches is None:
        return ''
    return matches[0]
| This meme is poking fun at the fact that the names of the countries +Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, +while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues +because the names of these countries do not accurately represent their landscapes. | +The meme talks about Iceland and Greenland. It's pointing out that despite their names, +Iceland is not very icy and Greenland isn't very green. | 0.4 +Can you explain this meme? | This meme is poking fun at the fact that the names of the countries +Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, +while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues +because the names of these countries do not accurately represent their landscapes. | +The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. +Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. +The text 'This is why I have trust issues' is a playful way to suggest +that these contradictions can lead to distrust or confusion. +The humor in this meme is derived from the unexpected contrast between the names of the countries +and their actual physical characteristics. 
def MMVet_auxeval(model, line):
    """Grade one MM-Vet prediction with the judge model, retrying bad output.

    Args:
        model: judge model exposing ``generate(prompt, temperature=...)``.
        line: record with 'question', 'answer', 'prediction' fields.

    Returns:
        dict(log=..., score=...): score in [0, 1]; 0.0 after 5 failed tries.
    """
    def float_cvt(s):
        # The judge may return None (API failure) or non-numeric text;
        # float(None) raises TypeError, so catch it alongside ValueError
        # instead of crashing the retry loop.
        try:
            return float(s)
        except (TypeError, ValueError):
            return None

    prompt = build_mmvet_gpt4_prompt(line)
    log = ''
    retry = 5
    for i in range(retry):
        # Raise the temperature on each retry to escape degenerate outputs.
        output = model.generate(prompt, temperature=i * 0.5)
        score = float_cvt(output)
        if score is None:
            log += f'Try {i}: output is {output}, failed to parse.\n'
        elif score < 0 or score > 1:
            log += f'Try {i}: output is {output}, invalid score: {score}.\n'
        else:
            log += 'Succeed'
            return dict(log=log, score=score)
    log += 'All 5 retries failed.\n'
    return dict(log=log, score=0.0)
0000000000000000000000000000000000000000..efd89b68db1938a6a02051b1da5298020973d80f --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/moviechat1k.py @@ -0,0 +1,111 @@ +# flake8: noqa +from ...smp import * +import numpy as np +import pandas as pd + +FAIL_MSG = 'Failed to obtain answer via API.' + + +CAL_SCORE_PROMPT = """Please evaluate the following video-based question-answer pair: + +Question: {question} +Correct Answer: {answer} +Predicted Answer: {pred_response} + +Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. +Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. +DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. +For example, your response should look like this: \{'pred': 'yes', 'score': 4.8\}. 
+""" + +MOVIECHAT1K_DIMENSIONS = { + 'global': ['global'], + 'breakpoint': ['breakpoint'], + 'overall': [] +} + +L3_DIMS = [] +for k, v in MOVIECHAT1K_DIMENSIONS.items(): + if k != 'overall': + L3_DIMS.extend(v) + MOVIECHAT1K_DIMENSIONS['overall'].extend(v) + + +def get_dimension_rating(data_path): + data = load(data_path) + coarse_rating = {k: [] for k in MOVIECHAT1K_DIMENSIONS} + coarse_acc = {k: [] for k in MOVIECHAT1K_DIMENSIONS} + + def parse_score_dict(score_dict): + """Helper function to parse score dictionary string""" + if isinstance(score_dict, dict): + return score_dict + + if isinstance(score_dict, str): + try: + # First try standard json loading + return json.loads(score_dict) + except json.JSONDecodeError: + try: + # If that fails, try eval (safer than literal_eval for this case) + return eval(score_dict) + except: + print(f"Failed to parse score_dict: {score_dict}") + return None + return None + + for i in range(len(data)): + mode = data.iloc[i]['mode'].lower() # Convert to lowercase + score_dict = parse_score_dict(data.iloc[i]['score']) + if score_dict and isinstance(score_dict, dict) and 'pred' in score_dict and 'score' in score_dict: + score = score_dict['score'] + is_correct = 1 if score_dict['pred'].lower() == 'yes' else 0 + else: + score = -1 + is_correct = -1 + + # Map caption types to their lowercase versions + if mode in ['global', 'breakpoint']: + coarse_rating[mode].append(score) + coarse_rating['overall'].append(score) + + if is_correct != -1: + coarse_acc[mode].append(is_correct) + coarse_acc['overall'].append(is_correct) + + + coarse_valid = {k: f'{np.mean([x for x in v if x >= 0]):.2f}' for k, v in coarse_rating.items()} + coarse_accuracy = {k: f'{np.mean(v):.2f}' if v else '0.00' for k, v in coarse_acc.items()} + + return dict( + coarse_valid=coarse_valid, + coarse_accuracy=coarse_accuracy + ) + + + +def prepare_score_prompt(item): + """ + Prepare messages for score evaluation + + Args: + item: DataFrame row containing question, 
answer, and prediction + + Returns: + list: List of message dictionaries for the model + """ + # Convert Series to dictionary if needed + if isinstance(item, pd.Series): + item = item.to_dict() + + + prompt = f"""Please evaluate the following video-based question-answer pair:\n\n + Question: {item['question']}\n + Correct Answer: {item['answer']}\n + Predicted Answer: {item['prediction']}\n\n + Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. + Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. + DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. + For example, your response should look like this: {{'pred': 'yes', 'score': 4.8}}.""" + + return prompt diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/mvbench.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mvbench.py new file mode 100644 index 0000000000000000000000000000000000000000..489a98febff2d590feedeba42d3d7516c537b943 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/mvbench.py @@ -0,0 +1,509 @@ +from ...smp import * +from .multiple_choice import extract_answer_from_item +from PIL import Image, ImageOps +import torchvision +import random +import numbers +import math +import torch + + +def get_dimension_rating(data_path): + data = load(data_path) + result_board = {} + for idx, item in data.iterrows(): + if item['task_type'] not in result_board: + result_board[item['task_type']] = [0, 0] + result_board[item['task_type']][1] += 1 + if item['score']: + result_board[item['task_type']][0] += 1 + + correct = 0 + total = 0 + for key, value in result_board.items(): + correct += value[0] + total += value[1] + result_board[key].append(f'{value[0] / value[1] * 100:.2f}%') + + result_board['overall'] = [correct, total, 
def check_ans(pred, gt):
    """Loosely match a predicted MCQ answer against the ground truth.

    Both strings are lower-cased; the first whitespace token is treated as
    the option marker (e.g. "a." / "(b)") and the remainder as the content.

    Args:
        pred (str): model prediction, e.g. "A. a dog".
        gt (str): ground-truth answer in the same format.

    Returns:
        bool: True when the option markers match in either direction.
    """
    flag = False

    pred_list = pred.lower().strip().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])
    gt_list = gt.lower().strip().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])
    # Guard against a letter-only ground truth: with empty content,
    # gt_content[-1] would raise IndexError; endswith('.') is safe on ''.
    if gt_content.endswith('.'):
        gt_content = gt_content[:-1]

    if pred_option.replace('.', '') in gt_option:
        flag = True
    elif gt_option in pred_option:
        flag = True

    return flag
\n'): + flag = True + + return flag + + +class GroupRandomCrop(object): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, img_group): + + w, h = img_group[0].size + th, tw = self.size + + out_images = list() + + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + + for img in img_group: + assert (img.size[0] == w and img.size[1] == h) + if w == tw and h == th: + out_images.append(img) + else: + out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) + + return out_images + + +class MultiGroupRandomCrop(object): + def __init__(self, size, groups=1): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + self.groups = groups + + def __call__(self, img_group): + + w, h = img_group[0].size + th, tw = self.size + + out_images = list() + + for i in range(self.groups): + x1 = random.randint(0, w - tw) + y1 = random.randint(0, h - th) + + for img in img_group: + assert (img.size[0] == w and img.size[1] == h) + if w == tw and h == th: + out_images.append(img) + else: + out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) + + return out_images + + +class GroupCenterCrop(object): + def __init__(self, size): + self.worker = torchvision.transforms.CenterCrop(size) + + def __call__(self, img_group): + return [self.worker(img) for img in img_group] + + +class GroupRandomHorizontalFlip(object): + """Randomly horizontally flips the given PIL.Image with a probability of 0.5 + """ + + def __init__(self, is_flow=False): + self.is_flow = is_flow + + def __call__(self, img_group, is_flow=False): + v = random.random() + if v < 0.5: + ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group] + if self.is_flow: + for i in range(0, len(ret), 2): + # invert flow pixel values when flipping + ret[i] = ImageOps.invert(ret[i]) + return ret + else: + return img_group + + +class GroupNormalize(object): + def 
__init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor): + rep_mean = self.mean * (tensor.size()[0] // len(self.mean)) + rep_std = self.std * (tensor.size()[0] // len(self.std)) + + # TODO: make efficient + for t, m, s in zip(tensor, rep_mean, rep_std): + t.sub_(m).div_(s) + + return tensor + + +class GroupScale(object): + """ Rescales the input PIL.Image to the given 'size'. + 'size' will be the size of the smaller edge. + For example, if height > width, then image will be + rescaled to (size * height / width, size) + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + self.worker = torchvision.transforms.Resize(size, interpolation) + + def __call__(self, img_group): + return [self.worker(img) for img in img_group] + + +class GroupOverSample(object): + def __init__(self, crop_size, scale_size=None, flip=True): + self.crop_size = crop_size if not isinstance( + crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + self.flip = flip + + def __call__(self, img_group): + + if self.scale_worker is not None: + img_group = self.scale_worker(img_group) + + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + offsets = GroupMultiScaleCrop.fill_fix_offset( + False, image_w, image_h, crop_w, crop_h) + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + if self.flip: + oversample_group.extend(flip_group) + return oversample_group + + 
+class GroupFullResSample(object): + def __init__(self, crop_size, scale_size=None, flip=True): + self.crop_size = crop_size if not isinstance( + crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + self.flip = flip + + def __call__(self, img_group): + + if self.scale_worker is not None: + img_group = self.scale_worker(img_group) + + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + offsets = list() + offsets.append((0 * w_step, 2 * h_step)) # left + offsets.append((4 * w_step, 2 * h_step)) # right + offsets.append((2 * w_step, 2 * h_step)) # center + + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + if self.flip: + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + oversample_group.extend(flip_group) + return oversample_group + + +class GroupMultiScaleCrop(object): + + def __init__(self, input_size, scales=None, max_distort=1, + fix_crop=True, more_fix_crop=True): + self.scales = scales if scales is not None else [1, .875, .75, .66] + self.max_distort = max_distort + self.fix_crop = fix_crop + self.more_fix_crop = more_fix_crop + self.input_size = input_size if not isinstance(input_size, int) else [ + input_size, input_size] + self.interpolation = Image.BILINEAR + + def __call__(self, img_group): + + im_size = img_group[0].size + + crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) + crop_img_group = [ + img.crop( + (offset_w, + offset_h, + offset_w + crop_w, + offset_h + crop_h)) for img in img_group] 
+ ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) + for img in crop_img_group] + return ret_img_group + + def _sample_crop_size(self, im_size): + image_w, image_h = im_size[0], im_size[1] + + # find a crop size + base_size = min(image_w, image_h) + crop_sizes = [int(base_size * x) for x in self.scales] + crop_h = [ + self.input_size[1] if abs( + x - self.input_size[1]) < 3 else x for x in crop_sizes] + crop_w = [ + self.input_size[0] if abs( + x - self.input_size[0]) < 3 else x for x in crop_sizes] + + pairs = [] + for i, h in enumerate(crop_h): + for j, w in enumerate(crop_w): + if abs(i - j) <= self.max_distort: + pairs.append((w, h)) + + crop_pair = random.choice(pairs) + if not self.fix_crop: + w_offset = random.randint(0, image_w - crop_pair[0]) + h_offset = random.randint(0, image_h - crop_pair[1]) + else: + w_offset, h_offset = self._sample_fix_offset( + image_w, image_h, crop_pair[0], crop_pair[1]) + + return crop_pair[0], crop_pair[1], w_offset, h_offset + + def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): + offsets = self.fill_fix_offset( + self.more_fix_crop, image_w, image_h, crop_w, crop_h) + return random.choice(offsets) + + @staticmethod + def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + ret = list() + ret.append((0, 0)) # upper left + ret.append((4 * w_step, 0)) # upper right + ret.append((0, 4 * h_step)) # lower left + ret.append((4 * w_step, 4 * h_step)) # lower right + ret.append((2 * w_step, 2 * h_step)) # center + + if more_fix_crop: + ret.append((0, 2 * h_step)) # center left + ret.append((4 * w_step, 2 * h_step)) # center right + ret.append((2 * w_step, 4 * h_step)) # lower center + ret.append((2 * w_step, 0 * h_step)) # upper center + + ret.append((1 * w_step, 1 * h_step)) # upper left quarter + ret.append((3 * w_step, 1 * h_step)) # upper right quarter + ret.append((1 * w_step, 3 * h_step)) 
# lower left quarter + ret.append((3 * w_step, 3 * h_step)) # lower righ quarter + + return ret + + +class GroupRandomSizedCrop(object): + """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size + and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio + This is popularly used to train the Inception networks + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + self.size = size + self.interpolation = interpolation + + def __call__(self, img_group): + for attempt in range(10): + area = img_group[0].size[0] * img_group[0].size[1] + target_area = random.uniform(0.08, 1.0) * area + aspect_ratio = random.uniform(3. / 4, 4. / 3) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if w <= img_group[0].size[0] and h <= img_group[0].size[1]: + x1 = random.randint(0, img_group[0].size[0] - w) + y1 = random.randint(0, img_group[0].size[1] - h) + found = True + break + else: + found = False + x1 = 0 + y1 = 0 + + if found: + out_group = list() + for img in img_group: + img = img.crop((x1, y1, x1 + w, y1 + h)) + assert (img.size == (w, h)) + out_group.append( + img.resize( + (self.size, self.size), self.interpolation)) + return out_group + else: + # Fallback + scale = GroupScale(self.size, interpolation=self.interpolation) + crop = GroupRandomCrop(self.size) + return crop(scale(img_group)) + + +class ConvertDataFormat(object): + def __init__(self, model_type): + self.model_type = model_type + + def __call__(self, images): + if self.model_type == '2D': + return images + tc, h, w = images.size() + t = tc // 3 + images = images.view(t, 3, h, w) + images = images.permute(1, 0, 2, 3) + return images + + +class Stack(object): + + def __init__(self, roll=False): + self.roll = roll + + def __call__(self, img_group): + if img_group[0].mode 
class ToTorchFormatTensor(object):
    """Convert a PIL.Image (RGB) or numpy.ndarray (H x W x C) with values in
    [0, 255] into a torch.FloatTensor of shape (C x H x W) in [0.0, 1.0];
    division by 255 can be disabled via ``div=False``."""

    def __init__(self, div=True):
        # When True, scale the result from [0, 255] down to [0.0, 1.0].
        self.div = div

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # ndarray path: reorder HWC -> CHW via permute.
            tensor = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
        else:
            # PIL path: rebuild the tensor from the raw byte buffer.
            # NOTE(review): torch.ByteStorage.from_buffer is deprecated in
            # recent torch releases -- consider torch.frombuffer instead.
            tensor = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
            tensor = tensor.view(pic.size[1], pic.size[0], len(pic.mode))
            # HWC -> CHW; this transpose dominates loading time.
            tensor = tensor.transpose(0, 1).transpose(0, 2).contiguous()
        tensor = tensor.float()
        return tensor.div(255) if self.div else tensor
def extract_final_answer(latex_response):
    """Return the content of the first ``\\boxed{...}`` (or ``\\[boxed{...}\\]``) span.

    Falls back to the whole response when no boxed answer is present.

    Args:
        latex_response (str): model output possibly containing a boxed answer.

    Returns:
        str: the stripped boxed content ('' for an empty box), or the
            unmodified input when nothing matches.
    """
    match = re.search(r'\\boxed{(.*?)}|\\\\\[boxed{(.*?)}\\\\\]', latex_response)
    if match:
        # Pick the alternation group that actually participated in the match;
        # testing `is not None` (instead of truthiness) keeps an empty
        # \boxed{} from raising StopIteration.
        return next(group for group in match.groups() if group is not None).strip()
    return latex_response
"").replace("\\cdot", "*") + return string + + +@timeout_decorator.timeout(10, use_signals=False) +def _standardize_expr(expr): + return simplify(expand(trigsimp(expr))) + + +def is_equiv(model, expr1: str, expr2: str, verbose: bool = False) -> dict: + result_data = { + "input_expressions": {"expr1": expr1, "expr2": expr2}, + "preprocessed_expressions": {}, + "sympy_result": None, + "llm_result": None, + "final_result": None, + "error": None, + "llm_comparison_result": None, + } + try: + if "\text" in expr1 or "\text" in expr2: + model.sys_prompt = Judge_SYS_PROMPT + user_prompt = Judge_USER_PROMPT.format(expr1=expr1, expr2=expr2) + generate_result = model.generate(user_prompt) + if generate_result and "true" in generate_result.lower(): + result_data["llm_result"] = 1 + else: + result_data["llm_result"] = 0 + result_data["final_result"] = result_data["llm_result"] + return result_data + + expr1_processed = _preprocess_latex(expr1) + expr2_processed = _preprocess_latex(expr2) + expr1_core = _extract_core_eq(expr1_processed) + expr2_core = _extract_core_eq(expr2_processed) + + try: + expr1_sympy = _standardize_expr(parse_latex(expr1_core)) + expr2_sympy = _standardize_expr(parse_latex(expr2_core)) + result_data["preprocessed_expressions"] = { + "expr1": str(expr1_sympy), + "expr2": str(expr2_sympy) + } + + sympy_result = simplify(expr1_sympy - expr2_sympy) == 0 or expr1_sympy.equals(expr2_sympy) + except Exception as e: + result_data["error"] = str(e) + sympy_result = None + + result_data["sympy_result"] = sympy_result + + if sympy_result: + result_data["final_result"] = True + else: + model.sys_prompt = Judge_SYS_PROMPT + user_prompt = Judge_USER_PROMPT.format(expr1=expr1, expr2=expr2) + generate_result = model.generate(user_prompt) + if generate_result and "true" in generate_result.lower(): + result_data["llm_result"] = 1 + else: + result_data["llm_result"] = 0 + result_data["final_result"] = result_data["llm_result"] + + except Exception as e: + 
result_data["error"] = str(e) + + return result_data diff --git a/VLMEvalKit-sudoku/vlmeval/dataset/utils/shortqa.py b/VLMEvalKit-sudoku/vlmeval/dataset/utils/shortqa.py new file mode 100644 index 0000000000000000000000000000000000000000..9e84fd30605ac3237039ae968a8723555aef5177 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/dataset/utils/shortqa.py @@ -0,0 +1,276 @@ +# flake8: noqa +from vlmeval.smp import * + + +EVAL_TMPL = """ +You are an AI assistant tasked with evaluating whether a model's response correctly answers a given visual-language question. + +You will be provided with: +1. The question (text only) +2. The model's response +3. The ground truth answer + +Your task is to determine whether the model's response conveys the same meaning as the ground truth. The response is considered **correct** if: +- It has the same meaning as the ground truth, even if phrased differently. +- It provides additional relevant details without altering the original meaning. + +The response is considered **wrong** if: +- It contradicts the ground-truth +- It misses essential information or include additional incorrect information. + +Your evaluation should include the following fields: +- **Correctness**: Either `"yes"` (if correct) or `"no"` (if incorrect). +- **Reason**: A brief explanation of your judgment. + +{requirement} + +Here are some examples: +{examples} + +Now please complete the following task: + +[Begin Question]{question}[End Question] +[Begin Ground-Truth]{ground_truth}[End Ground-Truth] +[Begin Response]{response}[End Response] +""" + +EVAL_TMPL_CN = """ +你是一名 AI 助理,负责评估模型的回答是否正确回答了给定的视觉语言问题。 + +你将被提供以下信息: +1. 问题(仅包含文本) +2. 模型的回答 +3. 
        'reason': 'The question asks the model to output the name in the format "[First Name] [Given Name]", but the response only contains the family name. Thus the response is incorrect.'
        'reason': 'The question asks the model to output the name in the format "[First Name] [Given Name]", but the response only contains the family name. Thus the response is incorrect.'
且遗漏了 CodeGeeX 与 Flan-T5,因此回答错误。
def ICE_builder(ICEs):
    """Render a list of in-context examples into the bracketed block format
    used by EVAL_TMPL / EVAL_TMPL_CN."""
    res = ''
    for i, eg in enumerate(ICEs):
        res += f"Example {i + 1}:\n"
        res += "[Begin Question]" + eg['question'] + "[End Question]\n"
        res += "[Begin Ground-Truth]" + eg['answer'] + "[End Ground-Truth]\n"
        res += "[Begin Response]" + eg['prediction'] + "[End Response]\n"
        res += "[Begin Correctness]" + eg['correctness'] + "[End Correctness]\n"
        res += "[Begin Reason]" + eg['reason'] + "[End Reason]\n"
        res += '\n'
    return res


def ShortQA_prompt(line):
    """Build the judge prompt for one ShortQA record (``question``, ``answer``,
    ``prediction``, optional ``answer_type``), choosing the Chinese or English
    template and single- vs multi-answer examples."""
    question = line['question']
    is_cn = cn_string(question)
    answer = line['answer']
    answer_type = line.get('answer_type', 'listofstr')
    # FIX: guard against an empty answer string — the original indexed
    # answer[0] unconditionally and raised IndexError on ''.
    if answer and answer[0] == '[' and answer[-1] == ']' and answer_type not in ('exactMatch', 'multipleChoice'):
        # NOTE(review): eval() on dataset-provided text; acceptable only for
        # trusted benchmark files — ast.literal_eval would be safer. Flagged,
        # not changed, to avoid tightening behavior callers may rely on.
        answer = eval(answer)
    else:
        answer = [answer]

    requirements = {
        'en_multi': "The provided ground-truth is a list. The answer is correct if the model response contains and only contains all contents in the list (no other answer included)",  # noqa: E501
        'cn_multi': "题目提供的标准答案是一个列表。如果模型回答包含且仅包含列表中的所有内容,则回答正确",
    }

    examples = ''
    if is_cn:
        examples = ICE_builder(cn_single_ICEs if len(answer) == 1 else cn_multiple_ICEs)
    else:
        examples = ICE_builder(en_single_ICEs if len(answer) == 1 else en_multiple_ICEs)

    if len(answer) > 1:
        requirement = requirements['en_multi'] if not is_cn else requirements['cn_multi']
    else:
        requirement = ''
        answer = answer[0]

    tmpl = EVAL_TMPL_CN if is_cn else EVAL_TMPL
    prompt = tmpl.format(
        question=question,
        examples=examples,
        requirement=requirement,
        ground_truth=answer,
        response=line['prediction']
    )
    return prompt
def evaluate_tabfact(data, score_keys):
    """Score True/False TabFact predictions.

    Each ``instance`` needs ``prediction`` (free text) and ``answer``
    ('1' for True, '0' for False). A per-instance score is written into
    ``instance['scores'][score_keys[0]]``; predictions containing both
    'true' and 'false' get score None and are counted for manual check.
    Returns a meta dict with the accuracy (percentage).
    """
    num_examples = 0
    num_correct = 0
    manual_check = 0
    start_time = time.time()
    for instance in data:
        # Missing prediction is treated as the literal string 'none'
        # (matches neither 'true' nor 'false').
        if instance['prediction'] is None:
            instance['prediction'] = 'none'
        pred = instance['prediction'].lower()
        gt = instance['answer']
        num_examples += 1
        if 'true' in pred and 'false' in pred:
            # Ambiguous output — cannot be scored automatically.
            manual_check += 1
            score = None
        elif 'true' in pred and gt == '1':
            num_correct += 1
            score = 1
        elif 'false' in pred and gt == '0':
            num_correct += 1
            score = 1
        else:
            score = 0
        instance['scores'] = {score_keys[0]: score}
    if manual_check > 0:
        print(f'the number of not properly parsed samples: {manual_check}')
    end_time = time.time()
    elapsed_time = end_time - start_time
    # Epsilon keeps the division defined for empty input.
    Accuracy = round((num_correct + 1e-9) / (num_examples + 1e-9), 8) * 100
    meta = {
        'evaluators': 'correctness',
        'score_info': [score_keys[0]],
        'evaluated_time': elapsed_time,
        'total_num_sample': len(data),
        'average_scores': [Accuracy],
    }
    return meta


def evaluate_wtq(data, score_keys):
    """Score WTQ predictions by denotation match (via to_value_list /
    check_denotation). Writes per-instance scores and returns a meta dict."""
    num_examples = 0
    num_correct = 0
    start_time = time.time()

    for instance in data:
        # FIX: guard against a missing prediction, consistent with
        # evaluate_tabfact — the original crashed on None.replace(...).
        if instance['prediction'] is None:
            instance['prediction'] = 'none'
        pred = instance['prediction'].replace('||', '|')
        gt = instance['answer']
        original_strings = tsv_unescape_list(gt)
        target_values = to_value_list(original_strings)

        predicted_strings = tsv_unescape_list(pred)
        predicted_values = to_value_list(predicted_strings)
        correct = check_denotation(target_values, predicted_values)
        num_examples += 1
        score = 0
        if correct:
            num_correct += 1
            score = 1
        instance['scores'] = {score_keys[0]: score}

    end_time = time.time()
    elapsed_time = end_time - start_time

    Accuracy = round((num_correct + 1e-9) / (num_examples + 1e-9), 8) * 100
    meta = {
        'evaluators': 'correctness',
        'score_info': [score_keys[0]],
        'evaluated_time': elapsed_time,
        'total_num_sample': len(data),
        'average_scores': [Accuracy],
    }
    return meta
def evaluate_fintabnet(data, score_keys):
    """Score FinTabNet-QA predictions.

    Two match modes per instance: an exact match on the normalized value,
    and a 'relieved' match that also accepts the unit-free variant returned
    by fintabnet_normalize. Both scores are written into the instance.
    """
    total = 0
    exact_hits, relieved_hits = 0, 0
    start_time = time.time()
    for sample in data:
        pred, pred_variants = fintabnet_normalize(sample['prediction'])
        gt, gt_variants = fintabnet_normalize(sample['answer'])
        exact = 1 if gt == pred else 0
        relieved = any(p == g for p in pred_variants for g in gt_variants)
        total += 1
        exact_score, relieved_score = 0, 0
        if exact:
            exact_hits += 1
            exact_score = 1
        if relieved:
            relieved_hits += 1
            relieved_score = 1
        sample['scores'] = {score_keys[0]: relieved_score, 'exact_score': exact_score}

    elapsed_time = time.time() - start_time
    exact_acc = round((exact_hits + 1e-9) / (total + 1e-9), 8) * 100
    relieved_acc = round((relieved_hits + 1e-9) / (total + 1e-9), 8) * 100
    meta = {
        'evaluators': 'correctness',
        'score_info': ['relieved_accuracy', score_keys[0]],
        'evaluated_time': elapsed_time,
        'total_num_sample': len(data),
        'average_scores': [relieved_acc, exact_acc],
    }
    return meta


def fintabnet_normalize(s):
    """Normalize a financial-table answer toward a comparable float.

    Returns ``(primary, [primary, unit_free])`` where *primary* has unit
    words converted to exponents (million -> e6, % -> e-2, ...) and
    *unit_free* has them stripped. Falls back to strings when the cleaned
    text is not numeric.
    """
    s = normalize(s)
    remove_words = [
        'dollar', 'gallons', 'square feet', 'shares', 'mbtu',
        'mbpd', 'mbbls', 'mmbtu', 'unit', 'gwh', 'year', 'mmcf', 'mile', 'mboe'
    ]

    # Drop currency / grouping characters: $, (, ), commas.
    s = re.sub(r'[\$\(\),]', '', s)

    # Remove bare unit words (optionally pluralized), case-insensitively.
    pattern = r'\b(' + '|'.join(remove_words) + r')s?\b'
    s = re.sub(pattern, '', s, flags=re.IGNORECASE)

    # Order matters: space-prefixed variants must be tried first so that
    # e.g. "5 million" becomes "5e6" rather than "5 e6".
    unit_conversion = {
        r' \bthousand\b': 'e3',
        r' \bmillion\b': 'e6',
        r' \bbillion\b': 'e9',
        r'\bthousand\b': 'e3',
        r'\bmillion\b': 'e6',
        r'\bbillion\b': 'e9',
        r' ?%': 'e-2',
    }

    s_unit_free = s
    for unit_pattern, exponent in unit_conversion.items():
        s = re.sub(unit_pattern, exponent, s)
        s_unit_free = re.sub(unit_pattern, '', s_unit_free)

    try:
        # Numeric answers compare as floats; both variants are returned.
        return float(s), [float(s), float(s_unit_free)]
    except ValueError:
        # Non-numeric answer: compare the cleaned strings instead.
        return s, [s, s_unit_free]
def tsv_unescape_list(x):
    """Unescape a list stored in one TSV field.

    List items are joined with vertical bars (0x7C).

    Args:
        x (str or unicode)
    Returns:
        a list of unicodes
    """
    return [tsv_unescape(y) for y in x.split('|')]


def get_dimension_rating(data_path, category_type='task_type'):
    """Aggregate per-category accuracy from a scored result file.

    Args:
        data_path: path accepted by the project-level ``load`` helper;
            rows must expose ``category_type`` and a truthy ``score``.
        category_type: column used to bucket the rows.
    Returns:
        dict mapping category -> [num_correct, num_total, 'xx.xx%'],
        plus an 'overall' entry across all categories.
    """
    data = load(data_path)
    result_board = {}
    for idx, item in data.iterrows():
        if item[category_type] not in result_board:
            result_board[item[category_type]] = [0, 0]
        # Slot 1 counts samples, slot 0 counts correct ones.
        result_board[item[category_type]][1] += 1
        if item['score']:
            result_board[item[category_type]][0] += 1

    correct = 0
    total = 0
    for key, value in result_board.items():
        correct += value[0]
        total += value[1]
        # Append the formatted percentage as a third element.
        result_board[key].append(f'{value[0] / value[1] * 100:.2f}%')

    result_board['overall'] = [correct, total, f'{correct / total * 100:.2f}%']

    return result_board


def process_results(score_file, model_name):
    """Compute per-tamper-type binary classification metrics for one model.

    Reads the scored xlsx, reconstructs each row's Yes/No prediction from
    ``score`` and ``answer``, and for every tamper type (except 'original')
    evaluates metrics on that group COMBINED with the 'original' rows, so
    negatives are always represented. Returns a DataFrame with one row per
    tamper type plus an 'overall' macro-average row.
    """
    from sklearn.metrics import (
        accuracy_score,
        precision_score,
        recall_score,
        f1_score,
        classification_report,
        confusion_matrix,
        roc_auc_score
    )
    data = pd.read_excel(score_file)

    # Create the prediction column based on the Score and Answer columns:
    # score == 1 means the model agreed with the answer; otherwise the
    # prediction must have been the opposite label.
    data['prediction'] = data.apply(
        lambda row: row['answer'] if row['score'] == 1 else ('Yes' if row['answer'] == 'No' else 'No'), axis=1
    )

    # Recompute metrics for tamper types including 'original' in the
    # calculations but exclude 'original' from the output.
    grouped_metrics_with_original_excluding_original = {}

    original_group = data[data['tamper_type'] == 'original']

    for tamper_type, group in data[data['tamper_type'] != 'original'].groupby('tamper_type'):
        # Combine the current group with the 'original' group
        combined_group = pd.concat([group, original_group])

        # Extract ground truth and predictions for the combined group
        y_true_group = combined_group['answer'].map({'Yes': 1, 'No': 0})
        y_pred_group = combined_group['prediction'].map({'Yes': 1, 'No': 0})

        # Calculate metrics for the combined group
        accuracy = accuracy_score(y_true_group, y_pred_group)
        precision = precision_score(y_true_group, y_pred_group, zero_division=0)
        recall = recall_score(y_true_group, y_pred_group, zero_division=0)
        f1 = f1_score(y_true_group, y_pred_group, zero_division=0)
        conf_matrix = confusion_matrix(y_true_group, y_pred_group)

        # Store metrics for the tamper_type
        grouped_metrics_with_original_excluding_original[tamper_type] = {
            "Accuracy": accuracy,
            "Precision": precision,
            "Recall": recall,
            "F1 Score": f1,
            "Confusion Matrix": conf_matrix.tolist()  # Convert to list for JSON compatibility
        }

    # Display the metrics in a dataframe for clarity
    df_grouped_metrics_with_original_excluding_original = pd.DataFrame.from_dict(
        grouped_metrics_with_original_excluding_original, orient='index'
    )

    # Compute Macro Averages for Accuracy, Precision, Recall, and F1 Score
    macro_averages = {
        "Accuracy": df_grouped_metrics_with_original_excluding_original["Accuracy"].mean(),
        "Precision": df_grouped_metrics_with_original_excluding_original["Precision"].mean(),
        "Recall": df_grouped_metrics_with_original_excluding_original["Recall"].mean(),
        "F1 Score": df_grouped_metrics_with_original_excluding_original["F1 Score"].mean(),
        "Confusion Matrix": "N/A"  # Macro average doesn't have a meaningful confusion matrix
    }

    # Add the Macro Average row to the DataFrame
    df_grouped_metrics_with_original_excluding_original.loc["overall"] = macro_averages

    # Round-trip through JSON to get plain-Python values keyed by tamper type.
    metrics_dict = json.loads(df_grouped_metrics_with_original_excluding_original.T.to_json())
    # Process Model Level Metrics
    formatted_data = []
    for task, task_metrics in metrics_dict.items():
        task_metrics['Model'] = model_name
        task_metrics['Task'] = task
        formatted_data.append(task_metrics)

    df_metrics = pd.DataFrame(formatted_data)

    # Reorder columns to make 'Model' and 'Task' appear first
    columns_order = ['Model', 'Task'] + [col for col in df_metrics.columns if col not in ['Model', 'Task']]
    df_metrics = df_metrics[columns_order]

    return df_metrics
def aggregate_metrics_with_macro_average(score_file):
    """Compute tamper-type metrics per task_type, each tamper group combined
    with that task's 'original' rows, plus an 'overall' macro-average row per
    task. Returns a DataFrame of mean metrics grouped by (task_type, tamper_type)."""
    from sklearn.metrics import (
        accuracy_score,
        precision_score,
        recall_score,
        f1_score,
        classification_report,
        confusion_matrix,
        roc_auc_score
    )
    # Load data
    data = pd.read_excel(score_file)

    # Reconstruct the Yes/No prediction from score + answer.
    data['prediction'] = data.apply(
        lambda row: row['answer'] if row['score'] == 1 else ('Yes' if row['answer'] == 'No' else 'No'), axis=1
    )

    task_type_metrics = {}

    # Process each task_type separately
    for task_type, task_group in data.groupby('task_type'):
        # Separate the 'original' group for the current task_type
        original_group = task_group[task_group['tamper_type'] == 'original']

        # Skip if there is no 'original' data for this task_type
        if original_group.empty:
            continue

        # Process each tamper type for the current task_type (excluding 'original')
        tamper_metrics = {}
        for tamper_type, tamper_group in task_group[task_group['tamper_type'] != 'original'].groupby('tamper_type'):

            # Combine the tamper group with the original group of the current task_type
            combined_group = pd.concat([tamper_group, original_group])

            # Map answers and predictions to binary values
            y_true = combined_group['answer'].map({'Yes': 1, 'No': 0})
            y_pred = combined_group['prediction'].map({'Yes': 1, 'No': 0})

            # Compute metrics
            accuracy = accuracy_score(y_true, y_pred)
            precision = precision_score(y_true, y_pred, zero_division=0)
            recall = recall_score(y_true, y_pred, zero_division=0)
            f1 = f1_score(y_true, y_pred, zero_division=0)
            conf_matrix = confusion_matrix(y_true, y_pred)

            tamper_metrics[tamper_type] = {
                "Accuracy": accuracy,
                "Precision": precision,
                "Recall": recall,
                "F1 Score": f1,
                "Confusion Matrix": conf_matrix.tolist()  # JSON compatibility
            }

        # Compute Macro Averages for the current task_type
        metrics_df = pd.DataFrame(tamper_metrics).T
        macro_average = {
            "Accuracy": metrics_df["Accuracy"].mean(),
            "Precision": metrics_df["Precision"].mean(),
            "Recall": metrics_df["Recall"].mean(),
            "F1 Score": metrics_df["F1 Score"].mean(),
            "Confusion Matrix": "N/A"  # no meaningful macro confusion matrix
        }
        tamper_metrics["overall"] = macro_average

        task_type_metrics[task_type] = tamper_metrics

    # Transform the nested dictionary into a DataFrame
    dataframes = []
    for task_type, metrics in task_type_metrics.items():
        task_df = pd.DataFrame.from_dict(metrics, orient='index')
        task_df['task_type'] = task_type
        dataframes.append(task_df)

    result_df = pd.concat(dataframes).reset_index().rename(columns={'index': 'tamper_type'})
    result_df = result_df[['task_type', 'tamper_type', 'Accuracy', 'Precision', 'Recall',
                           'F1 Score', 'Confusion Matrix']]

    # Average the numeric metrics per (task_type, tamper_type).
    numeric_columns = ['Accuracy', 'Precision', 'Recall', 'F1 Score']
    average_metrics = result_df.groupby(['task_type', 'tamper_type'])[numeric_columns].mean().reset_index()

    return average_metrics


def check_ans(pred, gt):
    """
    Checks if the predicted answer matches the ground truth.

    Args:
        pred (str): The predicted answer.
        gt (str): The ground truth answer.

    Returns:
        bool: True if the predicted answer matches the ground truth, False otherwise.
    """
    flag = False

    # Split prediction into option and content
    pred_list = pred.lower().strip().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])

    # Split ground truth into option and content
    gt_list = gt.lower().strip().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])

    # FIX: the original indexed gt_content[-1] and raised IndexError when the
    # ground truth was a single word (gt_content == '').
    if gt_content.endswith('.'):
        gt_content = gt_content[:-1]

    # Condition 1: predicted option (sans dots) is a substring of the gt option
    if pred_option.replace('.', '') in gt_option:
        flag = True
    # Condition 2: gt option is a substring of the predicted option
    elif gt_option in pred_option:
        flag = True
    # Condition 3: the full ground truth appears inside the prediction
    elif gt in pred:
        flag = True

    return flag


def check_ans_with_model(pred, gt, model, item, dataset_name='MVBench'):
    """
    Checks if the predicted answer matches the ground truth using a given model.

    Args:
        pred (str): The predicted answer.
        gt (str): The ground truth answer.
        model: A machine learning model used for additional verification.
        item (dict): An item containing information about the question or task.
        dataset_name (str, optional): Name of the dataset being used. Defaults to 'MVBench'.

    Returns:
        bool: True if the predicted answer matches the ground truth, False otherwise.
    """
    flag = False

    pred_list = pred.lower().strip().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])
    gt_list = gt.lower().strip().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])

    # FIX: same single-word-ground-truth IndexError as in check_ans.
    if gt_content.endswith('.'):
        gt_content = gt_content[:-1]

    # Condition 1: predicted option (sans dots) is a substring of the gt option
    if pred_option.replace('.', '') in gt_option:
        flag = True
    # Condition 2: gt option is a substring of the predicted option
    elif gt_option in pred_option:
        flag = True
    # Condition 3: fall back to the LLM-based option extractor
    elif extract_answer_from_item(model, item, dataset_name)['opt'] == item['answer']:
        flag = True

    return flag
def check_ans_advanced(pred, gt):
    """Match a prediction against a ground truth, additionally translating a
    numeric ground-truth content (0-9) into its English word before the
    substring comparison."""
    number_table = {
        0: 'zero',
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
    }
    flag = False

    pred_list = pred.lower().split(' ')
    pred_option, _ = pred_list[0], ' '.join(pred_list[1:])
    gt_list = gt.lower().split(' ')
    gt_option, gt_content = gt_list[0], ' '.join(gt_list[1:])
    # FIX: the original indexed gt_content[-1] and raised IndexError when the
    # ground truth was a single word (gt_content == '').
    if gt_content.endswith('.'):
        gt_content = gt_content[:-1]

    try:
        # Digits 0-9 are compared by their English word form.
        gt_content = number_table[int(gt_content.strip('. \n'))]
        print(gt_content)  # debug output, kept from the original
    # FIX: was a bare `except:`; only conversion/lookup failures are expected.
    except (ValueError, KeyError):
        pass

    if pred_option.replace('.', '') in gt_option:
        flag = True
    elif gt_option in pred_option:
        flag = True
    elif gt_content.lower().strip('. \n') in pred.lower().strip('. \n'):
        flag = True

    return flag


class GroupRandomCrop(object):
    """Crop every image of a group at the same random location."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size

        out_images = list()

        # One random offset shared by the whole group.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)

        for img in img_group:
            assert (img.size[0] == w and img.size[1] == h)
            if w == tw and h == th:
                out_images.append(img)
            else:
                out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))

        return out_images


class MultiGroupRandomCrop(object):
    """Like GroupRandomCrop but repeats the group `groups` times, each pass
    with its own random offset."""

    def __init__(self, size, groups=1):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.groups = groups

    def __call__(self, img_group):
        w, h = img_group[0].size
        th, tw = self.size

        out_images = list()

        for i in range(self.groups):
            x1 = random.randint(0, w - tw)
            y1 = random.randint(0, h - th)

            for img in img_group:
                assert (img.size[0] == w and img.size[1] == h)
                if w == tw and h == th:
                    out_images.append(img)
                else:
                    out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))

        return out_images


class GroupCenterCrop(object):
    """Apply torchvision's CenterCrop to every image in the group."""

    def __init__(self, size):
        self.worker = torchvision.transforms.CenterCrop(size)

    def __call__(self, img_group):
        return [self.worker(img) for img in img_group]
self.is_flow: + for i in range(0, len(ret), 2): + # invert flow pixel values when flipping + ret[i] = ImageOps.invert(ret[i]) + return ret + else: + return img_group + + +class GroupNormalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, tensor): + rep_mean = self.mean * (tensor.size()[0] // len(self.mean)) + rep_std = self.std * (tensor.size()[0] // len(self.std)) + + # TODO: make efficient + for t, m, s in zip(tensor, rep_mean, rep_std): + t.sub_(m).div_(s) + + return tensor + + +class GroupScale(object): + """ Rescales the input PIL.Image to the given 'size'. + 'size' will be the size of the smaller edge. + For example, if height > width, then image will be + rescaled to (size * height / width, size) + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + self.worker = torchvision.transforms.Resize(size, interpolation) + + def __call__(self, img_group): + return [self.worker(img) for img in img_group] + + +class GroupOverSample(object): + def __init__(self, crop_size, scale_size=None, flip=True): + self.crop_size = crop_size if not isinstance( + crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + self.flip = flip + + def __call__(self, img_group): + + if self.scale_worker is not None: + img_group = self.scale_worker(img_group) + + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + offsets = GroupMultiScaleCrop.fill_fix_offset( + False, image_w, image_h, crop_w, crop_h) + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + 
flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + if self.flip: + oversample_group.extend(flip_group) + return oversample_group + + +class GroupFullResSample(object): + def __init__(self, crop_size, scale_size=None, flip=True): + self.crop_size = crop_size if not isinstance( + crop_size, int) else (crop_size, crop_size) + + if scale_size is not None: + self.scale_worker = GroupScale(scale_size) + else: + self.scale_worker = None + self.flip = flip + + def __call__(self, img_group): + + if self.scale_worker is not None: + img_group = self.scale_worker(img_group) + + image_w, image_h = img_group[0].size + crop_w, crop_h = self.crop_size + + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + offsets = list() + offsets.append((0 * w_step, 2 * h_step)) # left + offsets.append((4 * w_step, 2 * h_step)) # right + offsets.append((2 * w_step, 2 * h_step)) # center + + oversample_group = list() + for o_w, o_h in offsets: + normal_group = list() + flip_group = list() + for i, img in enumerate(img_group): + crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h)) + normal_group.append(crop) + if self.flip: + flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT) + + if img.mode == 'L' and i % 2 == 0: + flip_group.append(ImageOps.invert(flip_crop)) + else: + flip_group.append(flip_crop) + + oversample_group.extend(normal_group) + oversample_group.extend(flip_group) + return oversample_group + + +class GroupMultiScaleCrop(object): + + def __init__(self, input_size, scales=None, max_distort=1, + fix_crop=True, more_fix_crop=True): + self.scales = scales if scales is not None else [1, .875, .75, .66] + self.max_distort = max_distort + self.fix_crop = fix_crop + self.more_fix_crop = more_fix_crop + self.input_size = input_size if not isinstance(input_size, int) else [ + input_size, input_size] + self.interpolation = Image.BILINEAR + + def __call__(self, img_group): + + im_size = 
img_group[0].size + + crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) + crop_img_group = [ + img.crop( + (offset_w, + offset_h, + offset_w + crop_w, + offset_h + crop_h)) for img in img_group] + ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) + for img in crop_img_group] + return ret_img_group + + def _sample_crop_size(self, im_size): + image_w, image_h = im_size[0], im_size[1] + + # find a crop size + base_size = min(image_w, image_h) + crop_sizes = [int(base_size * x) for x in self.scales] + crop_h = [ + self.input_size[1] if abs( + x - self.input_size[1]) < 3 else x for x in crop_sizes] + crop_w = [ + self.input_size[0] if abs( + x - self.input_size[0]) < 3 else x for x in crop_sizes] + + pairs = [] + for i, h in enumerate(crop_h): + for j, w in enumerate(crop_w): + if abs(i - j) <= self.max_distort: + pairs.append((w, h)) + + crop_pair = random.choice(pairs) + if not self.fix_crop: + w_offset = random.randint(0, image_w - crop_pair[0]) + h_offset = random.randint(0, image_h - crop_pair[1]) + else: + w_offset, h_offset = self._sample_fix_offset( + image_w, image_h, crop_pair[0], crop_pair[1]) + + return crop_pair[0], crop_pair[1], w_offset, h_offset + + def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): + offsets = self.fill_fix_offset( + self.more_fix_crop, image_w, image_h, crop_w, crop_h) + return random.choice(offsets) + + @staticmethod + def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): + w_step = (image_w - crop_w) // 4 + h_step = (image_h - crop_h) // 4 + + ret = list() + ret.append((0, 0)) # upper left + ret.append((4 * w_step, 0)) # upper right + ret.append((0, 4 * h_step)) # lower left + ret.append((4 * w_step, 4 * h_step)) # lower right + ret.append((2 * w_step, 2 * h_step)) # center + + if more_fix_crop: + ret.append((0, 2 * h_step)) # center left + ret.append((4 * w_step, 2 * h_step)) # center right + ret.append((2 * w_step, 4 * h_step)) # lower center + 
ret.append((2 * w_step, 0 * h_step)) # upper center + + ret.append((1 * w_step, 1 * h_step)) # upper left quarter + ret.append((3 * w_step, 1 * h_step)) # upper right quarter + ret.append((1 * w_step, 3 * h_step)) # lower left quarter + ret.append((3 * w_step, 3 * h_step)) # lower righ quarter + + return ret + + +class GroupRandomSizedCrop(object): + """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size + and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio + This is popularly used to train the Inception networks + size: size of the smaller edge + interpolation: Default: PIL.Image.BILINEAR + """ + + def __init__(self, size, interpolation=Image.BILINEAR): + self.size = size + self.interpolation = interpolation + + def __call__(self, img_group): + for attempt in range(10): + area = img_group[0].size[0] * img_group[0].size[1] + target_area = random.uniform(0.08, 1.0) * area + aspect_ratio = random.uniform(3. / 4, 4. / 3) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if random.random() < 0.5: + w, h = h, w + + if w <= img_group[0].size[0] and h <= img_group[0].size[1]: + x1 = random.randint(0, img_group[0].size[0] - w) + y1 = random.randint(0, img_group[0].size[1] - h) + found = True + break + else: + found = False + x1 = 0 + y1 = 0 + + if found: + out_group = list() + for img in img_group: + img = img.crop((x1, y1, x1 + w, y1 + h)) + assert (img.size == (w, h)) + out_group.append( + img.resize( + (self.size, self.size), self.interpolation)) + return out_group + else: + # Fallback + scale = GroupScale(self.size, interpolation=self.interpolation) + crop = GroupRandomCrop(self.size) + return crop(scale(img_group)) + + +class ConvertDataFormat(object): + def __init__(self, model_type): + self.model_type = model_type + + def __call__(self, images): + if self.model_type == '2D': + return images + tc, h, w = images.size() + t = tc // 3 + images = 
class Stack(object):
    """Concatenate a group of PIL frames into one H x W x (C*T) ndarray.

    Greyscale ('L') frames get a singleton channel axis first; RGB frames
    are concatenated directly, optionally with channels rolled to BGR.
    """

    def __init__(self, roll=False):
        self.roll = roll

    def __call__(self, img_group):
        mode = img_group[0].mode
        if mode == 'L':
            return np.concatenate(
                [np.expand_dims(frame, 2) for frame in img_group], axis=2)
        elif mode == 'RGB':
            if self.roll:
                # Reverse channel order (RGB -> BGR) frame by frame.
                return np.concatenate(
                    [np.array(frame)[:, :, ::-1] for frame in img_group], axis=2)
            return np.concatenate(img_group, axis=2)


class ToTorchFormatTensor(object):
    """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
    to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """

    def __init__(self, div=True):
        self.div = div

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # numpy path: HWC -> CHW.
            img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
        else:
            # PIL path: view the raw byte buffer as H x W x C, then move
            # channels first (this transpose dominates loading time).
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
            img = img.view(pic.size[1], pic.size[0], len(pic.mode))
            img = img.transpose(0, 1).transpose(0, 2).contiguous()
        return img.float().div(255) if self.div else img.float()


class IdentityTransform(object):
    """No-op transform: returns its input unchanged."""

    def __call__(self, data):
        return data


def load_data(file_path):
    """Load and return the JSON document stored at ``file_path``."""
    with open(file_path, 'r', encoding='utf-8') as f:
        return json.load(f)
def format_metrics_results(results):
    """Render every numeric metric value as a fixed three-decimal string.

    Non-numeric values (e.g. the '' no-data sentinel) pass through
    unchanged.
    """

    def _fmt(stats):
        return {
            key: f"{val:.3f}" if isinstance(val, (int, float)) else val
            for key, val in stats.items()
        }

    return {
        "Overall Metrics": {
            name: _fmt(stats)
            for name, stats in results["overall_metrics"].items()
        }
    }
def calu_pre_recall(pre_file, recall_file, txt_file):
    """Aggregate per-item precision/recall judgement files and emit a
    human-readable summary (printed and written to ``txt_file``)."""
    data_pre = load_data(pre_file)
    data_recall = load_data(recall_file)

    results = calculate_metrics(data_pre, data_recall)
    formatted_results = format_metrics_results(results)

    print_results(formatted_results, txt_file)


def read_json(file_path):
    """Load and return the JSON document stored at ``file_path``."""
    with open(file_path, 'r', encoding="utf-8") as file:
        return json.load(file)


def read_jsonl(file_path):
    """Read a JSON-Lines file into a list (one parsed object per line)."""
    with open(file_path, 'r', encoding="utf-8") as file:
        return [json.loads(line.strip()) for line in file]


def save_json(data, file_path, indent=4):
    """Write ``data`` as pretty-printed UTF-8 JSON."""
    with open(file_path, 'w', encoding="utf-8") as file:
        json.dump(data, file, ensure_ascii=False, indent=indent)


def save_jsonl(data, file_path):
    """Write an iterable of objects as JSON-Lines (one object per line)."""
    with open(file_path, 'w', encoding="utf-8") as file:
        for item in data:
            json.dump(item, file, ensure_ascii=False)
            file.write("\n")


def calculate_time_iou(interval1, interval2):
    """IoU of two 1-D time intervals given as (start, end) pairs.

    The "union" here is the spanning interval (min start .. max end),
    matching the original metric definition. Returns 0 for a degenerate
    zero-length union.
    """
    start1, end1 = interval1
    start2, end2 = interval2

    intersection = max(0, min(end1, end2) - max(start1, start2))
    union = max(end1, end2) - min(start1, start2)

    if union == 0:
        return 0
    return intersection / union


def is_valid_time_interval(interval_str):
    """Return True iff ``interval_str`` parses to a 2-element numeric list."""
    try:
        interval = ast.literal_eval(interval_str)
    except (ValueError, SyntaxError):
        return False
    return (
        isinstance(interval, list)
        and len(interval) == 2
        and all(isinstance(x, (int, float)) for x in interval)
    )


def is_valid_space_interval(s):
    """Return True iff ``s`` looks like ``[x1, y1, x2, y2]`` with int parts."""
    if not (s.startswith('[') and s.endswith(']')):
        return False
    parts = s[1:-1].split(',')
    if len(parts) != 4:
        return False
    for part in parts:
        try:
            int(part.strip())
        except ValueError:
            return False
    return True


def string_to_list(s):
    """Parse ``"[a, b, c, d]"`` into a list of ints (no validation)."""
    return [int(part.strip()) for part in s[1:-1].split(',')]


def extract_json_between_backticks(s):
    """Validate that ``s`` is parseable JSON and return it unchanged.

    (Historically stripped ```json fences; the raw string is now used
    directly.) Raises ValueError on invalid JSON.
    """
    try:
        json.loads(s)
    except json.JSONDecodeError as e:
        raise ValueError(f"Extracted content is not valid JSON: {e}") from e
    return s


def calculate_recall(json_object):
    """Tally Matched/Unmatched judgements per step type."""
    stats = {
        "Video Description Steps": {"Matched": 0, "Unmatched": 0},
        "Logical Inference Steps": {"Matched": 0, "Unmatched": 0},
        "Background Review Steps": {"Matched": 0, "Unmatched": 0}
    }
    for item in json_object:
        # A KeyError here means the judge emitted an unknown step type
        # or verdict — fail loudly rather than miscount.
        stats[item["step_type"]][item["judgment"]] += 1
    return stats


def calculate_space_iou(box1, box2):
    """IoU of two axis-aligned boxes given as [x1, y1, x2, y2]."""
    x1_1, y1_1, x2_1, y2_1 = box1
    x1_2, y1_2, x2_2, y2_2 = box2

    x1_i = max(x1_1, x1_2)
    y1_i = max(y1_1, y1_2)
    x2_i = min(x2_1, x2_2)
    y2_i = min(y2_1, y2_2)

    if x2_i < x1_i or y2_i < y1_i:
        return 0.0

    inter_area = (x2_i - x1_i) * (y2_i - y1_i)
    area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
    area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
    return inter_area / (area1 + area2 - inter_area)


def calculate_precision(json_object):
    """Tally Matched/Wrong/Redundant judgements per step type."""
    stats = {
        "Video Description Steps": {"Matched": 0, "Wrong": 0, "Redundant": 0},
        "Logical Inference Steps": {"Matched": 0, "Wrong": 0, "Redundant": 0},
        "Background Review Steps": {"Matched": 0, "Wrong": 0, "Redundant": 0}
    }
    for item in json_object:
        stats[item["step_type"]][item["judgment"]] += 1
    return stats


def _safe_ratio(numerator, denominator):
    """numerator/denominator, or '' when the denominator is zero — the
    sentinel the aggregation stage treats as "no data"."""
    return "" if denominator == 0 else numerator / denominator


def recall(item):
    """Attach per-type and overall recall scores to a judged item.

    Overall recall intentionally ignores Background Review steps.
    Empty categories yield '' rather than a division-by-zero.
    """
    processed_item = copy.deepcopy(item)

    judgements = json.loads(extract_json_between_backticks(processed_item['recall_eval']))
    stats = calculate_recall(judgements)

    video = stats['Video Description Steps']
    logic = stats['Logical Inference Steps']
    background = stats['Background Review Steps']

    processed_item['Video_recall'] = _safe_ratio(
        video['Matched'], video['Matched'] + video['Unmatched'])
    processed_item['logic_recall'] = _safe_ratio(
        logic['Matched'], logic['Matched'] + logic['Unmatched'])
    processed_item['background_recall'] = _safe_ratio(
        background['Matched'], background['Matched'] + background['Unmatched'])

    total_matched = video['Matched'] + logic['Matched']
    total_steps = (video['Matched'] + video['Unmatched']
                   + logic['Matched'] + logic['Unmatched'])
    processed_item['overall_recall'] = _safe_ratio(total_matched, total_steps)

    return processed_item


def precision(item):
    """Attach per-type precision, overall precision and step efficiency.

    Overall precision intentionally ignores Background Review steps;
    efficiency is the fraction of non-redundant steps. Empty categories
    yield '' rather than a division-by-zero.
    """
    processed_item = copy.deepcopy(item)

    judgements = json.loads(extract_json_between_backticks(processed_item['precision_eval']))
    stats = calculate_precision(judgements)

    video = stats['Video Description Steps']
    logic = stats['Logical Inference Steps']
    background = stats['Background Review Steps']

    processed_item['Video_precision'] = _safe_ratio(
        video['Matched'], video['Matched'] + video['Wrong'])
    processed_item['logic_precision'] = _safe_ratio(
        logic['Matched'], logic['Matched'] + logic['Wrong'])
    processed_item['background_precision'] = _safe_ratio(
        background['Matched'], background['Matched'] + background['Wrong'])

    total_matched = video['Matched'] + logic['Matched']
    total_wrong = video['Wrong'] + logic['Wrong']
    processed_item['overall_precision'] = _safe_ratio(
        total_matched, total_matched + total_wrong)

    total_step_num = sum(sum(counts.values()) for counts in stats.values())
    redundant_num = sum(counts['Redundant'] for counts in stats.values())
    # Bug fix: the original divided unconditionally and crashed with
    # ZeroDivisionError on an empty judgement list.
    processed_item['efficiency'] = _safe_ratio(
        total_step_num - redundant_num, total_step_num)

    return processed_item
Judge each ground truth middle step in order without omitting any step. + +Here is the problem, answer, solution, and the ground truth middle steps: +""" + +Precision_Evaluation_Prompt = """ +# Task Overview +Given a solution with multiple reasoning steps for an video-based problem, reformat it into well-structured steps and evaluate their correctness. + +# Step 1: Reformatting the Solution +Convert the unstructured solution into distinct reasoning steps while: +- Preserving all original content and order +- Not adding new interpretations +- Not omitting any steps + +## Step Types +1. Logical Inference Steps + - Contains exactly one logical deduction + - Must produce a new derived conclusion + - Cannot be just a summary or observation + +2. Video Description Steps + - Pure visual observations + - Only includes directly visible elements + - No inferences or assumptions + - Contains event time + +3. Background Review Steps: + - Repetition or review of the problem + - Not directly related to solving the problem. + +## Step Requirements +- Each step must be atomic (one conclusion per step) +- No content duplication across steps +- Initial analysis counts as background information +- Final answer determination counts as logical inference + +# Step 2: Evaluating Correctness +Evaluate each step against: + +## Ground Truth Matching +For video descriptions: +- Key elements must match ground truth descriptions + +For logical inferences: +- Conclusion must EXACTLY match or be DIRECTLY entailed by ground truth + +For Background review: +- Without special circumstances are deemed to be redundant + +## Reasonableness Check (if no direct match) +If Step: +- Premises must not contradict any ground truth or correct answer +- Logic is valid +- Conclusion must not contradict any ground truth +- Conclusion must support or be neutral to correct answer +- Helpful in solving the problem, non-redundant steps +this Step be viewed as matched. 
+ +## Judgement Categories +- "Match": Aligns with ground truth +- "Wrong": Contradictory with ground truth +- "Redundant": Redundant steps that do not help solve the problem + +# Output Requirements +1. The output format MUST be in valid JSON format without ANY other content. +2. For highly repetitive patterns, output it as a single step. +3. Output maximum 35 steps. Always include the final step that contains the answer. + +Here is the json output format: +## Output Format +[ + {{ + "step": "reformatted the solution step", + "step_type": "Video Description Steps|Logical Inference Steps|Background Review Steps", + "reasons_for_judgment": "The reason for judging the matching result of the step in the solution based on Ground Truth Information. Sufficient evidence needs to be found in Ground Truth Information to determine the correctness of the reformatted the solution step. The video event description time error is no more than 3 seconds and is considered correct. If the solution step does not specify the time, it is considered wrong.", + "judgment": "Matched|Wrong|Redundant", + }} +] + +Here is the problem, and the solution that needs to be reformatted to steps: + +""" + +Answer_Extraction_Prompt_part1 = """You are an AI assistant who will help me to extract an answer of a question. You are provided with a question and a response, and you need to find the final answer of the question. + +Extract Rule: +[Multiple choice question] +1. The answer could be answering the option letter or the value. You should directly output the choice letter of the answer. +2. You should output a single uppercase character in A, B, C, D, E, F, G, H, I (if they are valid options), and Z. +3. If the meaning of all options are significantly different from the final answer, output Z. +[Non Multiple choice question] +1. Output the final value of the answer. It could be hidden inside the last step of calculation or inference. 
Pay attention to what the question is asking for to extract the value of the answer. +2. The final answer could also be a short phrase or sentence. +3. If the answer is about a certain time period, such as from 1 minute 30 seconds to 2 minutes 30 seconds, it should be given in the format [90, 150]. +4. If the response doesn't give a final answer, output Z. + +Output Format: +Directly output the extracted answer of the response + +Example 1: +Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog +Response: a cute teddy bear +Your output: A +Example 2: +Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog +Answer: Spider +Your output: Z +Example 3: +Question: On a flat playground, choose a Cartesian Oxy coordinate system (in unit of meters). A child running at a constant velocity $V=1 \\mathrm{~m} / \\mathrm{s}$ around a heart-shaped path satisfies the following order- 6 algebraic equation:\n$$\n\\left(x^{2}+y^{2}-L^{2}\\right)^{3}-L x^{2} y^{3}=0, L=10 \\text {. }\n$$\nWhen the child is at the position $(x, y)=(L, 0)$, what is the magnitude of their acceleration?\n\n +Response: fore, \\(r \\approx 10 \\, \\text{m}\\).\n\n7. **Calculating Acceleration**: Using the radius of curvature:\n \\[\n a_c = \\frac{V^2}{r} = \\frac{(1 \\, \\text{m/s})^2}{10 \\, \\text{m}} = 0.1 \\, \\text{m/s}^2\n \\]\n\n8. **Magnitude of Acceleration**: The magnitude of the acceleration is the sum of the tangential and centripetal accelerations. Since the tangential acceleration is zero, the magnitude of the acceleration is purely centripetal:\n \\[\n \\text{Magnitude of acceleration} = a_c = 0.1 \\, \\text{m/s}^2\n \\]\n\nAnswer: \\boxed{0.1 \\, \\text{m/s}^2} +Your output: 0.1 text{m/s}^2 +Example 4: +Question: In the diagram, $O A=15, O P=9$ and $P B=4$. Determine the equation of the line through $A$ and $B$. Explain how you got your answer. 
+ + +Response: The equation of the line through $A$ and $B$ is $y = -\frac{1}{3}x + 12$. + +To find the equation of the line through $A$ and $B$, we first need to find the slope of the line. The slope of a line passing through two points $(x_1, y_1)$ and $(x_2, y_2)$ is given by the formula: + +$$\text{Slope} = \frac{y_2 - y_1}{x_2 - x_1}$$ + +Using the coordinates of points $A(0, 15)$ and $B(4, 0)$, we can calculate the slope as follows: + +$$\text{Slope} = \frac{0 - 15}{4 - 0} = \frac{-15}{4} = -\frac{15}{4}$$ + +Now that we have the slope, we can use the point-slope form of a linear equation to find the equation of the line. The point-slope form is given by: + +$$y - y_1 = m(x - x_1)$$ + +where $(x_1, y_1)$ is a point on the line and $m$ is the slope. In this case, we can use point $A(0, 15)$ and the slope $-\frac{15}{4}$: + +$$y - 15 = -\frac{15}{4}(x - 0)$$ + +Simplifying the equation, we get: + +$$y - 15 = -\frac{15}{4}x$$ + +$$y = -\frac{15}{4}x + 15$$ + +Therefore, the equation of the line through $A$ and $B$ is $y = -\frac{15}{4}x + 15$. +Your output: $y = -\frac{15}{4}x + 15$ +Example 5: +Question: In the paper \'An Object is Worth 64x64 Pixels: Generating 3D Object via Image Diffusion\', what aspect of the proposed method does this figure aim to demonstrate? +Response: the potential of this 3D generation paradigm to generate objects with PBR materials +Your output: the potential of this 3D generation paradigm to generate objects with PBR materials +Example 6: +Question: Please specify the start and end time of the event 'An intro screen appears with a gray background and the red and white words that say PRO TRAINING.' +Response: The start and end time of the event 'An intro screen appears with a gray background and the red and white words that say PRO TRAINING.' are as follows:\n\n**Step 1: Identify the start time of the event.**\nThe video starts at 00:00. 
The intro screen is visible from the very beginning.\n\n**Start time:** 00:00\n\n**Step 2: Identify the end time of the event.**\nThe intro screen starts to disappear and transition to the next scene at 00:11. By 00:12, the intro screen is completely gone. So, the intro screen is visible until 00:11.\n\n**End time:** 00:11\n\nTherefore, the event 'An intro screen appears with a gray background and the red and white words that say PRO TRAINING' starts at **00:00** and ends at **00:11**.\n\n**Answer:**\nStart time: 00:00\nEnd time: 00:11 +Your output: [0,11] + +""" + +Answer_Extraction_Prompt_part2 = """ +Question: {question} +Answer: {response} +Your output: +""" + +Answer_Scoring_Prompt_part1 = r"""You are an AI assistant who will help me to judge whether two answers are consistent. + +Input Illustration: +[Standard Answer] is the standard answer to the question +[Model Answer] is the answer extracted from a model's output to this question. + +Task Illustration: +Determine whether [Standard Answer] and [Model Answer] are consistent. +Consistent Criteria: +[Multiple-Choice questions] +1. If the [Model Answer] is the option letter, then it must completely matches the [Standard Answer]. +2. If the [Model Answer] is not an option letter, then the [Model Answer] must completely match the option content of [Standard Answer]. +[Nan-Multiple-Choice questions] +1. The [Model Answer] and [Standard Answer] should exactly match. +2. If the meaning is expressed in the same way, it is also considered consistent, for example, 0.5m and 50cm. + +Output Format: +1. If they are consistent, output 1; if they are different, output 0. +2. DIRECTLY output 1 or 0 without any other content. + +Example 1: +Question: What is the main object in image?\nOptions: A. teddy bear B. rabbit C. cat D. dog +[Model Answer]: a cute teddy bear +[Standard Answer]: A +Your output: 1 + +Example 2: +Question: Find the value of AB. 
Answer_Scoring_Prompt_part2 = """
Question: {question}
[Model Answer]: {extract_answer}
[Standard Answer]: {gt_answer}
Your output:
"""


def build_Extraction_prompt(item):
    """Fill the answer-extraction template from an eval item."""
    return f"Question: {item['question']}\nAnswer: {item['prediction']}\nYour output:"


def build_Scoring_prompt(item):
    """Fill the answer-scoring template (extracted vs. gold answer)."""
    return (
        f"Question: {item['question']}\n"
        f"[Model Answer]: {item['extracted_answer']}\n"
        f"[Standard Answer]: {item['answer']}\n"
        "Your output:"
    )


def build_Precision_prompt(item):
    """Fill the precision-judging template (solution vs. ground truth)."""
    return (
        f"[Problem]:{item['question']}\n"
        f"[Solution]:{item['prediction']}\n"
        f"[Ground Truth Information]:{item['reasoning']}"
    )


def build_Recall_prompt(item):
    """Fill the recall-judging template (answer + solution vs. ground truth)."""
    return (
        f"[Problem]:{item['question']}\n"
        f"[Answer]:{item['answer']}\n"
        f"[Solution]:{item['prediction']}\n"
        f"[Ground Truth Information]:{item['reasoning']}"
    )
+ +SYSTEM_CAL_SCORE_PROMPT_CAP = """ + You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. + Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. The evaluation criteria differ based on the type of question: + ------ + ## INSTRUCTIONS: + 1. For **OCR-related questions**: + - Perform a strict letter-by-letter comparison. + - Any difference in characters (including case, punctuation, or letter substitution) must result in 'no'. + - Minor spelling errors or missing characters should not be accepted. + 2. For **non-OCR-related questions**: + - Focus on the meaningful match between the predicted answer and the correct answer. + - Synonyms or paraphrases can be considered valid matches. + - Minor spelling differences or alternative expressions should not be penalized. +""" + +SYSTEM_CAL_SCORE_PROMPT_QA = """ + You are an intelligent chatbot designed for evaluating the correctness of generative outputs for reasoning-based question-answer pairs. + Your task is to compare the predicted answer with the correct answer based on the following rules: + ------ + ## INSTRUCTIONS: + 1. **Evaluate Reasoning Tasks Strictly:** + - The predicted answer must capture all critical concepts and details mentioned in the correct answer. + - If the correct answer mentions specific concepts or examples (e.g., 'odd numbers accumulate to form perfect squares'), the predicted answer must include these concepts or examples. + - Even if the phrasing differs, the key meaning and concepts must be preserved. However, omitting or altering key concepts or examples is **not acceptable**. + - **Example 1:** If the correct answer is 'The construction method shows how odd numbers accumulate to form perfect squares,' the predicted answer must include 'odd numbers' and 'perfect squares'. 
+ - **Example 2:** If the correct answer is 'To eliminate HBr and form an alkene,' the predicted answer must address the elimination of HBr as well. + - Minor differences in phrasing are acceptable as long as the key information is retained. + - **Critical Detail:** If any essential element (e.g., key terms, concepts, or examples) is missing from the predicted answer, the answer is considered incorrect. + - Do **not** introduce new, unrelated information in the predicted answer. +""" + + +SYSTEM_GENER_PRED_PROMPT = """You are an intelligent chatbot designed for providing accurate answers to questions related to the content based on a detailed description of a video or image. +Here's how you can accomplish the task: +------ +##INSTRUCTIONS: +- Read the detailed description carefully. +- Answer the question only based on the detailed description. +- The answer should be a short sentence or phrase. +""" + +USER_GENER_PRED_PROMPT = """Please provide accurate answers to questions related to the content based on a detailed description of a video or image: + +detailed description: {pred_cap} +question: {q} + +DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. 
# Discipline buckets for Video-MMLU; 'overall' aggregates every discipline.
VIDEO_MMLU_DIMENSIONS = {
    'math': ['math'],
    'physics': ['physics'],
    'chemistry': ['chemistry'],
    'overall': []
}

L3_DIMS = []
for k, v in VIDEO_MMLU_DIMENSIONS.items():
    if k != 'overall':
        L3_DIMS.extend(v)
        VIDEO_MMLU_DIMENSIONS['overall'].extend(v)


def get_dimension_rating(data_path):
    """Aggregate per-discipline judge scores from an evaluation result file.

    Args:
        data_path: path to the result table; each row carries a 'discipline'
            column and a 'score' column holding the judge output (a dict, or
            its string form with keys 'pred' and 'score').

    Returns:
        dict with two sub-dicts keyed by discipline (+ 'overall'):
            coarse_valid: mean judge score over valid (>= 0) entries, '0.00' if none
            coarse_accuracy: mean of yes/no correctness, '0.00' if none
    """
    import ast

    data = load(data_path)
    coarse_rating = {k: [] for k in VIDEO_MMLU_DIMENSIONS}
    coarse_acc = {k: [] for k in VIDEO_MMLU_DIMENSIONS}

    def parse_score_dict(score_dict):
        """Parse the judge output into a dict; return None on failure."""
        if isinstance(score_dict, dict):
            return score_dict
        if isinstance(score_dict, str):
            try:
                # Judge may emit proper JSON ...
                return json.loads(score_dict)
            except json.JSONDecodeError:
                try:
                    # ... or Python-dict syntax (single quotes), which json
                    # rejects. ast.literal_eval parses it WITHOUT the
                    # arbitrary-code-execution risk of eval() on model output.
                    return ast.literal_eval(score_dict)
                except (ValueError, SyntaxError, TypeError):
                    print(f"Failed to parse score_dict: {score_dict}")
                    return None
        return None

    for i in range(len(data)):
        discipline = data.iloc[i]['discipline'].lower()  # normalize case
        score_dict = parse_score_dict(data.iloc[i]['score'])

        if score_dict and isinstance(score_dict, dict) and 'pred' in score_dict and 'score' in score_dict:
            score = score_dict['score']
            is_correct = 1 if score_dict['pred'].lower() == 'yes' else 0
        else:
            # Sentinel for unparseable judge output; filtered out below.
            score = -1
            is_correct = -1

        if discipline in ['math', 'physics', 'chemistry']:
            coarse_rating[discipline].append(score)
            coarse_rating['overall'].append(score)

            if is_correct != -1:
                coarse_acc[discipline].append(is_correct)
                coarse_acc['overall'].append(is_correct)

    # Guard empty buckets: np.mean([]) is nan (plus a RuntimeWarning); report
    # '0.00' instead, matching the coarse_accuracy convention.
    coarse_valid = {}
    for k, v in coarse_rating.items():
        valid = [x for x in v if x >= 0]
        coarse_valid[k] = f'{np.mean(valid):.2f}' if valid else '0.00'
    coarse_accuracy = {k: f'{np.mean(v):.2f}' if v else '0.00' for k, v in coarse_acc.items()}

    return dict(
        coarse_valid=coarse_valid,
        coarse_accuracy=coarse_accuracy
    )
def prepare_score_prompt(item):
    """
    Prepare the judge prompt for scoring one question-answer pair

    Args:
        item: DataFrame row (or dict) containing question, answer, and pred_response

    Returns:
        str: the formatted evaluation prompt for the judge model
    """
    # Convert Series to dictionary if needed
    if isinstance(item, pd.Series):
        item = item.to_dict()

    # NOTE: the few-shot example previously showed 'score': 4.8, contradicting
    # the "score is an INTEGER" instruction and inviting float scores from the
    # judge; fixed to an integer example.
    prompt = f"""Please evaluate the following video-based question-answer pair:\n\n
    Question: {item['question']}\n
    Correct Answer: {item['answer']}\n
    Predicted Answer: {item['pred_response']}\n\n
    Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match.
    Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING.
    DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string.
    For example, your response should look like this: {{'pred': 'yes', 'score': 4}}."""

    return prompt
def parse_args():
    """Parse CLI arguments for the inference entry point.

    Returns:
        argparse.Namespace with ``data`` (list[str]), ``model`` (list[str]),
        ``nproc`` (int, default 4) and ``verbose`` (bool).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, nargs='+', required=True)
    parser.add_argument('--model', type=str, nargs='+', required=True)
    # --nproc previously set both required=True and default=4; a required
    # option can never fall back to its default, so the default was dead.
    # Keep the default and drop required (invocations passing --nproc are
    # unaffected; omitting it now yields 4 instead of an error).
    parser.add_argument('--nproc', type=int, default=4)
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()
    return args
+ structs = [s for i, s in zip(indices, structs) if i not in res] + indices = [i for i in indices if i not in res] + + gen_func = model.generate + structs = [dict(message=struct, dataset=dataset_name) for struct in structs] + + if len(structs): + track_progress_rich(gen_func, structs, nproc=api_nproc, chunksize=api_nproc, save=out_file, keys=indices) + + res = load(out_file) + if index_set is not None: + res = {k: v for k, v in res.items() if k in index_set} + os.remove(out_file) + return res + + +def infer_data(model, model_name, work_dir, dataset, out_file, verbose=False, api_nproc=4, use_vllm=False): + dataset_name = dataset.dataset_name + prev_file = f'{work_dir}/{model_name}_{dataset_name}_PREV.pkl' + res = load(prev_file) if osp.exists(prev_file) else {} + if osp.exists(out_file): + res.update(load(out_file)) + + rank, world_size = get_rank_and_world_size() + sheet_indices = list(range(rank, len(dataset), world_size)) + lt = len(sheet_indices) + data = dataset.data.iloc[sheet_indices] + data_indices = [i for i in data['index']] + + # If finished, will exit without building the model + all_finished = True + for i in range(lt): + idx = data.iloc[i]['index'] + if idx not in res: + all_finished = False + if all_finished: + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + + # Data need to be inferred + data = data[~data['index'].isin(res)] + lt = len(data) + + kwargs = {} + if model_name is not None and ( + 'Llama-4' in model_name + or 'Qwen2-VL' in model_name + or 'Qwen2.5-VL' in model_name + ): + kwargs = {'use_vllm': use_vllm} + + # (25.06.05) In newer version of transformers (after 4.50), with device_map='auto' and torchrun launcher, + # Transformers automatically adopt TP parallelism, which leads to compatibility problems with VLMEvalKit + # (In VLMEvalKit, we use torchrun to launch multiple model instances on a single node). + # To bypass this problem, we unset `WORLD_SIZE` before building the model to not use TP parallel. 
+ ws_bak = os.environ.pop('WORLD_SIZE', None) + model = supported_VLM[model_name](**kwargs) if isinstance(model, str) else model + if ws_bak: + os.environ['WORLD_SIZE'] = ws_bak + + is_api = getattr(model, 'is_api', False) + if is_api: + lt, indices = len(data), list(data['index']) + supp = infer_data_api( + model=model, + work_dir=work_dir, + model_name=model_name, + dataset=dataset, + index_set=set(indices), + api_nproc=api_nproc) + for idx in indices: + assert idx in supp + res.update(supp) + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + else: + model.set_dump_image(dataset.dump_image) + + for i in tqdm(range(lt), desc=f'Infer {model_name}/{dataset_name}, Rank {rank}/{world_size}'): + idx = data.iloc[i]['index'] + if idx in res: + continue + + if hasattr(model, 'use_custom_prompt') and model.use_custom_prompt(dataset_name): + struct = model.build_prompt(data.iloc[i], dataset=dataset_name) + else: + struct = dataset.build_prompt(data.iloc[i]) + + # If `SKIP_ERR` flag is set, the model will skip the generation if error is encountered + if os.environ.get('SKIP_ERR', False) == '1': + FAIL_MSG = 'Failed to obtain answer' + try: + response = model.generate(message=struct, dataset=dataset_name) + except RuntimeError as err: + torch.cuda.synchronize() + warnings.warn(f'{type(err)} {str(err)}') + response = f'{FAIL_MSG}: {type(err)} {str(err)}' + else: + response = model.generate(message=struct, dataset=dataset_name) + torch.cuda.empty_cache() + + if verbose: + print(response, flush=True) + + res[idx] = response + if (i + 1) % 10 == 0: + dump(res, out_file) + + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + + +# A wrapper for infer_data, do the pre & post processing +def infer_data_job( + model, work_dir, model_name, dataset, verbose=False, api_nproc=4, ignore_failed=False, use_vllm=False +): + rank, world_size = get_rank_and_world_size() + dataset_name = dataset.dataset_name + # 使用环境变量控制的文件格式 + result_file 
= get_pred_file_path(work_dir, model_name, dataset_name, use_env_format=True) + + prev_file = f'{work_dir}/{model_name}_{dataset_name}_PREV.pkl' + if osp.exists(result_file): + if rank == 0: + data = load(result_file) + # breakpoint() + results = {k: v for k, v in zip(data['index'], data['prediction'])} + if not ignore_failed: + results = {k: v for k, v in results.items() if FAIL_MSG not in str(v)} + dump(results, prev_file) + if world_size > 1: + dist.barrier() + + tmpl = osp.join(work_dir, '{}' + f'{world_size}_{dataset_name}.pkl') + out_file = tmpl.format(rank) + + model = infer_data( + model=model, work_dir=work_dir, model_name=model_name, dataset=dataset, + out_file=out_file, verbose=verbose, api_nproc=api_nproc, use_vllm=use_vllm) + if world_size > 1: + dist.barrier() + + if rank == 0: + data_all = {} + for i in range(world_size): + data_all.update(load(tmpl.format(i))) + + data = dataset.data + for x in data['index']: + assert x in data_all + if os.getenv('SPLIT_THINK', False): + prediction = [str(data_all[x]) for x in data['index']] + + def split_thinking(s): + if '' in s: + splits = s.split('') + prediction = splits[-1].strip() + if len(splits) == 2 and '' in splits[0]: + thinking = splits[0].split('')[1].strip() + else: + thinking = ''.join(splits[:-1]) + thinking += '' + warnings.warn('Failed to parse thinking, multiple tags or missing tag.') + else: + thinking = '' + prediction = s + return (prediction, thinking) + split_func = model.split_thinking if hasattr(model, 'split_thinking') else split_thinking + print(f'Prediction format: {os.getenv("SPLIT_THINK")},splitting func: {split_func}') + tups = [split_func(x) for x in prediction] + data['prediction'] = [x[0] for x in tups] + data['thinking'] = [x[1] for x in tups] + else: + data['prediction'] = [str(data_all[x]) for x in data['index']] + if 'image' in data: + data.pop('image') + + dump(data, result_file) + for i in range(world_size): + os.remove(tmpl.format(i)) + if world_size > 1: + dist.barrier() + 
def chat_mt(model, messages, dataset_name):
    """Run a multi-turn conversation and collect the model reply for each turn.

    ``messages`` alternates user/assistant turns (even length required); the
    reference assistant turns are ignored — the model's own replies are fed
    back as the growing history. A failed turn records FAIL_MSG + the error
    text and the conversation continues.
    """
    assert len(messages) % 2 == 0
    history = []
    replies = []
    # User turns sit at the even indices; replay them one at a time.
    for user_turn in messages[::2]:
        history.append(user_turn)
        try:
            answer = model.chat(history, dataset=dataset_name)
        except Exception as err:
            answer = FAIL_MSG + str(err)
        history.append(dict(role='assistant', content=answer))
        replies.append(answer)
    return replies
[dataset.build_prompt(data.iloc[i]) for i in range(lt)] + + out_file = f'{work_dir}/{model_name}_{dataset_name}_supp.pkl' + res = {} + if osp.exists(out_file): + res = load(out_file) + if ignore_failed: + res = {k: v for k, v in res.items() if FAIL_MSG not in v} + + structs = [s for i, s in zip(indices, structs) if i not in res] + indices = [i for i in indices if i not in res] + + structs = [dict(model=model, messages=struct, dataset_name=dataset_name) for struct in structs] + + if len(structs): + track_progress_rich(chat_mt, structs, nproc=api_nproc, chunksize=api_nproc, save=out_file, keys=indices) + + res = load(out_file) + if index_set is not None: + res = {k: v for k, v in res.items() if k in index_set} + os.remove(out_file) + return res + + +def infer_data(model, model_name, work_dir, dataset, out_file, verbose=False, api_nproc=4, use_vllm=False): + dataset_name = dataset.dataset_name + res = {} + if osp.exists(out_file): + res.update(load(out_file)) + + rank, world_size = get_rank_and_world_size() + sheet_indices = list(range(rank, len(dataset), world_size)) + lt = len(sheet_indices) + data = dataset.data.iloc[sheet_indices] + data_indices = [i for i in data['index']] + + # If finished, will exit without building the model + all_finished = True + for i in range(lt): + idx = data.iloc[i]['index'] + if idx not in res: + all_finished = False + if all_finished: + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + + # Data need to be inferred + data = data[~data['index'].isin(res)] + lt = len(data) + + kwargs = {} + if model_name is not None and ( + 'Llama-4' in model_name + or 'Qwen2-VL' in model_name + or 'Qwen2.5-VL' in model_name + ): + kwargs = {'use_vllm': use_vllm} + + # (25.06.05) In newer version of transformers (after 4.50), with device_map='auto' and torchrun launcher, + # Transformers automatically adopt TP parallelism, which leads to compatibility problems with VLMEvalKit + # (In VLMEvalKit, we use torchrun to launch 
multiple model instances on a single node). + # To bypass this problem, we unset `WORLD_SIZE` before building the model to not use TP parallel. + ws_bak = os.environ.pop('WORLD_SIZE', None) + model = supported_VLM[model_name](**kwargs) if isinstance(model, str) else model + if ws_bak: + os.environ['WORLD_SIZE'] = ws_bak + assert hasattr(model, 'chat_inner') + + is_api = getattr(model, 'is_api', False) + if is_api: + lt, indices = len(data), list(data['index']) + supp = infer_data_api( + model=model, + work_dir=work_dir, + model_name=model_name, + dataset=dataset, + index_set=set(indices), + api_nproc=api_nproc) + for idx in indices: + assert idx in supp + res.update(supp) + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + else: + model.set_dump_image(dataset.dump_image) + + for i in tqdm(range(lt)): + idx = data.iloc[i]['index'] + if idx in res: + continue + + if hasattr(model, 'use_custom_prompt') and model.use_custom_prompt(dataset_name): + struct = model.build_prompt(data.iloc[i], dataset=dataset_name) + else: + struct = dataset.build_prompt(data.iloc[i]) + + response = chat_mt(model, struct, dataset_name) + torch.cuda.empty_cache() + + if verbose: + print(response, flush=True) + + res[idx] = response + if (i + 1) % 20 == 0: + dump(res, out_file) + + res = {k: res[k] for k in data_indices} + dump(res, out_file) + return model + + +# A wrapper for infer_data, do the pre & post processing +def infer_data_job_mt( + model, work_dir, model_name, dataset, verbose=False, api_nproc=4, ignore_failed=False, use_vllm=False +): + rank, world_size = get_rank_and_world_size() + dataset_name = dataset.dataset_name + result_file = get_pred_file_path(work_dir, model_name, dataset_name, use_env_format=True) + + tmpl = osp.join(work_dir, '{}' + f'{world_size}_{dataset_name}.pkl') + out_file = tmpl.format(rank) + + model = infer_data( + model=model, work_dir=work_dir, model_name=model_name, dataset=dataset, + out_file=out_file, verbose=verbose, 
api_nproc=api_nproc, use_vllm=use_vllm) + if world_size > 1: + dist.barrier() + + if rank == 0: + data_all = {} + for i in range(world_size): + data_all.update(load(tmpl.format(i))) + + data = dataset.data + for x in data['index']: + assert x in data_all + + data['prediction'] = [data_all[x] for x in data['index']] + if 'image' in data: + data.pop('image') + + dump(data, result_file) + for i in range(world_size): + os.remove(tmpl.format(i)) + return model diff --git a/VLMEvalKit-sudoku/vlmeval/inference_video.py b/VLMEvalKit-sudoku/vlmeval/inference_video.py new file mode 100644 index 0000000000000000000000000000000000000000..54bb790c0ef889a4c142ab1a0f3c056e8d09b201 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/inference_video.py @@ -0,0 +1,259 @@ +import torch +import torch.distributed as dist +from vlmeval.config import supported_VLM +from vlmeval.utils import track_progress_rich +from vlmeval.smp import * + +FAIL_MSG = 'Failed to obtain answer via API.' + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, nargs='+', required=True) + parser.add_argument('--model', type=str, nargs='+', required=True) + parser.add_argument('--nproc', type=int, default=4, required=True) + parser.add_argument('--verbose', action='store_true') + args = parser.parse_args() + return args + + +# Only API model is accepted +def infer_data_api(model, work_dir, model_name, dataset, samples_dict={}, api_nproc=4): + rank, world_size = get_rank_and_world_size() + assert rank == 0 and world_size == 1 + dataset_name = dataset.dataset_name + model = supported_VLM[model_name]() if isinstance(model, str) else model + assert getattr(model, 'is_api', False) + + indices = list(samples_dict.keys()) + if getattr(model,'backend', None) == 'genai': + if dataset.nframe > 0: + print( + 'Gemini model (with genai backend) does not support nframe, ' + 'will set its VIDEO_LLM to False to enable multi-image input for video.' 
+ ) + setattr(model, 'VIDEO_LLM', False) + else: + print('Gemini model (with genai backend) is a video-llm, ' + 'will reset fps setting in model to match the dataset.') + setattr(model, 'fps', dataset.fps) + print(f'The fps is set to {dataset.fps} for the model {model_name}.') + elif getattr(model,'backend', None) == 'vertex': + print('Gemini model (with vertex backend) does not support video input, ' + 'will set its VIDEO_LLM to False to enable multi-image input for video.') + setattr(model, 'VIDEO_LLM', False) + + packstr = 'pack' if getattr(dataset, 'pack', False) else 'nopack' + build_prompt_input = [(samples_dict[idx], getattr(model, 'VIDEO_LLM', False)) for idx in indices] + if dataset.nframe > 0: + struct_tmp_file = f'{work_dir}/{model_name}_{dataset_name}_{dataset.nframe}frame_{packstr}_structs.pkl' + else: + struct_tmp_file = f'{work_dir}/{model_name}_{dataset_name}_{dataset.fps}fps_{packstr}_structs.pkl' + structs = track_progress_rich( + dataset.build_prompt, + tasks=build_prompt_input, + nproc=api_nproc, + save=struct_tmp_file, + keys=indices, + ) + + if dataset.nframe > 0: + out_file = f'{work_dir}/{model_name}_{dataset_name}_{dataset.nframe}frame_{packstr}_supp.pkl' + else: + out_file = f'{work_dir}/{model_name}_{dataset_name}_{dataset.fps}fps_{packstr}_supp.pkl' + res = load(out_file) if osp.exists(out_file) else {} + + structs = [s for i, s in zip(indices, structs) if i not in res or res[i] == FAIL_MSG] + structs = [struct for struct in structs if struct is not None] + indices = [i for i in indices if i not in res or res[i] == FAIL_MSG] + + gen_func = model.generate + structs = [dict(message=struct, dataset=dataset_name) for struct in structs] + + if len(structs): + track_progress_rich(gen_func, structs, nproc=api_nproc, chunksize=api_nproc, save=out_file, keys=indices) + + res = load(out_file) + return res + + +def infer_data(model, model_name, work_dir, dataset, out_file, verbose=False, api_nproc=4, use_vllm=False): + res = load(out_file) if 
def infer_data(model, model_name, work_dir, dataset, out_file, verbose=False, api_nproc=4, use_vllm=False):
    """Run (video) inference for this rank's shard of the dataset.

    Resumes from ``out_file`` if it exists, builds the model lazily, and
    dumps {index: response} for this rank's samples back to ``out_file``.
    Returns the (possibly newly built) model so callers can reuse it.
    """
    res = load(out_file) if osp.exists(out_file) else {}
    rank, world_size = get_rank_and_world_size()
    dataset_name = dataset.dataset_name

    sample_indices = list(dataset.videos) if getattr(dataset, 'pack', False) else list(dataset.data['index'])
    samples = list(dataset.videos) if getattr(dataset, 'pack', False) else list(range(len(dataset.data)))
    sample_map = {i: s for i, s in zip(sample_indices, samples)}

    # Round-robin shard across ranks; skip model build if all done already.
    sample_indices_sub = sample_indices[rank::world_size]
    if np.all([idx in res for idx in sample_indices_sub]):
        return model
    sample_indices_subrem = [x for x in sample_indices_sub if x not in res]

    kwargs = {}
    if model_name is not None and (
        'Llama-4' in model_name
        or 'Qwen2-VL' in model_name
        or 'Qwen2.5-VL' in model_name
        or 'Qwen2.5-Omni' in model_name
    ):
        kwargs = {'use_vllm': use_vllm}

    # (25.06.05) In newer version of transformers (after 4.50), with device_map='auto' and torchrun launcher,
    # Transformers automatically adopt TP parallelism, which leads to compatibility problems with VLMEvalKit
    # (In VLMEvalKit, we use torchrun to launch multiple model instances on a single node).
    # To bypass this problem, we unset `WORLD_SIZE` before building the model to not use TP parallel.
    ws_bak = os.environ.pop('WORLD_SIZE', None)
    model = supported_VLM[model_name](**kwargs) if isinstance(model, str) else model
    if ws_bak:
        os.environ['WORLD_SIZE'] = ws_bak

    is_api = getattr(model, 'is_api', False)
    if is_api:
        assert world_size == 1
        supp = infer_data_api(
            model=model,
            work_dir=work_dir,
            model_name=model_name,
            dataset=dataset,
            samples_dict={k: sample_map[k] for k in sample_indices_subrem},
            api_nproc=api_nproc)
        for k in sample_indices_subrem:
            assert k in supp
        res.update(supp)
        dump(res, out_file)
        return model

    assert not getattr(dataset, 'pack', False), 'Current model not supported pack mode!'
    if 'megabench' in dataset_name.lower() and 'llava_onevision' in model_name:
        print(
            'LLaVA-OneVision does not support Megabench dataset as video dataset, '
            'will set its VIDEO_LLM to False to enable multi-image input for video.'
        )
        setattr(model, 'VIDEO_LLM', False)

    # total= so tqdm shows real progress (enumerate alone has no length).
    for i, idx in tqdm(enumerate(sample_indices_subrem), total=len(sample_indices_subrem)):
        if idx in res:
            continue
        # Reconcile the model's frame-sampling config with the dataset's.
        if getattr(model, 'nframe', None) is not None and getattr(model, 'nframe', 0) > 0:
            if dataset.nframe > 0:
                if getattr(model, 'nframe', 0) != dataset.nframe:
                    print(f'{model_name} is a video-llm model, nframe is set to {dataset.nframe}, not using default')
                    setattr(model, 'nframe', dataset.nframe)
            elif getattr(model, 'fps', 0) == 0:
                raise ValueError(f'fps is not suitable for {model_name}')
            else:
                setattr(model, 'nframe', None)
        if getattr(model, 'fps', None) is not None and getattr(model, 'fps', 0) > 0:
            if dataset.fps > 0:
                if getattr(model, 'fps', 0) != dataset.fps:
                    print(f'{model_name} is a video-llm model, fps is set to {dataset.fps}, not using default')
                    setattr(model, 'fps', dataset.fps)
            elif getattr(model, 'nframe', 0) == 0:
                raise ValueError(f'nframe is not suitable for {model_name}')
            else:
                setattr(model, 'fps', None)
        if (
            'Qwen2-VL' in model_name
            or 'Qwen2.5-VL' in model_name
            or 'Qwen2.5-Omni' in model_name
        ):
            if getattr(model, 'nframe', None) is None and dataset.nframe > 0:
                print(f'using {model_name} default setting for video, dataset.nframe is ommitted')
            if getattr(model, 'fps', None) is None and dataset.fps > 0:
                print(f'using {model_name} default setting for video, dataset.fps is ommitted')
        if 'SUB_DATASET' in dataset.data.iloc[sample_map[idx]]:
            dataset_name = dataset.data.iloc[sample_map[idx]]['SUB_DATASET']
        if hasattr(model, 'use_custom_prompt') and model.use_custom_prompt(dataset_name):
            if dataset.nframe == 0:
                raise ValueError(f'nframe must be set for custom prompt, fps is not suitable for {model_name}')
            struct = model.build_prompt(
                dataset.data.iloc[sample_map[idx]], dataset=dataset, video_llm=getattr(model, 'VIDEO_LLM', False)
            )
        else:
            struct = dataset.build_prompt(
                sample_map[idx], video_llm=getattr(model, 'VIDEO_LLM', False)
            )
        if struct is None:
            continue

        # If `SKIP_ERR` flag is set, the model will skip the generation if error is encountered
        if os.environ.get('SKIP_ERR', False) == '1':
            FAIL_MSG = 'Failed to obtain answer'
            try:
                response = model.generate(message=struct, dataset=dataset_name)
            except RuntimeError as err:
                torch.cuda.synchronize()
                # BUGFIX: was warnings.error(...), which does not exist and
                # raised AttributeError inside the error-recovery path itself;
                # the sibling infer_data in inference.py uses warnings.warn.
                warnings.warn(f'{type(err)} {str(err)}')
                response = f'{FAIL_MSG}: {type(err)} {str(err)}'
        else:
            response = model.generate(message=struct, dataset=dataset_name)
        torch.cuda.empty_cache()

        if verbose:
            print(response, flush=True)

        res[idx] = response
        # Periodic checkpoint so a crash loses at most 20 samples.
        if (i + 1) % 20 == 0:
            dump(res, out_file)

    res = {k: res[k] for k in sample_indices_sub}
    dump(res, out_file)
    return model
dataset.load_pack_answers(data_all) + print(f'Statitics of Pack Video Inference: {vstats}') + else: + for x in meta['index']: + assert x in data_all + meta['prediction'] = [str(data_all[x]) for x in meta['index']] + if 'image' in meta: + meta.pop('image') + + dump(meta, result_file) + for i in range(world_size): + os.remove(tmpl.format(i)) + return model diff --git a/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/misc.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..862eabeda85f86c51d999494f96d51a21a51887e Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/smp/__pycache__/misc.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/tools.py b/VLMEvalKit-sudoku/vlmeval/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..98449f841816c3cd282b4c23764fe243c30748e8 --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/tools.py @@ -0,0 +1,659 @@ +import sys +from collections import deque +from vlmeval.dataset import SUPPORTED_DATASETS +from vlmeval.config import * +from vlmeval.smp import * + +# Define valid modes +MODES = ('dlist', 'mlist', 'missing', 'circular', 'localize', 'check', 'run', 'eval', 'merge_pkl', 'scan') + +CLI_HELP_MSG = \ + f""" + Arguments received: {str(['vlmutil'] + sys.argv[1:])}. vlmutil commands use the following syntax: + + vlmutil MODE MODE_ARGS + + Where MODE (required) is one of {MODES} + MODE_ARG (optional) is the argument for specific mode + + Some usages for xtuner commands: (See more by using -h for specific command!) + + 1. List all the dataset by levels: l1, l2, l3, etc.: + vlmutil dlist [l1/l2/l3/...] + 2. List all the models by categories: 4.33.0, 4.37.0, api, etc.: + vlmutil mlist 4.33.0 [all/small/large] + 3. Report missing results: + vlmutil missing [l1/l2/l3/...] + 4. Create circular questions (only for multiple-choice questions with no more than 4 choices): + vlmutil circular input.tsv + 5. 
Create a localized version of the dataset (for very large tsv files): + vlmutil localize input.tsv + 6. Check the validity of a model: + vlmutil check [model_name/model_series] + 7. Run evaluation for missing results: + vlmutil run l2 hf + 8. Evaluate data file: + vlmutil eval [dataset_name] [prediction_file] + 9. Merge pkl files: + vlmutil merge_pkl [pkl_dir] [world_size] + 10. Scan evaluation results and detect api failure + vlmutil scan --model [model_list.txt or model_names] --data [dataset_names] --root [root_dir] + GitHub: https://github.com/open-compass/VLMEvalKit + """ # noqa: E501 + + +dataset_levels = { + 'l1': [ + ('MMVet', 'gpt-4-turbo_score.csv'), ('MMMU_DEV_VAL', 'acc.csv'), + ('MathVista_MINI', 'gpt-4-turbo_score.csv'), ('HallusionBench', 'score.csv'), + ('OCRBench', 'score.json'), ('AI2D_TEST', 'acc.csv'), ('MMStar', 'acc.csv'), + ('MMBench_V11', 'acc.csv'), ('MMBench_CN_V11', 'acc.csv') + ], + 'l2': [ + ('MME', 'score.csv'), ('LLaVABench', 'score.csv'), ('RealWorldQA', 'acc.csv'), + ('MMBench', 'acc.csv'), ('MMBench_CN', 'acc.csv'), ('CCBench', 'acc.csv'), + ('SEEDBench_IMG', 'acc.csv'), ('COCO_VAL', 'score.json'), ('POPE', 'score.csv'), + ('ScienceQA_VAL', 'acc.csv'), ('ScienceQA_TEST', 'acc.csv'), ('MMT-Bench_VAL', 'acc.csv'), + ('SEEDBench2_Plus', 'acc.csv'), ('BLINK', 'acc.csv'), ('MTVQA_TEST', 'acc.json'), + ('Q-Bench1_VAL', 'acc.csv'), ('A-Bench_VAL', 'acc.csv'), ('R-Bench-Dis', 'acc.csv'), + ], + 'l3': [ + ('OCRVQA_TESTCORE', 'acc.csv'), ('TextVQA_VAL', 'acc.csv'), + ('ChartQA_TEST', 'acc.csv'), ('DocVQA_VAL', 'acc.csv'), ('InfoVQA_VAL', 'acc.csv'), + ('SEEDBench2', 'acc.csv') + ], + 'live': [ + ('LiveMMBench_VQ_circular', 'acc.csv'), ('LiveMMBench_Spatial_circular', 'acc.csv'), + ('LiveMMBench_Reasoning_circular', 'acc.csv'), ('LiveMMBench_Infographic', 'acc.csv'), + ('LiveMMBench_Perception', 'acc.csv'), ('LiveMMBench_Creation', 'merged_score.json'), + ], + 'math': [ + ('MathVision', 'score.csv'), ('MathVerse_MINI_Vision_Only', 
'score.csv'), + ('DynaMath', 'score.csv'), ('WeMath', 'score.csv'), ('LogicVista', 'score.csv'), + ('MathVista_MINI', 'gpt-4-turbo_score.csv'), + ], + 'spatial': [ + ('LEGO_circular', 'acc_all.csv'), ('BLINK_circular', 'acc_all.csv'), ('MMSIBench_circular', 'acc_all.csv'), + ('Spatial457', 'score.json'), ('3DSRBench', 'acc_all.csv') + ], + 'ESOV_GA': [ + ('MMBench_V11', 'acc.csv'), ('MMBench_CN_V11', 'acc.csv'), ('MEGABench_core_64frame', 'score.json'), + ('MMStar', 'acc.csv'), ('RealWorldQA', 'acc.csv') + ], + 'ESOV_GO': [ + ('MMBench_V11', 'acc.csv'), ('MMBench_CN_V11', 'acc.csv'), ('MEGABench_core_16frame', 'score.json'), + ('MMStar', 'acc.csv'), ('RealWorldQA', 'acc.csv') + ], + 'ESOV_R': [ + ('MathVista_MINI', 'gpt-4-turbo_score.csv'), ('MathVision', 'score.csv'), ('MMMU_DEV_VAL', 'acc.csv'), + ('LogicVista', 'score.csv'), ('VisuLogic', 'acc.csv') + ], + 'ESOV_I': [ + ('CCOCR', 'acc.csv'), ('AI2D_TEST', 'acc.csv'), ('SEEDBench2_Plus', 'acc.csv'), + ('CharXiv_reasoning_val', 'acc.csv'), ('CharXiv_descriptive_val', 'acc.csv'), + ], + 'ESOV_S': [ + ('Physics', 'score.csv'), ('MicroVQA', 'acc.csv'), ('MSEarthMCQ', 'acc.csv'), + ('SFE', 'score.csv'), ('SFE-zh', 'score.csv'), ('MMSci_DEV_MCQ', 'acc.csv'), + ('XLRS-Bench-lite', 'acc.csv'), ('OmniEarth-Bench', 'acc.csv') + ] +} + +dataset_levels['l12'] = dataset_levels['l1'] + dataset_levels['l2'] +dataset_levels['l23'] = dataset_levels['l2'] + dataset_levels['l3'] +dataset_levels['l123'] = dataset_levels['l12'] + dataset_levels['l3'] + +models = { + '4.33.0': list(qwen_series) + list(xcomposer_series) + [ + 'mPLUG-Owl2', 'flamingov2', 'VisualGLM_6b', 'MMAlaya', 'PandaGPT_13B', 'VXVERSE' + ] + list(idefics_series) + list(minigpt4_series) + list(instructblip_series), + '4.37.0': [x for x in llava_series if 'next' not in x] + list(internvl_series) + [ + 'TransCore_M', 'emu2_chat', 'MiniCPM-V', 'MiniCPM-V-2', 'OmniLMM_12B', + 'cogvlm-grounding-generalist', 'cogvlm-chat', 'cogvlm2-llama3-chat-19B', + 'mPLUG-Owl3' + ] + 
list(xtuner_series) + list(yivl_series) + list(deepseekvl_series) + list(janus_series) + list(cambrian_series), + '4.36.2': ['Moondream1'], + '4.40.0': [ + 'idefics2_8b', 'Bunny-llama3-8B', 'MiniCPM-Llama3-V-2_5', '360VL-70B', 'Phi-3-Vision', + ] + list(wemm_series), + '4.44.0': ['Moondream2'], + '4.45.0': ['Aria'], + 'latest': ['paligemma-3b-mix-448', 'MiniCPM-V-2_6', 'glm-4v-9b'] + [x for x in llava_series if 'next' in x] + + list(chameleon_series) + list(ovis_series) + list(mantis_series), + 'api': list(api_models) +} + +# SKIP_MODELS will be skipped in report_missing and run APIs +SKIP_MODELS = [ + 'MGM_7B', 'GPT4V_HIGH', 'GPT4V', 'flamingov2', 'PandaGPT_13B', + 'GeminiProVision', 'Step1V-0701', 'SenseNova-V6', + 'llava_v1_7b', 'sharegpt4v_7b', 'sharegpt4v_13b', + 'llava-v1.5-7b-xtuner', 'llava-v1.5-13b-xtuner', + 'cogvlm-grounding-generalist', 'InternVL-Chat-V1-1', + 'InternVL-Chat-V1-2', 'InternVL-Chat-V1-2-Plus', 'RekaCore', + 'llava_next_72b', 'llava_next_110b', 'MiniCPM-V', 'sharecaptioner', 'XComposer', + 'VisualGLM_6b', 'idefics_9b_instruct', 'idefics_80b_instruct', + 'mPLUG-Owl2', 'MMAlaya', 'OmniLMM_12B', 'emu2_chat', 'VXVERSE' +] + list(minigpt4_series) + list(instructblip_series) + list(xtuner_series) + list(chameleon_series) + list(vila_series) + +LARGE_MODELS = [ + 'idefics_80b_instruct', '360VL-70B', 'emu2_chat', 'InternVL2-76B', +] + + +def completed(m, d, suf): + score_file = f'outputs/{m}/{m}_{d}_{suf}' + if osp.exists(score_file): + return True + if d == 'MMBench': + s1, s2 = f'outputs/{m}/{m}_MMBench_DEV_EN_{suf}', f'outputs/{m}/{m}_MMBench_TEST_EN_{suf}' + return osp.exists(s1) and osp.exists(s2) + elif d == 'MMBench_CN': + s1, s2 = f'outputs/{m}/{m}_MMBench_DEV_CN_{suf}', f'outputs/{m}/{m}_MMBench_TEST_CN_{suf}' + return osp.exists(s1) and osp.exists(s2) + return False + + +def DLIST(lvl): + if lvl in dataset_levels.keys(): + return [x[0] for x in dataset_levels[lvl]] + else: + from vlmeval.dataset import SUPPORTED_DATASETS + return 
SUPPORTED_DATASETS + + +def MLIST(lvl, size='all'): + if lvl == 'all': + from vlmeval.config import supported_VLM + return [x for x in supported_VLM] + + model_list = models[lvl] + if size == 'small': + model_list = [m for m in model_list if m not in LARGE_MODELS] + elif size == 'large': + model_list = [m for m in model_list if m in LARGE_MODELS] + return [x[0] for x in model_list] + + +def MISSING(lvl): + from vlmeval.config import supported_VLM + models = list(supported_VLM) + models = [m for m in models if m not in SKIP_MODELS and osp.exists(osp.join('outputs', m))] + if lvl in dataset_levels.keys(): + data_list = dataset_levels[lvl] + else: + data_list = [(D, suff) for (D, suff) in dataset_levels['l123'] if D == lvl] + missing_list = [] + for f in models: + for D, suff in data_list: + if not completed(f, D, suff): + missing_list.append((f, D)) + return missing_list + + +def CIRCULAR(inp): + def proc_str(s): + chs = set(s) + chs = [x for x in chs if x not in string.ascii_letters and x != ' '] + for ch in chs: + s = s.replace(ch, ' ') + return s + + def abnormal_entry(line): + choices = {k: line[k] for k in string.ascii_uppercase if k in line and not pd.isna(line[k])} + has_label = False + for k in choices: + s = proc_str(choices[k]).split() + hit_words = [x for x in s if x in choices] + hit_words = set(hit_words) + if len(hit_words) > 1: + return True + if choices[k] in string.ascii_uppercase: + has_label = True + return has_label + + assert inp.endswith('.tsv') + data = load(inp) + OFFSET = 1e6 + while max(data['index']) >= OFFSET: + OFFSET *= 10 + n_opt = 2 + for i, ch in enumerate(string.ascii_uppercase): + if ch in data: + n_opt = ord(ch) - ord('A') + 1 + else: + for j in range(i + 1, 26): + assert string.ascii_uppercase[j] not in data + groups = defaultdict(list) + for i in range(len(data)): + item = data.iloc[i] + this_n_opt = 0 + for j, ch in enumerate(string.ascii_uppercase[:n_opt]): + if not pd.isna(item[ch]): + this_n_opt = j + 1 + else: + for k in 
range(j + 1, n_opt): + assert pd.isna(item[string.ascii_uppercase[k]]), (k, item) + assert this_n_opt >= 2 or this_n_opt == 0 + flag = abnormal_entry(item) + if flag or this_n_opt == 0: + groups['abnormal'].append(item) + elif len(item['answer']) > 1 or item['answer'] not in string.ascii_uppercase[:this_n_opt]: + groups['abnormal'].append(item) + else: + groups[this_n_opt].append(item) + for k in groups: + groups[k] = pd.concat(groups[k], axis=1).T + print(f'{k if k == "abnormal" else str(k) + "-choice"} records: {len(groups[k])}') + + data_all = [] + + for k in groups: + if k == 'abnormal': + warnings.warn( + f"{len(groups['abnormal'])} abnormal entries detected. The problems can be: " + "1. Choice labels found in some choice contents; 2. No choices found for this question; " + "3. The answer is not a valid choice. Will not apply circular to those samples." + ) + abdata = groups['abnormal'] + abdata['g_index'] = abdata['index'] + data_all.append(abdata) + else: + cir_data = [] + assert isinstance(k, int) and k >= 2 + labels = string.ascii_uppercase[:k] + rotates = [labels] + dq = deque(labels) + for i in range(k - 1): + dq.rotate(1) + rotates.append(list(dq)) + for i, rot in enumerate(rotates): + if i == 0: + data = groups[k].copy() + data['g_index'] = data['index'] + cir_data.append(data) + else: + try: + data = groups[k].copy() + data['index'] = [int(x + OFFSET * i) for x in data['index']] + data['g_index'] = [int(x % OFFSET) for x in data['index']] + data['image'] = data['g_index'] + c_map = {k: v for k, v in zip(rotates[0], rot)} + data['answer'] = [c_map[x] for x in data['answer']] + for s, t in c_map.items(): + data[t] = groups[k][s] + cir_data.append(data) + except: + print(set(data['answer'])) + raise NotImplementedError + data_all.append(pd.concat(cir_data)) + data_all = pd.concat(data_all) + data_all['index'] = [int(x) for x in data_all['index']] + data_all['g_index'] = [int(x) for x in data_all['g_index']] + + tgt_file = inp.replace('.tsv', 
'_circular.tsv') + dump(data_all, tgt_file) + print(f'Processed data are saved to {tgt_file}: {len(load(inp))} raw records, {len(data_all)} circularized records.') # noqa: E501 + assert osp.exists(tgt_file) + print(f'The MD5 for the circularized data is {md5(tgt_file)}') + + +PTH = osp.realpath(__file__) +IMAGE_PTH = osp.join(osp.dirname(PTH), '../assets/apple.jpg') + +msg1 = [ + IMAGE_PTH, + 'What is in this image?' +] +msg2 = [ + dict(type='image', value=IMAGE_PTH), + dict(type='text', value='What is in this image?') +] +msg3 = [ + IMAGE_PTH, + IMAGE_PTH, + 'How many apples are there in these images?' +] +msg4 = [ + dict(type='image', value=IMAGE_PTH), + dict(type='image', value=IMAGE_PTH), + dict(type='text', value='How many apples are there in these images?') +] + + +def CHECK(val): + if val in supported_VLM: + model = supported_VLM[val]() + print(f'Model: {val}') + for i, msg in enumerate([msg1, msg2, msg3, msg4]): + if i > 1 and not model.INTERLEAVE: + continue + res = model.generate(msg) + print(f'Test {i + 1}: {res}') + elif val in models: + model_list = models[val] + for m in model_list: + CHECK(m) + + +def LOCALIZE(fname, new_fname=None): + if new_fname is None: + new_fname = fname.replace('.tsv', '_local.tsv') + + base_name = osp.basename(fname) + dname = osp.splitext(base_name)[0] + + data = load(fname) + data_new = localize_df(data, dname) + dump(data_new, new_fname) + print(f'The localized version of data file is {new_fname}') + return new_fname + + +def RUN(lvl, model): + import torch + NGPU = torch.cuda.device_count() + SCRIPT = osp.join(osp.dirname(__file__), '../run.py') + logger = get_logger('Run Missing') + + def get_env(name): + assert name in ['433', '437', '440', 'latest'] + load_env() + env_key = f'ENV_{name}' + return os.environ.get(env_key, None) + + missing = MISSING(lvl) + if model == 'all': + pass + elif model == 'api': + missing = [x for x in missing if x[0] in models['api']] + elif model == 'hf': + missing = [x for x in missing if 
x[0] not in models['api']] + elif model in models: + missing = [x for x in missing if x[0] in models[missing]] + elif model in supported_VLM: + missing = [x for x in missing if x[0] == model] + else: + warnings.warn(f'Invalid model {model}.') + + missing.sort(key=lambda x: x[0]) + groups = defaultdict(list) + for m, D in missing: + groups[m].append(D) + for m in groups: + if m in SKIP_MODELS: + continue + for dataset in groups[m]: + logger.info(f'Running {m} on {dataset}') + exe = 'python' if m in LARGE_MODELS or m in models['api'] else 'torchrun' + if m not in models['api']: + env = None + env = 'latest' if m in models['latest'] else env + env = '433' if m in models['4.33.0'] else env + env = '437' if m in models['4.37.0'] else env + env = '440' if m in models['4.40.0'] else env + if env is None: + # Not found, default to latest + env = 'latest' + logger.warning( + f"Model {m} does not have a specific environment configuration. Defaulting to 'latest'.") + pth = get_env(env) + if pth is not None: + exe = osp.join(pth, 'bin', exe) + else: + logger.warning(f'Cannot find the env path {env} for model {m}') + if exe.endswith('torchrun'): + cmd = f'{exe} --nproc-per-node={NGPU} {SCRIPT} --model {m} --data {dataset}' + elif exe.endswith('python'): + cmd = f'{exe} {SCRIPT} --model {m} --data {dataset}' + os.system(cmd) + + +def EVAL(dataset_name, data_file, **kwargs): + from vlmeval.dataset import build_dataset + logger = get_logger('VLMEvalKit Tool-Eval') + dataset = build_dataset(dataset_name) + # Set the judge kwargs first before evaluation or dumping + judge_kwargs = {'nproc': 4, 'verbose': True} + if 'model' not in kwargs: + if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro']: + judge_kwargs['model'] = 'chatgpt-0125' + elif listinstr(['MMVet', 'LLaVABench', 'MMBench-Video'], dataset_name): + judge_kwargs['model'] = 'gpt-4-turbo' + elif listinstr(['MMLongBench', 'MMDU'], dataset_name): + judge_kwargs['model'] = 'gpt-4o' + elif listinstr(['DynaMath', 'MathVerse', 
'MathVista', 'MathVision'], dataset_name): + judge_kwargs['model'] = 'gpt-4o-mini' + elif listinstr(['SFE'], dataset_name): + judge_kwargs['model'] = 'gpt-4o-1120' + else: + judge_kwargs['model'] = kwargs['model'] + judge_kwargs['nproc'] = kwargs.get('nproc', 4) + eval_results = dataset.evaluate(data_file, **judge_kwargs) + if eval_results is not None: + assert isinstance(eval_results, dict) or isinstance(eval_results, pd.DataFrame) + logger.info('Evaluation Results:') + if isinstance(eval_results, dict): + logger.info('\n' + json.dumps(eval_results, indent=4)) + elif isinstance(eval_results, pd.DataFrame): + logger.info('\n') + logger.info(tabulate(eval_results.T) if len(eval_results) < len(eval_results.columns) else eval_results) + return eval_results + + +def parse_args_eval(): + parser = argparse.ArgumentParser() + # Essential Args, Setting the Names of Datasets and Models + parser.add_argument('cmd', type=str) + parser.add_argument('data_file', type=str) + parser.add_argument('--judge', type=str, default=None) + parser.add_argument('--api-nproc', type=int, default=4) + parser.add_argument('--retry', type=int, default=None) + args = parser.parse_args() + return args + + +def parse_args_scan(): + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, nargs='+') + parser.add_argument('--data', type=str, nargs='+') + parser.add_argument('--root', type=str, default=None) + args, unknownargs = parser.parse_known_args() + return args, unknownargs + + +def parse_args_sync(): + parser = argparse.ArgumentParser() + parser.add_argument('--src', type=str, default='/home/kenny/mmeval') + parser.add_argument('--tgt', type=str, default='/home/kenny/volc/mmeval') + parser.add_argument('--data', type=str, nargs='+') + args, unknownargs = parser.parse_known_args() + return args, unknownargs + + +def MERGE_PKL(pkl_dir, world_size=1): + prefs = [] + for ws in list(range(1, 9)): + prefs.extend([f'{i}{ws}_' for i in range(ws)]) + prefs = set(prefs) + files = 
os.listdir(pkl_dir) + files = [x for x in files if x[:3] in prefs] + # Merge the files + res_all = defaultdict(dict) + for f in files: + full_path = osp.join(pkl_dir, f) + key = f[3:] + res_all[key].update(load(full_path)) + os.remove(full_path) + + dump_prefs = [f'{i}{world_size}_' for i in range(world_size)] + for k in res_all: + for pf in dump_prefs: + dump(res_all[k], f'{pkl_dir}/{pf}{k}') + print(f'Merged {len(res_all[k])} records into {pkl_dir}/{dump_prefs[0]}{k}') + + +def SCAN_ONE(root, model, dataset): + from termcolor import colored + FAIL_MSG = 'Failed to obtain answer via API.' + root = osp.join(root, model) + pred_format = get_pred_file_format() + fname = f'{model}_{dataset}.{pred_format}' + pth = osp.join(root, fname) + if osp.exists(pth): + data = load(pth) + # Detect Failure + assert 'prediction' in data + data['prediction'] = [str(x) for x in data['prediction']] + fail = [FAIL_MSG in x for x in data['prediction']] + if sum(fail): + nfail = sum(fail) + ntot = len(fail) + print(colored(f'Model {model} x Dataset {dataset} Inference: {nfail} out of {ntot} failed. {nfail / ntot * 100: .2f}%. ', 'light_red')) # noqa: E501 + + eval_files = ls(root, match=f'{model}_{dataset}_') + eval_files = [x for x in eval_files if listinstr([f'{dataset}_openai', f'{dataset}_gpt'], x) and x.endswith('.xlsx')] # noqa: E501 + + if len(eval_files) == 0: + return + + for eval_file in eval_files: + data = load(eval_file) + + if 'MMVet' in dataset: + bad = [x for x in data['log'] if 'All 5 retries failed.' 
in str(x)] + if len(bad): + print(f'Evaluation ({eval_file}): {len(bad)} out of {len(data)} failed.') + elif 'MathVista' in dataset: + bad = [x for x in data['res'] if FAIL_MSG in str(x)] + if len(bad): + print(f'Evaluation ({eval_file}): {len(bad)} out of {len(data)} failed.') + elif dataset == 'LLaVABench': + sub = data[data['gpt4_score'] == -1] + sub = sub[sub['gpt4_score'] == -1] + if len(sub): + print(f'Evaluation ({eval_file}): {len(sub)} out of {len(data)} failed.') + else: + if 'log' in data: + bad = [x for x in data['log'] if FAIL_MSG in str(x)] + if len(bad): + print(f'Evaluation ({eval_file}): {len(bad)} out of {len(data)} failed.') + else: + print(colored(f'Model {model} x Dataset {dataset} Inference Result Missing! ', 'red')) + + +def SCAN(root, models, datasets): + for m in models: + if not osp.exists(osp.join(root, m)): + warnings.warn(f'Model {m} not found in {root}') + continue + cur_datasets = [] + if len(datasets) == 0: + for d in SUPPORTED_DATASETS: + pred_format = get_pred_file_format() + if osp.exists(osp.join(root, m, f'{m}_{d}.{pred_format}')): + cur_datasets.append(d) + else: + cur_datasets = datasets + cur_datasets = list(set(cur_datasets)) + cur_datasets.sort() + for d in cur_datasets: + SCAN_ONE(root, m, d) + print(colored(f'Finished scanning datasets {cur_datasets} for model {m}.', 'green')) + + +def cli(): + logger = get_logger('VLMEvalKit Tools') + args = sys.argv[1:] + if not args: # no arguments passed + logger.info(CLI_HELP_MSG) + return + + if args[0].lower() == 'dlist': + assert len(args) >= 2 + res = [] + for arg in args[1:]: + lst = DLIST(arg) + res.extend(lst) + print(' '.join(res)) + elif args[0].lower() == 'mlist': + assert len(args) >= 2 + size = 'all' + if len(args) > 2: + size = args[2].lower() + lst = MLIST(args[1], size) + print('\n'.join(lst)) + elif args[0].lower() == 'missing': + assert len(args) >= 2 + missing_list = MISSING(args[1]) + logger = get_logger('Find Missing') + logger.info(colored(f'Level {args[1]} 
Missing Results: ', 'red')) + lines = [] + for m, D in missing_list: + line = f'Model {m}, Dataset {D}' + logger.info(colored(line, 'red')) + lines.append(line) + mwlines(lines, f'{args[1]}_missing.txt') + elif args[0].lower() == 'circular': + assert len(args) >= 2 + CIRCULAR(args[1]) + elif args[0].lower() == 'localize': + assert len(args) >= 2 + LOCALIZE(args[1]) + elif args[0].lower() == 'check': + assert len(args) >= 2 + model_list = args[1:] + for m in model_list: + CHECK(m) + elif args[0].lower() == 'run': + assert len(args) >= 2 + lvl = args[1] + if len(args) == 2: + model = 'all' + RUN(lvl, model) + else: + for model in args[2:]: + RUN(lvl, model) + elif args[0].lower() == 'eval': + args = parse_args_eval() + data_file = args.data_file + + def extract_dataset(file_name): + fname = osp.splitext(file_name)[0].split('/')[-1] + parts = fname.split('_') + for i in range(len(parts)): + if '_'.join(parts[i:]) in SUPPORTED_DATASETS: + return '_'.join(parts[i:]) + return None + + dataset = extract_dataset(data_file) + assert dataset is not None, f'Cannot infer dataset name from {data_file}' + kwargs = {'nproc': args.api_nproc} + if args.judge is not None: + kwargs['model'] = args.judge + if args.retry is not None: + kwargs['retry'] = args.retry + EVAL(dataset_name=dataset, data_file=data_file, **kwargs) + elif args[0].lower() == 'merge_pkl': + assert len(args) == 3 + args[2] = int(args[2]) + assert args[2] in [1, 2, 4, 8] + MERGE_PKL(args[1], args[2]) + elif args[0].lower() == 'scan': + args, unknownargs = parse_args_scan() + # The default value is only for the maintainer usage + root = args.root if args.root is not None else os.getcwd() + models = [] + for m in args.model: + if osp.exists(m) and m.endswith('.txt'): + lines = mrlines(m) + models.extend([x.split()[0] for x in lines if len(x.split()) >= 1]) + else: + models.append(m) + assert len(models) + datasets = args.data + SCAN(root, models, datasets if datasets is not None else []) + else: + 
logger.error('WARNING: command error!') + logger.info(CLI_HELP_MSG) + return diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__init__.py b/VLMEvalKit-sudoku/vlmeval/vlm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..822a0a2a7f61d6350389bfff9f1c7c87c7a63add --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/vlm/__init__.py @@ -0,0 +1,112 @@ +import torch + +torch.set_grad_enabled(False) +torch.manual_seed(1234) +from .aria import Aria +from .base import BaseModel +from .hawk_vl import HawkVL +from .thyme import Thyme +from .cogvlm import CogVlm, GLM4v, GLMThinking +from .emu import Emu, Emu3_chat, Emu3_gen +from .eagle_x import Eagle +from .granite_vision import GraniteVision3 +from .idefics import IDEFICS, IDEFICS2 +from .instructblip import InstructBLIP +from .kosmos import Kosmos2 +from .llava import ( + LLaVA, + LLaVA_Next, + LLaVA_XTuner, + LLaVA_Next2, + LLaVA_OneVision, + LLaVA_OneVision_HF, +) +from .vita import VITA, VITAQwen2 +from .long_vita import LongVITA +from .minicpm_v import MiniCPM_V, MiniCPM_Llama3_V, MiniCPM_V_2_6, MiniCPM_o_2_6, MiniCPM_V_4, MiniCPM_V_4_5 +from .minigpt4 import MiniGPT4 +from .mmalaya import MMAlaya, MMAlaya2 +from .monkey import Monkey, MonkeyChat +from .moondream import Moondream1, Moondream2 +from .minimonkey import MiniMonkey +from .mplug_owl2 import mPLUG_Owl2 +from .omnilmm import OmniLMM12B +from .open_flamingo import OpenFlamingo +from .pandagpt import PandaGPT +from .qwen_vl import QwenVL, QwenVLChat +from .qwen2_vl import Qwen2VLChat, Qwen2VLChatAguvis +from .transcore_m import TransCoreM +from .visualglm import VisualGLM +from .xcomposer import ( + ShareCaptioner, + XComposer, + XComposer2, + XComposer2_4KHD, + XComposer2d5, +) +from .yi_vl import Yi_VL +from .internvl import InternVLChat +from .deepseek_vl import DeepSeekVL +from .deepseek_vl2 import DeepSeekVL2 +from .janus import Janus +from .mgm import Mini_Gemini +from .bunnyllama3 import BunnyLLama3 +from .vxverse import VXVERSE +from 
.gemma import PaliGemma, Gemma3 +from .qh_360vl import QH_360VL +from .phi3_vision import Phi3Vision, Phi3_5Vision +from .phi4_multimodal import Phi4Multimodal +from .wemm import WeMM +from .cambrian import Cambrian +from .chameleon import Chameleon +from .video_llm import ( + VideoLLaVA, + VideoLLaVA_HF, + Chatunivi, + VideoChatGPT, + LLaMAVID, + VideoChat2_HD, + PLLaVA, +) +from .vila import VILA, NVILA +from .ovis import Ovis, Ovis1_6, Ovis1_6_Plus, Ovis2, OvisU1 +from .mantis import Mantis +from .mixsense import LLama3Mixsense +from .parrot import Parrot +from .omchat import OmChat +from .rbdash import RBDash +from .xgen_mm import XGenMM +from .slime import SliME +from .mplug_owl3 import mPLUG_Owl3 +from .pixtral import Pixtral +from .llama_vision import llama_vision +from .llama4 import llama4 +from .molmo import molmo +from .points import POINTS, POINTSV15 +from .nvlm import NVLM +from .vintern_chat import VinternChat +from .h2ovl_mississippi import H2OVLChat +from .falcon_vlm import Falcon2VLM +from .smolvlm import SmolVLM, SmolVLM2 +from .sail_vl import SailVL +from .valley import Valley2Chat +from .ross import Ross +from .ola import Ola +from .x_vl import X_VL_HF +from .ursa import UrsaChat +from .vlm_r1 import VLMR1Chat +from .aki import AKI +from .ristretto import Ristretto +from .vlaa_thinker import VLAAThinkerChat +from .kimi_vl import KimiVL +from .wethink_vl import WeThinkVL +from .flash_vl import FlashVL +from .oryx import Oryx +from .treevgr import TreeVGR +from .varco_vision import VarcoVision +from .qtunevl import ( + QTuneVL, + QTuneVLChat, +) +from .logics import Logics_Thinking +from .llava_uhd_siglip2 import LLaVA_UHD_SIGLIP2, LLaVA_UHD_SIGLIP2_SLICE \ No newline at end of file diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aria.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aria.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..501433b64f2596ed30af6cb1549eb08172ce7b78 Binary files 
/dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/aria.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/gemma.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/gemma.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e42b8c106a8d6cdb5850d4135dd20abc786dab45 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/gemma.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/instructblip.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/instructblip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc6698480d3405ea5d804474ebc3c21e7879d196 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/instructblip.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/janus.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/janus.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..297cc62d65104be98d9b1158913fcac022705f9a Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/janus.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kimi_vl.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kimi_vl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2d086a288cc229260a388b238054a1630f0c835 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/kimi_vl.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/monkey.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/monkey.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35975c1d9415b2dad3798dbf6cdc2374dd750f1f Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/monkey.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/x_vl.cpython-310.pyc 
b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/x_vl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aea69d123550202fdf0e05e3cae9e706e1abd8b1 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/x_vl.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/yi_vl.cpython-310.pyc b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/yi_vl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..987cf8f7c08d9cceb0904c43f6dc3ca03eac3389 Binary files /dev/null and b/VLMEvalKit-sudoku/vlmeval/vlm/__pycache__/yi_vl.cpython-310.pyc differ diff --git a/VLMEvalKit-sudoku/vlmeval/vlm/aki.py b/VLMEvalKit-sudoku/vlmeval/vlm/aki.py new file mode 100644 index 0000000000000000000000000000000000000000..25ad91c9bc4cc7f43ea2a14c499342dc79196aae --- /dev/null +++ b/VLMEvalKit-sudoku/vlmeval/vlm/aki.py @@ -0,0 +1,104 @@ +import torch +from PIL import Image +import warnings +from .base import BaseModel +from ..smp import splitlen, get_cache_path +from transformers import AutoTokenizer, AutoConfig +from torchvision.transforms import Compose, Resize, Lambda, ToTensor, Normalize +try: + from torchvision.transforms import InterpolationMode + BICUBIC = InterpolationMode.BICUBIC +except ImportError: + BICUBIC = Image.BICUBIC + + +class AKI(BaseModel): + INSTALL_REQ = True + INTERLEAVE = False + + def __init__(self, + name, + ckpt_pth=None, + **kwargs): + + self.name = name + try: + from open_flamingo.src.modeling_aki import AKI + except: + raise ImportError('Please first install AKIVLM from https://github.com/sony/aki') + + # replace GenerationMixin to modify attention mask handling + from transformers.generation.utils import GenerationMixin + from open_flamingo import _aki_update_model_kwargs_for_generation + GenerationMixin._update_model_kwargs_for_generation = _aki_update_model_kwargs_for_generation + + config = AutoConfig.from_pretrained(ckpt_pth) + tokenizer = AutoTokenizer.from_pretrained(ckpt_pth) 
+ model = AKI.from_pretrained(ckpt_pth, tokenizer=tokenizer) + + n_px = getattr(config, "n_px", 384) + norm_mean = getattr(config, "norm_mean", 0.5) + norm_std = getattr(config, "norm_std", 0.5) + + image_processor = Compose([ + Resize((n_px, n_px), interpolation=InterpolationMode.BICUBIC, antialias=True), + Lambda(lambda x: x.convert('RGB')), + ToTensor(), + Normalize(mean=(norm_mean, norm_mean, norm_mean), std=(norm_std, norm_std, norm_std)) + ]) + self.model = model.eval().cuda() + + tokenizer.padding_side = 'left' + tokenizer.add_eos_token = False + self.tokenizer = tokenizer + self.image_proc = image_processor + + kwargs_default = { + 'max_new_tokens': 512, + 'temperature': 0.0, + 'do_sample': False, + 'eos_token_id': tokenizer.eos_token_id, + } + kwargs_default.update(kwargs) + self.kwargs = kwargs_default + + def apply_prompt_template(self, query): + SYSTEM_BASE = "A chat between a curious user and an artificial intelligence assistant." + SYSTEM_DETAIL = "The assistant gives helpful, detailed, and polite answers to the user's questions." + SYSTEM_MESSAGE = SYSTEM_BASE + " " + SYSTEM_DETAIL + SYSTEM_MESSAGE_ROLE = '<|system|>' + '\n' + SYSTEM_MESSAGE + '<|end|>\n' + + s = ( + f'{SYSTEM_MESSAGE_ROLE}' + f'<|user|>\n{query}<|end|>\n<|assistant|>\n' + ) + return s + + def generate_inner(self, message, dataset=None): + vision_x, prompt = [], '' + for msg in message: + if msg['type'] == 'image': + img = Image.open(msg['value']).convert('RGB') + + # [NOTE]: only use the first image in this work if including multiple images in a sample + if len(vision_x) == 0: + vision_x.append(self.image_proc(img).unsqueeze(0)) + prompt += '' + else: + warnings.warn('======Only the first image is used in the input.') + elif msg['type'] == 'text': + prompt += msg['value'] + # prompt += f"\nAnswer the question using a single word or phrase. 
class Chameleon(BaseModel):
    """VLMEvalKit wrapper around the HF `facebook/chameleon-7b` checkpoint."""

    INSTALL_REQ = False
    INTERLEAVE = True

    def __init__(self, model_path='facebook/chameleon-7b', **kwargs):
        # The Chameleon classes only exist in recent transformers releases.
        try:
            from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
        except Exception as e:
            logging.critical('Please install the latest transformers.')
            raise e

        self.processor = ChameleonProcessor.from_pretrained(model_path)
        self.model = ChameleonForConditionalGeneration.from_pretrained(
            model_path, torch_dtype=torch.bfloat16
        ).cuda().eval()

    def generate_inner(self, message, dataset=None):
        # Fold the interleaved message into one text string plus a PIL image list.
        # NOTE(review): the per-image placeholder below looks truncated in this
        # dump (only '\n' survives); presumably an image tag such as '<image>'
        # preceded it — confirm against the upstream file.
        pieces, images = [], []
        for part in message:
            if part['type'] == 'text':
                pieces.append(part['value'])
            elif part['type'] == 'image':
                pieces.append('\n')
                images.append(Image.open(part['value']))
        content = ''.join(pieces)

        inputs = self.processor(
            text=[content],
            images=images,
            padding=True,
            return_tensors='pt'
        ).to(device='cuda', dtype=torch.bfloat16)
        generate_ids = self.model.generate(**inputs, max_new_tokens=2048)
        # Drop the prompt tokens so only newly generated text is decoded.
        prompt_len = inputs.input_ids.shape[1]
        return self.processor.batch_decode(
            generate_ids[:, prompt_len:],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )[0]


class Falcon2VLM(BaseModel):
    """VLMEvalKit wrapper for `tiiuae/falcon-11B-vlm` (LLaVA-Next architecture)."""

    INSTALL_REQ = False
    INTERLEAVE = False

    def __init__(self, model_path='tiiuae/falcon-11B-vlm', **kwargs):
        import torch
        from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor

        self.model_path = model_path
        self.processor = LlavaNextProcessor.from_pretrained(model_path, tokenizer_class='PreTrainedTokenizerFast')
        self.model = LlavaNextForConditionalGeneration.from_pretrained(
            model_path, torch_dtype=torch.bfloat16, device_map='cuda').eval()
        # Caller kwargs override the generation defaults.
        self.kwargs = {'max_new_tokens': 512, **kwargs}

    def generate_inner(self, message, dataset=None):
        prompt, image_path = self.message_to_promptimg(message, dataset=dataset)
        image = Image.open(image_path).convert('RGB')

        # Falcon-VLM chat layout.
        # NOTE(review): an image placeholder appears truncated in this dump —
        # presumably 'User:<image>\n…' upstream; confirm before relying on it.
        prompt = f'User:\n{prompt} Falcon:'
        inputs = self.processor(text=prompt, images=image, return_tensors='pt').to('cuda')

        output = self.model.generate(**inputs, **self.kwargs)
        # Decode only the tokens generated after the prompt.
        prompt_length = inputs['input_ids'].shape[1]
        return self.processor.decode(output[0][prompt_length:], skip_special_tokens=True).strip()
b/VLMEvalKit-sudoku/vlmeval/vlm/internvl/gui_template.yaml @@ -0,0 +1,45 @@ +ScreenSpot: + template_zeroshot: |- + Based on the screenshot of the page, I give a text description and you give the bounding box coordinate of the region this sentence describes: {task} + template: |- + {task} + placeholders: + - task + +ScreenSpot_Pro: + template_zeroshot: |- + Based on the screenshot of the page, I give a text description and you give the bounding box coordinate of the region this sentence describes: {task} + template: |- + {task} + placeholders: + - task + +ScreenSpot_v2: + template_zeroshot: |- + Based on the screenshot of the page, I give a text description and you give the bounding box coordinate of the region this sentence describes: {task} + template: |- + {task} + placeholders: + - task + +MM_Mind2Web: + system_prompt: |- + You are a GUI agent. You are given a task and a screenshot of the screen. You need to perform a series of pyautogui actions to complete the task. + + You have access to the following functions: + - {"name": "mobile.swipe", "description": "swipe on the screen", "parameters": {"type": "object", "properties": {"from_coord": {"type": "array", "items": {"type": "number"}, "description": "The starting coordinates of the swipe"}, "to_coord": {"type": "array", "items": {"type": "number"}, "description": "The ending coordinates of the swipe"}}, "required": ["from_coord", "to_coord"]}} + - {"name": "mobile.home", "description": "Press the home button"} + - {"name": "mobile.back", "description": "Press the back button"} + + template: |- + Please generate the next move according to the ui screenshot, instruction and previous actions. + + Instruction: + {task}. + + Previous actions: + {history}. 
class Janus(BaseModel):
    """VLMEvalKit wrapper for DeepSeek Janus; requires `janus` installed from source."""

    INSTALL_REQ = True
    INTERLEAVE = True

    def check_install(self):
        # Fail early with an actionable message when the janus package is missing.
        try:
            import janus  # noqa: F401
        except Exception as e:
            logging.critical(
                'Please first install janus from source codes in: https://github.com/deepseek-ai/Janus')
            raise e

    def __init__(self, model_path='deepseek-ai/Janus-1.3B', **kwargs):
        self.check_install()
        assert model_path is not None
        self.model_path = model_path
        from janus.models import VLChatProcessor

        self.vl_chat_processor = VLChatProcessor.from_pretrained(model_path)
        self.tokenizer = self.vl_chat_processor.tokenizer
        self.model = (
            AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
            .to(torch.bfloat16)
            .cuda()
            .eval()
        )

        torch.cuda.empty_cache()
        generation_defaults = dict(
            max_new_tokens=2048,
            do_sample=False,
            use_cache=True,
            output_logits=False,
            output_scores=False,
            return_dict_in_generate=False)
        generation_defaults.update(kwargs)
        self.kwargs = generation_defaults
        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')

    def prepare_inputs(self, message):
        """Convert a VLMEvalKit message into the Janus conversation format."""
        # NOTE(review): the image placeholder appended below is an empty string in
        # this dump — presumably a stripped image tag; confirm upstream.
        def prepare_itlist(msgs):
            content, images = '', []
            for seg in msgs:
                if seg['type'] == 'image':
                    images.append(seg['value'])
                    content += ''
                elif seg['type'] == 'text':
                    content += seg['value']
            return content, images

        conversation = []
        if 'role' not in message[0]:
            # Single-turn: the whole message is one user turn.
            content, images = prepare_itlist(message)
            conversation.append(dict(role='User', content=content, images=images))
        else:
            # Multi-turn: translate roles turn by turn.
            role_map = {'user': 'User', 'assistant': 'Assistant'}
            for turn in message:
                content, images = prepare_itlist(turn['content'])
                conversation.append(dict(role=role_map[turn['role']], content=content, images=images))
        # Janus expects a trailing empty Assistant turn as the generation slot.
        conversation.append(dict(role='Assistant', content=''))
        return conversation

    def generate_inner(self, message, dataset=None):
        # MMVet is graded free-form, so a CoT-style system prompt is used there;
        # every other benchmark runs with an empty system prompt.
        if dataset is not None and 'MMVet' in dataset:
            self.vl_chat_processor.system_prompt = "You are a helpful assistant. Please answer truthfully and write out your thinking step by step to be sure you get the right answer."  # noqa: E501
        else:
            self.vl_chat_processor.system_prompt = ""

        conversation = self.prepare_inputs(message)
        from janus.utils.io import load_pil_images
        pil_images = load_pil_images(conversation)
        prepare_inputs = self.vl_chat_processor(conversations=conversation, images=pil_images, force_batchify=True)
        prepare_inputs = prepare_inputs.to(self.model.device, dtype=torch.bfloat16)
        inputs_embeds = self.model.prepare_inputs_embeds(**prepare_inputs)

        outputs = self.model.language_model.generate(
            inputs_embeds=inputs_embeds,
            attention_mask=prepare_inputs.attention_mask,
            pad_token_id=self.tokenizer.eos_token_id,
            bos_token_id=self.tokenizer.bos_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
            **self.kwargs)
        return self.tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)

    def chat_inner(self, message, dataset=None):
        # Multi-turn chat reuses the single-shot path (prepare_inputs handles roles).
        return self.generate_inner(message, dataset=dataset)

    def use_custom_prompt(self, dataset):
        assert dataset is not None
        # Custom prompts only for Y/N, MCQ and MMVet.
        return DATASET_TYPE(dataset) in ('Y/N', 'MCQ') or dataset == 'MMVet'

    def build_prompt(self, line, dataset=None):
        assert dataset is None or isinstance(dataset, str)
        assert self.use_custom_prompt(dataset)
        tgt_path = self.dump_image(line, dataset)
        question = line['question']
        if DATASET_TYPE(dataset) == 'Y/N':
            if dataset == 'POPE':
                # POPE questions already carry this suffix; strip to avoid doubling up.
                question = question.replace(" Please answer yes or no.", "")
            prompt = '\n' + question + "\nAnswer the question using a single word or phrase."
        elif DATASET_TYPE(dataset) == 'MCQ':
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = ''.join(f'{key}. {item}\n' for key, item in options.items())
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            prompt = f'\nHint: {hint}\n' if hint is not None else '\n'
            prompt += f'{question}\n'
            prompt += (
                f"{options_prompt}\nAnswer with the option's letter from the given choices directly."
                if len(options) else 'Answer the question directly. '
            )
        elif dataset == 'MMVet':
            prompt = '\n' + question
        else:
            raise NotImplementedError

        message = [dict(type='image', value=s) for s in tgt_path]
        message.append(dict(type='text', value=prompt))
        return message
class Kosmos2(BaseModel):
    """VLMEvalKit wrapper for microsoft/kosmos-2-patch14-224.

    Requires transformers pinned to 4.45.1 (newer versions break this model).
    """

    INSTALL_REQ = True
    INTERLEAVE = True

    def __init__(self,
                 model_path='microsoft/kosmos-2-patch14-224',
                 **kwargs):
        try:
            from transformers import AutoProcessor, Kosmos2ForConditionalGeneration
        except Exception as e:
            logging.critical("Please install Transformers version 4.45.1 by running: pip install transformers==4.45.1")
            raise e

        assert osp.exists(model_path) or splitlen(model_path) == 2

        self.model = (
            Kosmos2ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16)
            .to(torch.device('cuda'))
        )
        self.processor = AutoProcessor.from_pretrained(model_path)

        default_kwargs = dict(
            max_new_tokens=512,
            use_cache=True
        )
        default_kwargs.update(kwargs)
        self.kwargs = default_kwargs
        warnings.warn(f'Following kwargs received: {self.kwargs}, will use as generation config. ')
        torch.cuda.empty_cache()

    def generate_inner(self, message, dataset=None):
        # Kosmos-2 prompt layout: <task token> Question: ... Answer:
        # NOTE(review): TASK_TOKEN is a bare space in this dump — upstream it is
        # presumably a grounding/task tag that was stripped; confirm.
        TASK_TOKEN = ' '
        QUESTION_TOKEN = 'Question: '  # fixed misspelling (was QEUSTION_TOKEN)
        ANSWER_TOKEN = 'Answer: '
        images = []
        prompt = TASK_TOKEN
        for s in message:
            if s['type'] == 'image':
                images.append(s['value'])
            elif s['type'] == 'text':
                prompt += QUESTION_TOKEN
                prompt += s['value']
        prompt += ANSWER_TOKEN

        # Only the first image is fed to the processor (single-image model).
        images = [Image.open(s) for s in images]
        inputs = self.processor(text=prompt, images=images[0], return_tensors='pt').to(torch.device('cuda'))

        generated_ids = self.model.generate(
            pixel_values=inputs['pixel_values'],
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            image_embeds=None,
            image_embeds_position_mask=inputs['image_embeds_position_mask'],
            **self.kwargs
        )

        generated_text = self.processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        processed_text = self.processor.post_process_generation(generated_text, cleanup_and_extract=True)[0]
        # Strip the echoed Question/Answer scaffolding, keep only the answer.
        cleaned_answer = re.sub(r'(Question:.*?Answer:|Question:.*)', '', processed_text).strip()
        return cleaned_answer

    def use_custom_prompt(self, dataset):
        assert dataset is not None
        if listinstr(['MMMU'], dataset):
            return False
        if DATASET_TYPE(dataset) == 'MCQ' or dataset == 'MMVet':
            return True
        return False

    def build_prompt(self, line, dataset=None):
        assert dataset is None or isinstance(dataset, str)
        assert self.use_custom_prompt(dataset)
        tgt_path = self.dump_image(line, dataset)
        question = line['question']
        if dataset == 'MMVet':
            prompt = question + '\nAnswer the question directly. '
        elif DATASET_TYPE(dataset) == 'MCQ':
            options = {
                cand: line[cand]
                for cand in string.ascii_uppercase
                if cand in line and not pd.isna(line[cand])
            }
            options_prompt = ''
            for key, item in options.items():
                options_prompt += f'{key}. {item}\n'
            hint = line['hint'] if ('hint' in line and not pd.isna(line['hint'])) else None
            prompt = f'Hint: {hint}\n' if hint is not None else ''
            prompt += f'{question}\n'
            # NOTE: the curly apostrophe below is what the model was prompted
            # with originally; preserved byte-for-byte.
            prompt += (
                f'{options_prompt}\nAnswer with the option’s letter from the given choices directly. '
                if len(options) else 'Answer the question directly. '
            )
        else:
            raise NotImplementedError

        message = [dict(type='text', value=prompt)]
        message.extend([dict(type='image', value=s) for s in tgt_path])
        return message
class LLaVA_UHD_SIGLIP2(BaseModel):
    """LLaVA-UHD with a SigLIP2 vision tower — full-image (no slicing) variant."""

    INSTALL_REQ = True
    INTERLEAVE = True

    def __init__(self,
                 model_path='',
                 **kwargs):
        try:
            from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
            from llava.model.builder import load_pretrained_model
            from llava.utils import disable_torch_init
            from torch.utils.data import Dataset, DataLoader
            from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, expand2square
        except Exception:
            warnings.warn('Please install LLava_UHD before using LLava_UHD')
            warnings.warn('Please install VLMEvalKit after installing LLava_UHD')
            sys.exit(-1)
        assert osp.exists(model_path) or len(model_path.split('/')) == 2
        self.system_prompt = (
            'A chat between a curious human and an artificial intelligence assistant. '
            "The assistant gives helpful, detailed, and polite answers to the human's questions. "
        )
        model_name = get_model_name_from_path(model_path)
        try:
            # Load on CPU first, then move the whole model to GPU below.
            self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(
                model_path=model_path,
                model_base=None,
                model_name=model_name,
                device='cpu',
                device_map='cpu'
            )
        except Exception as e:
            warnings.warn(f'Error loading model: {e}')
            sys.exit(-1)
        self.model = self.model.cuda()
        self.conv_mode = 'qwen_1_5'
        kwargs_default = dict(do_sample=False, temperature=0, max_new_tokens=512, top_p=None, num_beams=3, use_cache=True)  # noqa E501
        kwargs_default.update(kwargs)
        self.kwargs = kwargs_default

    def build_prompt(self, message, dataset=None):
        """Fold the message into (images, one text prompt) and append the
        dataset-type-specific answer instruction."""
        dataset_type = 'VQA' if dataset is None else DATASET_TYPE(dataset)

        prompt = ""
        images = []
        for msg in message:
            if msg['type'] == 'image':
                images.append(msg['value'])
            elif msg['type'] == 'text':
                prompt += msg['value']
        if dataset_type == 'MCQ':
            prompt += (
                '\n请直接回答选项字母。' if cn_string(prompt) else
                "\nAnswer with the option's letter from the given choices directly."
            )
        elif dataset_type == 'Y/N':
            prompt += (
                '\n请用简单字母或短语回答问题' if cn_string(prompt) else
                "\nAnswer the question using a single word or phrase."
            )
        # Other dataset types keep the prompt untouched.

        message = [dict(type='image', value=img) for img in images]
        message.append(dict(type='text', value=prompt))
        return message

    def preprocess(self, text, image, tokenizer, processor, model_config, conv_mode='qwen_1_5'):
        """Build (input_ids, image tensors, image sizes, patch tensors, ind tokens)
        for model.generate(). Only the first image in `image` is used."""
        from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio
        from llava.conversation import conv_templates, SeparatorStyle
        from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, expand2square
        qs = text
        # BUGFIX: was `self.DEFAULT_IM_START_TOKEN`, which is not an attribute of
        # this class and raised AttributeError; the token is a module-level
        # constant imported from llava.constants.
        if model_config.mm_use_im_start_end:
            qs = self.system_prompt + DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = self.system_prompt + DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv = conv_templates[conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()  # inserts the image placeholder into the template

        # Full-image variant: one high-resolution crop, never sliced.
        res = 1024
        scale_resolution = 1024
        never_split = True
        patch_size = 64  # patch size x merger size

        image = resize_image_keep_ratio(image[0], max_size=res)
        source_image, patches, best_grid, ind_tokens = slice_image_minicpm(
            image, max_slice_nums=7, scale_resolution=scale_resolution, patch_size=patch_size, never_split=never_split)

        # Force the no-slice path regardless of what slice_image_minicpm returned.
        patches = []
        best_grid = None
        ind_tokens = []

        # Both branches preprocess the source image identically, so it is hoisted.
        source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                              do_rescale=True, do_normalize=True,
                                              return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
        if best_grid is None:  # no slicing: use a dummy zero patch tensor
            crop_size = processor.crop_size
            patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width'])
        else:
            patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False,
                                                 do_rescale=True, do_normalize=True,
                                                 return_tensors='pt')['pixel_values']  # num_slice, 3, s_h, s_w

        images = [source_tensors[0].half().cuda()]  # 3, h, w
        patch_images = [patch_tensors.half().cuda()]  # bs, 3, h, w
        ind_tokens = [ind_tokens]
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
        return input_ids, images, [image.size], patch_images, ind_tokens

    def generate_inner(self, message, dataset=None):
        content, images = '', []
        for item in self.build_prompt(message, dataset):
            if item['type'] == 'text':
                content += item['value']
            elif item['type'] == 'image':
                images.append(Image.open(item['value']).convert('RGB'))

        input_ids, image_tensor, image_sizes, patch_images, ind_tokens = self.preprocess(
            content, images, self.tokenizer, self.image_processor, self.model.config, conv_mode='qwen_1_5')
        with torch.inference_mode():
            output_ids = self.model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=image_sizes,
                patch_images=patch_images,
                ind_tokens=ind_tokens,
                **self.kwargs)
        return self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
class LLaVA_UHD_SIGLIP2_SLICE(BaseModel):
    """LLaVA-UHD with a SigLIP2 vision tower — sliced (multi-crop) variant."""

    INSTALL_REQ = True
    INTERLEAVE = True

    def __init__(self,
                 model_path='',
                 **kwargs):
        try:
            from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
            from llava.model.builder import load_pretrained_model
            from llava.utils import disable_torch_init
            from torch.utils.data import Dataset, DataLoader
            from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, expand2square
        except Exception:
            warnings.warn('Please install LLava_UHD before using LLava_UHD')
            warnings.warn('Please install VLMEvalKit after installing LLava_UHD')
            sys.exit(-1)
        assert osp.exists(model_path) or len(model_path.split('/')) == 2
        self.system_prompt = (
            'A chat between a curious human and an artificial intelligence assistant. '
            "The assistant gives helpful, detailed, and polite answers to the human's questions. "
        )
        model_name = get_model_name_from_path(model_path)
        try:
            # Load on CPU first, then move the whole model to GPU below.
            self.tokenizer, self.model, self.image_processor, self.context_len = load_pretrained_model(
                model_path=model_path,
                model_base=None,
                model_name=model_name,
                device='cpu',
                device_map='cpu'
            )
        except Exception as e:
            warnings.warn(f'Error loading model: {e}')
            sys.exit(-1)
        self.model = self.model.cuda()
        self.conv_mode = 'qwen_1_5'
        kwargs_default = dict(do_sample=False, temperature=0, max_new_tokens=512, top_p=None, num_beams=3, use_cache=True)  # noqa E501
        kwargs_default.update(kwargs)
        self.kwargs = kwargs_default

    def build_prompt(self, message, dataset=None):
        """Fold the message into (images, one text prompt) and append the
        dataset-type-specific answer instruction."""
        dataset_type = 'VQA' if dataset is None else DATASET_TYPE(dataset)

        prompt = ""
        images = []
        for msg in message:
            if msg['type'] == 'image':
                images.append(msg['value'])
            elif msg['type'] == 'text':
                prompt += msg['value']
        if dataset_type == 'MCQ':
            prompt += (
                '\n请直接回答选项字母。' if cn_string(prompt) else
                "\nAnswer with the option's letter from the given choices directly."
            )
        elif dataset_type == 'Y/N':
            prompt += (
                '\n请用简单字母或短语回答问题' if cn_string(prompt) else
                "\nAnswer the question using a single word or phrase."
            )
        # Other dataset types keep the prompt untouched.

        message = [dict(type='image', value=img) for img in images]
        message.append(dict(type='text', value=prompt))
        return message

    def preprocess(self, text, image, tokenizer, processor, model_config, conv_mode='qwen_1_5'):
        """Build (input_ids, image tensors, image sizes, patch tensors, ind tokens)
        for model.generate(). Only the first image in `image` is used."""
        from llava.slice_process import slice_image_minicpm, split_image, resize_image_keep_ratio
        from llava.conversation import conv_templates, SeparatorStyle
        from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path, expand2square
        qs = text
        # BUGFIX: was `self.DEFAULT_IM_START_TOKEN`, which is not an attribute of
        # this class and raised AttributeError; the token is a module-level
        # constant imported from llava.constants.
        if model_config.mm_use_im_start_end:
            qs = self.system_prompt + DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
        else:
            qs = self.system_prompt + DEFAULT_IMAGE_TOKEN + '\n' + qs

        conv = conv_templates[conv_mode].copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()  # inserts the image placeholder into the template

        # Sliced variant: lower per-crop resolution, slicing enabled.
        res = 1024
        scale_resolution = 448
        never_split = False
        patch_size = 32  # patch size x merger size

        image = resize_image_keep_ratio(image[0], max_size=res)
        source_image, patches, best_grid, ind_tokens = slice_image_minicpm(
            image, max_slice_nums=7, scale_resolution=scale_resolution, patch_size=patch_size, never_split=never_split)

        # Both branches preprocess the source image identically, so it is hoisted.
        source_tensors = processor.preprocess(source_image, do_resize=False, do_center_crop=False,
                                              do_rescale=True, do_normalize=True,
                                              return_tensors='pt')['pixel_values']  # 1, 3, abs_h, abs_w
        if best_grid is None:  # the image was not sliced: dummy zero patch tensor
            crop_size = processor.crop_size
            patch_tensors = torch.zeros(1, 3, crop_size['height'], crop_size['width'])
        else:
            patch_tensors = processor.preprocess(patches, do_resize=False, do_center_crop=False,
                                                 do_rescale=True, do_normalize=True,
                                                 return_tensors='pt')['pixel_values']  # num_slice, 3, s_h, s_w

        images = [source_tensors[0].half().cuda()]  # 3, h, w
        patch_images = [patch_tensors.half().cuda()]  # bs, 3, h, w
        ind_tokens = [ind_tokens]
        input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
        return input_ids, images, [image.size], patch_images, ind_tokens

    def generate_inner(self, message, dataset=None):
        content, images = '', []
        for item in self.build_prompt(message, dataset):
            if item['type'] == 'text':
                content += item['value']
            elif item['type'] == 'image':
                images.append(Image.open(item['value']).convert('RGB'))

        input_ids, image_tensor, image_sizes, patch_images, ind_tokens = self.preprocess(
            content, images, self.tokenizer, self.image_processor, self.model.config, conv_mode='qwen_1_5')
        with torch.inference_mode():
            output_ids = self.model.generate(
                input_ids,
                images=image_tensor,
                image_sizes=image_sizes,
                patch_images=patch_images,
                ind_tokens=ind_tokens,
                **self.kwargs)
        return self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()
class WeThinkVL(Qwen2VLPromptMixin, BaseModel):
    """WeThink wrapper built on Qwen2.5-VL; optional tag-based answer extraction."""

    INSTALL_REQ = False
    INTERLEAVE = True
    VIDEO_LLM = True

    def __init__(
        self,
        model_path: str,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        max_new_tokens=2048,
        top_p=0.001,
        top_k=1,
        temperature=0.01,
        repetition_penalty=1.0,
        use_custom_prompt: bool = True,
        system_prompt: str | None = None,
        post_process: bool = False,
        verbose: bool = False,
        **kwargs,
    ):
        super().__init__(use_custom_prompt=use_custom_prompt)
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.generate_kwargs = dict(
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            top_k=top_k,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
        )
        self.generate_kwargs.update(kwargs)
        self.system_prompt = system_prompt
        self.verbose = verbose
        self.post_process = post_process
        # Video sampling: fixed 2 fps, capped at 64 frames (rounded to FRAME_FACTOR).
        self.fps = 2.0
        self.nframe = 64
        self.FRAME_FACTOR = 2
        assert model_path is not None
        self.model_path = model_path
        from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
        MODEL_CLS = Qwen2_5_VLForConditionalGeneration
        self.processor = AutoProcessor.from_pretrained(model_path)
        gpu_mems = get_gpu_memory()
        max_gpu_mem = max(gpu_mems) if gpu_mems != [] else -1
        assert max_gpu_mem > 0
        self.model = MODEL_CLS.from_pretrained(
            model_path, torch_dtype='auto', device_map='cuda', attn_implementation='flash_attention_2'
        )
        self.model.eval()
        torch.cuda.empty_cache()

    def _prepare_content(self, inputs: list[dict[str, str]], dataset: str | None = None) -> list[dict[str, str]]:
        """
        inputs list[dict[str, str]], each dict has keys: ['type', 'value']
        """
        content = []
        for s in inputs:
            if s['type'] == 'image':
                item = {'type': 'image', 'image': ensure_image_url(s['value'])}
                if dataset == 'OCRBench':
                    # OCRBench needs a higher floor so small text stays legible.
                    item['min_pixels'] = 10 * 10 * 28 * 28
                    warnings.warn(f"OCRBench dataset uses custom min_pixels={item['min_pixels']}")
                    if self.max_pixels is not None:
                        item['max_pixels'] = self.max_pixels
                else:
                    if self.min_pixels is not None:
                        item['min_pixels'] = self.min_pixels
                    if self.max_pixels is not None:
                        item['max_pixels'] = self.max_pixels
            elif s['type'] == 'video':
                item = {'type': 'video', 'video': ensure_video_url(s['value'])}
                if self.fps is not None:
                    item['fps'] = self.fps
                elif self.nframe is not None:
                    # Short videos: shrink the frame count to a FRAME_FACTOR multiple.
                    import cv2
                    video = cv2.VideoCapture(s['value'])
                    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
                    video.release()
                    if frame_count < self.nframe:
                        new_frame_count = frame_count // self.FRAME_FACTOR * self.FRAME_FACTOR
                        print(f"use {new_frame_count} for {s['value']}")
                        item['nframes'] = new_frame_count
                    else:
                        item['nframes'] = self.nframe
            elif s['type'] == 'text':
                item = {'type': 'text', 'text': s['value']}
            else:
                raise ValueError(f"Invalid message type: {s['type']}, {s}")
            content.append(item)
        return content

    def generate_inner(self, message, dataset=None):
        try:
            from qwen_vl_utils import process_vision_info
        except Exception as err:
            logging.critical("qwen_vl_utils not found, please install it via 'pip install qwen-vl-utils'")
            raise err
        messages = []
        if self.system_prompt is not None:
            # These two benchmarks work better without the system prompt.
            if dataset not in ['OCRBench', "AI2D_TEST"]:
                messages.append({'role': 'system', 'content': self.system_prompt})
        messages.append({'role': 'user', 'content': self._prepare_content(message, dataset=dataset)})
        if self.verbose:
            print(f'\033[31m{messages}\033[0m')
        text = self.processor.apply_chat_template([messages], tokenize=False, add_generation_prompt=True)
        images, videos = process_vision_info([messages])
        inputs = self.processor(text=text, images=images, videos=videos, padding=True, return_tensors='pt')
        inputs = inputs.to('cuda')
        generated_ids = self.model.generate(
            **inputs,
            **self.generate_kwargs,
        )
        # Strip the echoed prompt tokens before decoding.
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, generated_ids)
        ]
        out = self.processor.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        raw_response = out[0]
        response = raw_response
        # BUGFIX: guard against dataset=None (the parameter default) — the old
        # `'mmbench' in dataset.lower()` raised AttributeError when post_process
        # was False and no dataset name was supplied.
        if self.post_process or (dataset is not None and 'mmbench' in dataset.lower()):
            # Extract the final answer so mmbench_test can be scored without
            # ChatGPT-based response parsing.
            response = extract_response_for_eval(raw_response, verbose=self.verbose)
        if self.verbose:
            print(f'\033[32m{response}\033[0m')
        return response
class X_VL_HF(BaseModel):
    """VLMEvalKit wrapper for YannQi/X-VL-4B via trust_remote_code AutoModel."""

    INSTALL_REQ = True
    INTERLEAVE = True
    DEFAULT_IMAGE_TOKEN = ""
    IMAGE_TOKEN_INDEX = -200

    def __init__(self, model_path="YannQi/X-VL-4B", **kwargs):
        from transformers import AutoProcessor, AutoModel
        assert model_path is not None, "Model path must be provided."
        self.model = AutoModel.from_pretrained(
            model_path,
            torch_dtype=torch.float16,
            trust_remote_code=True,
        ).to('cuda', torch.float16)
        self.processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
        self.model_path = model_path

    def generate_inner_image(self, message, dataset=None):
        # Concatenate text; each image contributes a DEFAULT_IMAGE_TOKEN marker.
        # NOTE(review): DEFAULT_IMAGE_TOKEN and the split separator below are
        # empty strings in this dump — presumably stripped tags (e.g. an image
        # token and an answer tag); as written, str.split('') raises ValueError.
        # Confirm against the upstream file.
        content = ""
        images, image_sizes = [], []
        for msg in message:
            if msg["type"] == "text":
                content += msg["value"]
            elif msg["type"] == "image":
                img = Image.open(msg["value"]).convert("RGB")
                images.append(img)
                image_sizes.append(img.size)
                content += self.DEFAULT_IMAGE_TOKEN + "\n"

        conversation = [
            {
                "role": "user",
                "content": [{"type": "text", "text": content}],
            }
        ]
        prompt = self.processor.apply_chat_template(conversation, add_generation_prompt=True)
        inputs = self.processor(images=images, text=prompt, return_tensors="pt").to('cuda', torch.float16)

        output = self.model.generate(**inputs, max_new_tokens=16384, use_cache=True)
        # Decode only what comes after the prompt, then keep the final segment.
        answer = self.processor.decode(output[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
        answer = answer.split('')[-1].strip()
        return answer

    def generate_inner(self, message, dataset=None):
        # Video inputs are unsupported except for megabench-style datasets.
        if DATASET_MODALITY(dataset) == "VIDEO" and 'megabench' not in dataset.lower():
            raise NotImplementedError("Video generation is not supported yet.")
        return self.generate_inner_image(message, dataset)