ZzzHelloWorld committed (verified)
Commit: 003c62d
Parent: a7dcfc9

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50):
  1. VLMEvalKit-sudoku/llava/__pycache__/__init__.cpython-310.pyc +0 -0
  2. VLMEvalKit-sudoku/llava/__pycache__/conversation.cpython-310.pyc +0 -0
  3. VLMEvalKit-sudoku/llava/__pycache__/slice_process.cpython-310.pyc +0 -0
  4. VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc +0 -0
  5. VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mixtral.cpython-310.pyc +0 -0
  6. VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_qwen.cpython-310.pyc +0 -0
  7. VLMEvalKit-sudoku/llava/model/language_model/llava_gemma.py +122 -0
  8. VLMEvalKit-sudoku/llava/model/language_model/llava_mixtral.py +143 -0
  9. VLMEvalKit-sudoku/llava/model/language_model/llava_qwen3.py +142 -0
  10. VLMEvalKit-sudoku/llava/model/language_model/llava_qwen_moe.py +149 -0
  11. VLMEvalKit-sudoku/llava/model/language_model/modeling_llama.py +1649 -0
  12. VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/perceiver.cpython-310.pyc +0 -0
  13. VLMEvalKit-sudoku/llava/model/multimodal_resampler/qformer.py +1160 -0
  14. VLMEvalKit-sudoku/llava/model/multimodal_resampler/spatial_pool.py +45 -0
  15. VLMEvalKit-sudoku/llava/serve/cli.py +111 -0
  16. VLMEvalKit-sudoku/llava/serve/controller.py +287 -0
  17. VLMEvalKit-sudoku/llava/serve/test_message.py +59 -0
  18. VLMEvalKit-sudoku/llava/train/__pycache__/llava_trainer.cpython-310.pyc +0 -0
  19. VLMEvalKit-sudoku/llava/train/__pycache__/train.cpython-310.pyc +0 -0
  20. VLMEvalKit-sudoku/llava/train/llama_flash_attn_monkey_patch.py +87 -0
  21. VLMEvalKit-sudoku/llava/train/llava_trainer.py +557 -0
  22. VLMEvalKit-sudoku/llava/train/train.py +0 -0
  23. VLMEvalKit-sudoku/vlmeval/__init__.py +21 -0
  24. VLMEvalKit-sudoku/vlmeval/api/__pycache__/base.cpython-310.pyc +0 -0
  25. VLMEvalKit-sudoku/vlmeval/api/__pycache__/bluelm_api.cpython-310.pyc +0 -0
  26. VLMEvalKit-sudoku/vlmeval/api/__pycache__/cloudwalk.cpython-310.pyc +0 -0
  27. VLMEvalKit-sudoku/vlmeval/api/__pycache__/doubao_vl_api.cpython-310.pyc +0 -0
  28. VLMEvalKit-sudoku/vlmeval/api/__pycache__/gpt.cpython-310.pyc +0 -0
  29. VLMEvalKit-sudoku/vlmeval/api/__pycache__/mug_u.cpython-310.pyc +0 -0
  30. VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_api.cpython-310.pyc +0 -0
  31. VLMEvalKit-sudoku/vlmeval/api/__pycache__/sensechat_vision.cpython-310.pyc +0 -0
  32. VLMEvalKit-sudoku/vlmeval/api/__pycache__/siliconflow.cpython-310.pyc +0 -0
  33. VLMEvalKit-sudoku/vlmeval/api/__pycache__/taiyi.cpython-310.pyc +0 -0
  34. VLMEvalKit-sudoku/vlmeval/config.py +1659 -0
  35. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cgbench.cpython-310.pyc +0 -0
  36. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dude.cpython-310.pyc +0 -0
  37. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dynamath.cpython-310.pyc +0 -0
  38. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_base.cpython-310.pyc +0 -0
  39. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_ccocr.cpython-310.pyc +0 -0
  40. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmgenbench.cpython-310.pyc +0 -0
  41. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmifeval.cpython-310.pyc +0 -0
  42. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmlongbench.cpython-310.pyc +0 -0
  43. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/moat.cpython-310.pyc +0 -0
  44. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mvbench.cpython-310.pyc +0 -0
  45. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/text_base.cpython-310.pyc +0 -0
  46. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vl_rewardbench.cpython-310.pyc +0 -0
  47. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/wildvision.cpython-310.pyc +0 -0
  48. VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/worldsense.cpython-310.pyc +0 -0
  49. VLMEvalKit-sudoku/vlmeval/dataset/image_ccocr.py +303 -0
  50. VLMEvalKit-sudoku/vlmeval/dataset/mmgenbench.py +69 -0
VLMEvalKit-sudoku/llava/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes).

VLMEvalKit-sudoku/llava/__pycache__/conversation.cpython-310.pyc ADDED
Binary file (14.8 kB).

VLMEvalKit-sudoku/llava/__pycache__/slice_process.cpython-310.pyc ADDED
Binary file (6.51 kB).

VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mistral.cpython-310.pyc ADDED
Binary file (4.02 kB).

VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_mixtral.cpython-310.pyc ADDED
Binary file (4.12 kB).

VLMEvalKit-sudoku/llava/model/language_model/__pycache__/llava_qwen.cpython-310.pyc ADDED
Binary file (4.42 kB).

VLMEvalKit-sudoku/llava/model/language_model/llava_gemma.py ADDED
@@ -0,0 +1,122 @@
+# Copyright 2024 Duc Q. Nguyen, Haotian Liu and Bo Li
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from transformers import AutoConfig, AutoModelForCausalLM, GemmaConfig, GemmaModel, GemmaForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaGemmaConfig(GemmaConfig):
+    model_type = "llava_gemma"
+
+
+class LlavaGemmaModel(LlavaMetaModel, GemmaModel):
+    config_class = LlavaGemmaConfig
+
+    def __init__(self, config: GemmaConfig):
+        super(LlavaGemmaModel, self).__init__(config)
+
+
+class LlavaGemmaForCausalLM(GemmaForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaGemmaConfig
+
+    def __init__(self, config):
+        super(GemmaForCausalLM, self).__init__(config)
+        self.model = LlavaGemmaModel(config)
+
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        image_sizes: Optional[List[List[int]]] = None,
+        return_dict: Optional[bool] = None,
+        cache_position: Optional[torch.LongTensor] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+        if inputs_embeds is None:
+            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, image_sizes)
+
+        return super().forward(
+            input_ids=input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            labels=labels,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+            cache_position=cache_position,
+        )
+
+    @torch.no_grad()
+    def generate(
+        self,
+        inputs: Optional[torch.Tensor] = None,
+        images: Optional[torch.Tensor] = None,
+        image_sizes: Optional[torch.Tensor] = None,
+        **kwargs,
+    ) -> Union[GenerateOutput, torch.LongTensor]:
+        position_ids = kwargs.pop("position_ids", None)
+        attention_mask = kwargs.pop("attention_mask", None)
+        if "inputs_embeds" in kwargs:
+            raise NotImplementedError("`inputs_embeds` is not supported")
+
+        if images is not None:
+            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, image_sizes=image_sizes)
+        else:
+            inputs_embeds = self.get_model().embed_tokens(inputs)
+
+        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+        images = kwargs.pop("images", None)
+        image_sizes = kwargs.pop("image_sizes", None)
+        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
+        if images is not None:
+            inputs["images"] = images
+        if image_sizes is not None:
+            inputs["image_sizes"] = image_sizes
+        return inputs
+
+
+AutoConfig.register("llava_gemma", LlavaGemmaConfig)
+AutoModelForCausalLM.register(LlavaGemmaConfig, LlavaGemmaForCausalLM)
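
The two register() calls at the end of llava_gemma.py are what make the wrapper discoverable through the Hugging Face Auto classes. A minimal sketch of that path, assuming a hypothetical local checkpoint directory whose config declares model_type "llava_gemma" (the path and dtype below are illustrative, not part of this commit):

import torch
from transformers import AutoConfig, AutoModelForCausalLM
# Importing the module runs the AutoConfig / AutoModelForCausalLM.register(...) calls above.
from llava.model.language_model.llava_gemma import LlavaGemmaForCausalLM

config = AutoConfig.from_pretrained("checkpoints/llava-gemma")  # hypothetical path
assert config.model_type == "llava_gemma"
model = AutoModelForCausalLM.from_pretrained("checkpoints/llava-gemma", torch_dtype=torch.float16)
assert isinstance(model, LlavaGemmaForCausalLM)
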
VLMEvalKit-sudoku/llava/model/language_model/llava_mixtral.py ADDED
@@ -0,0 +1,143 @@
+# Copyright 2023 Haotian Liu
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+from transformers import AutoConfig, AutoModelForCausalLM, MixtralConfig, MixtralModel, MixtralForCausalLM, GenerationConfig
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+
+
+class LlavaMixtralConfig(MixtralConfig):
+    model_type = "llava_mixtral"
+
+
+class LlavaMixtralModel(LlavaMetaModel, MixtralModel):
+    config_class = LlavaMixtralConfig
+
+    def __init__(self, config: MixtralConfig):
+        super(LlavaMixtralModel, self).__init__(config)
+
+
+class LlavaMixtralForCausalLM(MixtralForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaMixtralConfig
+
+    def __init__(self, config):
+        super(MixtralForCausalLM, self).__init__(config)
+
+        config.model_type = "llava_mixtral"
+        config.rope_scaling = None
+        self.model = LlavaMixtralModel(config)
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        image_sizes: Optional[List[List[int]]] = None,
+        return_dict: Optional[bool] = None,
+        modalities: Optional[List[str]] = ["image"],
+        dpo_forward: Optional[bool] = None,
+        cache_position=None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+        if inputs_embeds is None:
+            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes)
+
+        if dpo_forward:
+            outputs = self.model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+
+            hidden_states = outputs[0]
+            logits = self.lm_head(hidden_states)
+            return logits, labels
+
+        else:
+            return super().forward(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                labels=labels,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+
+    @torch.no_grad()
+    def generate(
+        self,
+        inputs: Optional[torch.Tensor] = None,
+        images: Optional[torch.Tensor] = None,
+        image_sizes: Optional[torch.Tensor] = None,
+        modalities: Optional[List[str]] = ["image"],
+        **kwargs,
+    ) -> Union[GenerateOutput, torch.LongTensor]:
+        position_ids = kwargs.pop("position_ids", None)
+        attention_mask = kwargs.pop("attention_mask", None)
+        if "inputs_embeds" in kwargs:
+            raise NotImplementedError("`inputs_embeds` is not supported")
+
+        if images is not None:
+            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes)
+        else:
+            inputs_embeds = self.get_model().embed_tokens(inputs)
+
+        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+        images = kwargs.pop("images", None)
+        image_sizes = kwargs.pop("image_sizes", None)
+        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
+        if images is not None:
+            inputs["images"] = images
+        if image_sizes is not None:
+            inputs["image_sizes"] = image_sizes
+        return inputs
+
+
+AutoConfig.register("llava_mixtral", LlavaMixtralConfig)
+AutoModelForCausalLM.register(LlavaMixtralConfig, LlavaMixtralForCausalLM)
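
As in the other wrappers, LlavaMixtralForCausalLM.generate() never feeds input_ids straight to the language model: when images are passed it builds inputs_embeds through prepare_inputs_labels_for_multimodal() and hands those to super().generate(). A sketch of a caller, with placeholder tensors and generation settings chosen purely for illustration:

import torch

def run_generate(model, input_ids, image_tensor, image_sizes):
    # `images` triggers prepare_inputs_labels_for_multimodal(), which splices the projected
    # vision features into inputs_embeds before decoding starts.
    with torch.inference_mode():
        return model.generate(
            input_ids,
            images=image_tensor,        # e.g. [n_images, 3, H, W], already preprocessed to model dtype/device
            image_sizes=image_sizes,    # original (width, height) of each image
            modalities=["image"],
            do_sample=False,
            max_new_tokens=128,
        )
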
VLMEvalKit-sudoku/llava/model/language_model/llava_qwen3.py ADDED
@@ -0,0 +1,142 @@
+from typing import List, Optional, Tuple, Union, Dict
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+import transformers
+from transformers import AutoConfig, AutoModelForCausalLM, LlamaConfig, LlamaModel, LlamaForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+from transformers import Qwen3Config, Qwen3Model, Qwen3ForCausalLM
+
+
+class LlavaQwen3Config(Qwen3Config):
+    model_type = "llava_qwen3"
+
+
+class LlavaQwen3Model(LlavaMetaModel, Qwen3Model):
+    config_class = LlavaQwen3Config
+
+    def __init__(self, config: Qwen3Config):
+        super(LlavaQwen3Model, self).__init__(config)
+
+
+class LlavaQwen3ForCausalLM(Qwen3ForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaQwen3Config
+
+    def __init__(self, config):
+        # super(Qwen3ForCausalLM, self).__init__(config)
+        Qwen3ForCausalLM.__init__(self, config)
+        config.model_type = "llava_qwen3"
+        config.rope_scaling = None
+        self.config = config
+        self.model = LlavaQwen3Model(config)
+        # self.llm_model = Qwen3Model(config)
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        image_sizes: Optional[List[List[int]]] = None,
+        return_dict: Optional[bool] = None,
+        modalities: Optional[List[str]] = ["image"],
+        dpo_forward: Optional[bool] = False,
+        cache_position=None,
+        patch_images: Optional[torch.FloatTensor] = None,
+        ind_tokens: Optional[List[int]] = None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+        if inputs_embeds is None:
+            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes, patch_images=patch_images,
+                                                                                                                                           ind_tokens=ind_tokens)
+        if dpo_forward:
+            outputs = self.model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+
+            hidden_states = outputs[0]
+            logits = self.lm_head(hidden_states)
+            return logits, labels
+        else:
+            output = super().forward(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                labels=labels,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+            return output
+
+    @torch.no_grad()
+    def generate(
+        self,
+        inputs: Optional[torch.Tensor] = None,
+        images: Optional[torch.Tensor] = None,
+        image_sizes: Optional[torch.Tensor] = None,
+        modalities: Optional[List[str]] = ["image"],
+        patch_images: Optional[torch.FloatTensor] = None,
+        ind_tokens: Optional[List[int]] = None,
+        **kwargs,
+    ) -> Union[GenerateOutput, torch.LongTensor]:
+        position_ids = kwargs.pop("position_ids", None)
+        attention_mask = kwargs.pop("attention_mask", None)
+        if "inputs_embeds" in kwargs:
+            raise NotImplementedError("`inputs_embeds` is not supported")
+
+        if images is not None:
+            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes, patch_images=patch_images,
+                                                                                                                     ind_tokens=ind_tokens)
+        else:
+            inputs_embeds = self.get_model().embed_tokens(inputs)
+
+        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+        images = kwargs.pop("images", None)
+        image_sizes = kwargs.pop("image_sizes", None)
+        patch_images = kwargs.pop("patch_images", None)
+        ind_tokens = kwargs.pop("ind_tokens", None)
+        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
+        if images is not None:
+            inputs["images"] = images
+        if image_sizes is not None:
+            inputs["image_sizes"] = image_sizes
+        if patch_images is not None:
+            inputs['patch_images'] = patch_images
+        if ind_tokens is not None:
+            inputs['ind_tokens'] = ind_tokens
+        return inputs
+
+
+AutoConfig.register("llava_qwen3", LlavaQwen3Config)
+AutoModelForCausalLM.register(LlavaQwen3Config, LlavaQwen3ForCausalLM)
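
llava_qwen3.py (like the Mixtral and Qwen-MoE wrappers) has a dpo_forward branch that skips the usual CausalLMOutputWithPast and returns raw (logits, labels), leaving loss computation to the caller, which is how DPO-style trainers typically consume policy outputs. A sketch of such a consumer, illustrative only and not part of this commit:

import torch
import torch.nn.functional as F

def sequence_logprobs(logits: torch.Tensor, labels: torch.Tensor, ignore_index: int = -100) -> torch.Tensor:
    # Shift so the token at position t is scored by the logits at position t - 1.
    logits = logits[:, :-1, :]
    labels = labels[:, 1:].clone()
    mask = labels != ignore_index
    labels[~mask] = 0  # keep gather() in range on ignored slots
    token_logprobs = torch.gather(F.log_softmax(logits, dim=-1), 2, labels.unsqueeze(-1)).squeeze(-1)
    return (token_logprobs * mask).sum(dim=-1)  # one summed log-probability per sequence
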
VLMEvalKit-sudoku/llava/model/language_model/llava_qwen_moe.py ADDED
@@ -0,0 +1,149 @@
+# Copyright 2024 Hao Zhang
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from typing import List, Optional, Tuple, Union, Dict
+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss
+
+import transformers
+from transformers import AutoConfig, AutoModelForCausalLM
+
+from transformers.modeling_outputs import CausalLMOutputWithPast
+from transformers.generation.utils import GenerateOutput
+
+# from ...constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
+from llava.model.llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
+from transformers import Qwen2MoeConfig, Qwen2MoeModel, Qwen2MoeForCausalLM
+
+# from .qwen.modeling_qwen import QWenLMHeadModel, QWenModel
+# from .qwen.configuration_qwen import QWenConfig
+
+
+class LlavaQwenMoeConfig(Qwen2MoeConfig):
+    model_type = "llava_qwen_moe"
+
+
+class LlavaQwenMoeModel(LlavaMetaModel, Qwen2MoeModel):
+    config_class = LlavaQwenMoeConfig
+
+    def __init__(self, config: Qwen2MoeConfig):
+        super(LlavaQwenMoeModel, self).__init__(config)
+
+
+class LlavaQwenMoeForCausalLM(Qwen2MoeForCausalLM, LlavaMetaForCausalLM):
+    config_class = LlavaQwenMoeConfig
+
+    def __init__(self, config):
+        # super(Qwen2MoeForCausalLM, self).__init__(config)
+        Qwen2MoeForCausalLM.__init__(self, config)
+        config.model_type = "llava_qwen_moe"
+        config.rope_scaling = None
+
+        self.model = LlavaQwenMoeModel(config)
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_model(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        images: Optional[torch.FloatTensor] = None,
+        image_sizes: Optional[List[List[int]]] = None,
+        return_dict: Optional[bool] = None,
+        modalities: Optional[List[str]] = ["image"],
+        dpo_forward: Optional[bool] = False,
+        cache_position=None,
+    ) -> Union[Tuple, CausalLMOutputWithPast]:
+
+        if inputs_embeds is None:
+            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes)
+
+        if dpo_forward:
+            outputs = self.model(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+
+            hidden_states = outputs[0]
+            logits = self.lm_head(hidden_states)
+            return logits, labels
+
+        else:
+            return super().forward(
+                input_ids=input_ids,
+                attention_mask=attention_mask,
+                position_ids=position_ids,
+                past_key_values=past_key_values,
+                inputs_embeds=inputs_embeds,
+                labels=labels,
+                use_cache=use_cache,
+                output_attentions=output_attentions,
+                output_hidden_states=output_hidden_states,
+                return_dict=return_dict,
+            )
+
+    @torch.no_grad()
+    def generate(
+        self,
+        inputs: Optional[torch.Tensor] = None,
+        images: Optional[torch.Tensor] = None,
+        image_sizes: Optional[torch.Tensor] = None,
+        modalities: Optional[List[str]] = ["image"],
+        **kwargs,
+    ) -> Union[GenerateOutput, torch.LongTensor]:
+        position_ids = kwargs.pop("position_ids", None)
+        attention_mask = kwargs.pop("attention_mask", None)
+        if "inputs_embeds" in kwargs:
+            raise NotImplementedError("`inputs_embeds` is not supported")
+
+        if images is not None:
+            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes)
+        else:
+            inputs_embeds = self.get_model().embed_tokens(inputs)
+
+        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)
+
+    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
+        images = kwargs.pop("images", None)
+        image_sizes = kwargs.pop("image_sizes", None)
+        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
+        if images is not None:
+            inputs["images"] = images
+        if image_sizes is not None:
+            inputs["image_sizes"] = image_sizes
+        return inputs
+
+
+AutoConfig.register("llava_qwen_moe", LlavaQwenMoeConfig)
+AutoModelForCausalLM.register(LlavaQwenMoeConfig, LlavaQwenMoeForCausalLM)
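
llava_qwen_moe.py follows the same template as the Mixtral and Qwen3 wrappers, only swapping in the Qwen2-MoE backbone. One quick way to check the wiring without downloading weights is to instantiate it from a deliberately tiny config; every hyperparameter below is an illustrative value, not one used by this project, and the import path assumes the llava package and its dependencies are installed:

from llava.model.language_model.llava_qwen_moe import LlavaQwenMoeConfig, LlavaQwenMoeForCausalLM

config = LlavaQwenMoeConfig(
    hidden_size=64, intermediate_size=128, num_hidden_layers=2,
    num_attention_heads=4, num_key_value_heads=2, vocab_size=1024,
    num_experts=4, num_experts_per_tok=2,
    moe_intermediate_size=32, shared_expert_intermediate_size=32,
)
model = LlavaQwenMoeForCausalLM(config)   # builds LlavaQwenMoeModel + lm_head, then post_init()
print(type(model.get_model()).__name__)   # -> "LlavaQwenMoeModel"
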
VLMEvalKit-sudoku/llava/model/language_model/modeling_llama.py ADDED
@@ -0,0 +1,1649 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+ """ PyTorch LLaMA model."""
21
+ import math
22
+ import warnings
23
+ from typing import List, Optional, Tuple, Union
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ import torch.utils.checkpoint
28
+ from torch import nn
29
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
30
+
31
+ from transformers.activations import ACT2FN
32
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
33
+ from transformers.modeling_outputs import (
34
+ BaseModelOutputWithPast,
35
+ CausalLMOutputWithPast,
36
+ QuestionAnsweringModelOutput,
37
+ SequenceClassifierOutputWithPast,
38
+ )
39
+ from transformers.modeling_utils import PreTrainedModel
40
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
41
+ from transformers.utils import (
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ is_flash_attn_2_available,
45
+ is_flash_attn_greater_or_equal_2_10,
46
+ logging,
47
+ replace_return_docstrings,
48
+ )
49
+ from transformers.models.llama.configuration_llama import LlamaConfig
50
+
51
+ if is_flash_attn_2_available():
52
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
53
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
54
+
55
+
56
+ logger = logging.get_logger(__name__)
57
+
58
+ _CONFIG_FOR_DOC = "LlamaConfig"
59
+
60
+
61
+ def _get_unpad_data(attention_mask):
62
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
63
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
64
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
65
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
66
+ return (
67
+ indices,
68
+ cu_seqlens,
69
+ max_seqlen_in_batch,
70
+ )
71
+
72
+
73
+ class LlamaRMSNorm(nn.Module):
74
+ def __init__(self, hidden_size, eps=1e-6):
75
+ """
76
+ LlamaRMSNorm is equivalent to T5LayerNorm
77
+ """
78
+ super().__init__()
79
+ self.weight = nn.Parameter(torch.ones(hidden_size))
80
+ self.variance_epsilon = eps
81
+
82
+ def forward(self, hidden_states):
83
+ input_dtype = hidden_states.dtype
84
+ hidden_states = hidden_states.to(torch.float32)
85
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
86
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
87
+ return self.weight * hidden_states.to(input_dtype)
88
+
89
+
90
+ ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
91
+
92
+
93
+ class LlamaRotaryEmbedding(nn.Module):
94
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
95
+ super().__init__()
96
+ self.scaling_factor = scaling_factor
97
+ self.dim = dim
98
+ self.max_position_embeddings = max_position_embeddings
99
+ self.base = base
100
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
101
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
102
+ # For BC we register cos and sin cached
103
+ self.max_seq_len_cached = max_position_embeddings
104
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
105
+ t = t / self.scaling_factor
106
+ freqs = torch.outer(t, self.inv_freq)
107
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
108
+ emb = torch.cat((freqs, freqs), dim=-1)
109
+ self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
110
+ self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)
111
+
112
+ @property
113
+ def sin_cached(self):
114
+ logger.warning_once("The sin_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class")
115
+ return self._sin_cached
116
+
117
+ @property
118
+ def cos_cached(self):
119
+ logger.warning_once("The cos_cached attribute will be removed in 4.39. Bear in mind that its contents changed in v4.38. Use " "the forward method of RoPE from now on instead. It is not used in the `LlamaAttention` class")
120
+ return self._cos_cached
121
+
122
+ @torch.no_grad()
123
+ def forward(self, x, position_ids, seq_len=None):
124
+ if seq_len is not None:
125
+ logger.warning_once("The `seq_len` argument is deprecated and unused. It will be removed in v4.39.")
126
+
127
+ # x: [bs, num_attention_heads, seq_len, head_size]
128
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
129
+ position_ids_expanded = position_ids[:, None, :].float()
130
+ # Force float32 since bfloat16 loses precision on long contexts
131
+ # See https://github.com/huggingface/transformers/pull/29285
132
+ device_type = x.device.type
133
+ device_type = device_type if isinstance(device_type, str) else "cpu"
134
+ with torch.autocast(device_type=device_type, enabled=False):
135
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
136
+ emb = torch.cat((freqs, freqs), dim=-1)
137
+ cos = emb.cos()
138
+ sin = emb.sin()
139
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
140
+
141
+
142
+ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
143
+ """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
144
+
145
+ def forward(self, x, position_ids, seq_len=None):
146
+ # difference to the original RoPE: a scaling factor is aplied to the position ids
147
+ position_ids = position_ids.float() / self.scaling_factor
148
+ cos, sin = super().forward(x, position_ids, seq_len)
149
+ return cos, sin
150
+
151
+
152
+ class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
153
+ """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
154
+
155
+ def forward(self, x, position_ids, seq_len=None):
156
+ # difference to the original RoPE: inv_freq is recomputed when the sequence length > original length
157
+ seq_len = torch.max(position_ids) + 1
158
+ if seq_len > self.max_position_embeddings:
159
+ base = self.base * ((self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)) ** (self.dim / (self.dim - 2))
160
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(x.device) / self.dim))
161
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: this may break with compilation
162
+
163
+ cos, sin = super().forward(x, position_ids, seq_len)
164
+ return cos, sin
165
+
166
+
167
+ def rotate_half(x):
168
+ """Rotates half the hidden dims of the input."""
169
+ x1 = x[..., : x.shape[-1] // 2]
170
+ x2 = x[..., x.shape[-1] // 2 :]
171
+ return torch.cat((-x2, x1), dim=-1)
172
+
173
+
174
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
175
+ """Applies Rotary Position Embedding to the query and key tensors.
176
+
177
+ Args:
178
+ q (`torch.Tensor`): The query tensor.
179
+ k (`torch.Tensor`): The key tensor.
180
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
181
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
182
+ position_ids (`torch.Tensor`, *optional*):
183
+ Deprecated and unused.
184
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
185
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
186
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
187
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
188
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
189
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
190
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
191
+ Returns:
192
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
193
+ """
194
+ cos = cos.unsqueeze(unsqueeze_dim)
195
+ sin = sin.unsqueeze(unsqueeze_dim)
196
+ q_embed = (q * cos) + (rotate_half(q) * sin)
197
+ k_embed = (k * cos) + (rotate_half(k) * sin)
198
+ return q_embed, k_embed
199
+
200
+
201
+ class LlamaMLP(nn.Module):
202
+ def __init__(self, config):
203
+ super().__init__()
204
+ self.config = config
205
+ self.hidden_size = config.hidden_size
206
+ self.intermediate_size = config.intermediate_size
207
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
208
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
209
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
210
+ self.act_fn = ACT2FN[config.hidden_act]
211
+
212
+ def forward(self, x):
213
+ if self.config.pretraining_tp > 1:
214
+ slice = self.intermediate_size // self.config.pretraining_tp
215
+ gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
216
+ up_proj_slices = self.up_proj.weight.split(slice, dim=0)
217
+ down_proj_slices = self.down_proj.weight.split(slice, dim=1)
218
+
219
+ gate_proj = torch.cat([F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
220
+ up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
221
+
222
+ intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
223
+ down_proj = [F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)]
224
+ down_proj = sum(down_proj)
225
+ else:
226
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
227
+
228
+ return down_proj
229
+
230
+
231
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
232
+ """
233
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
234
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
235
+ """
236
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
237
+ if n_rep == 1:
238
+ return hidden_states
239
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
240
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
241
+
242
+
243
+ class LlamaAttention(nn.Module):
244
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
245
+
246
+ def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
247
+ super().__init__()
248
+ self.config = config
249
+ self.layer_idx = layer_idx
250
+ if layer_idx is None:
251
+ logger.warning_once(
252
+ f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
253
+ "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
254
+ "when creating this class."
255
+ )
256
+
257
+ self.attention_dropout = config.attention_dropout
258
+ self.hidden_size = config.hidden_size
259
+ self.num_heads = config.num_attention_heads
260
+ self.head_dim = self.hidden_size // self.num_heads
261
+ self.num_key_value_heads = config.num_key_value_heads
262
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
263
+ self.max_position_embeddings = config.max_position_embeddings
264
+ self.rope_theta = config.rope_theta
265
+ self.is_causal = True
266
+
267
+ if (self.head_dim * self.num_heads) != self.hidden_size:
268
+ raise ValueError(f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads}).")
269
+
270
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
271
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
272
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
273
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
274
+ self._init_rope()
275
+
276
+ def _init_rope(self):
277
+ if self.config.rope_scaling is None:
278
+ self.rotary_emb = LlamaRotaryEmbedding(
279
+ self.head_dim,
280
+ max_position_embeddings=self.max_position_embeddings,
281
+ base=self.rope_theta,
282
+ )
283
+ else:
284
+ scaling_type = self.config.rope_scaling["type"]
285
+ scaling_factor = self.config.rope_scaling["factor"]
286
+ if scaling_type == "linear":
287
+ self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
288
+ self.head_dim,
289
+ max_position_embeddings=self.max_position_embeddings,
290
+ scaling_factor=scaling_factor,
291
+ base=self.rope_theta,
292
+ )
293
+ elif scaling_type == "dynamic":
294
+ self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
295
+ self.head_dim,
296
+ max_position_embeddings=self.max_position_embeddings,
297
+ scaling_factor=scaling_factor,
298
+ base=self.rope_theta,
299
+ )
300
+ else:
301
+ raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
302
+
303
+ def forward(
304
+ self,
305
+ hidden_states: torch.Tensor,
306
+ attention_mask: Optional[torch.Tensor] = None,
307
+ position_ids: Optional[torch.LongTensor] = None,
308
+ past_key_value: Optional[Cache] = None,
309
+ output_attentions: bool = False,
310
+ use_cache: bool = False,
311
+ cache_position: Optional[torch.LongTensor] = None,
312
+ **kwargs,
313
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
314
+ bsz, q_len, _ = hidden_states.size()
315
+
316
+ if self.config.pretraining_tp > 1:
317
+ key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
318
+ query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0)
319
+ key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
320
+ value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
321
+
322
+ query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
323
+ query_states = torch.cat(query_states, dim=-1)
324
+
325
+ key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
326
+ key_states = torch.cat(key_states, dim=-1)
327
+
328
+ value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
329
+ value_states = torch.cat(value_states, dim=-1)
330
+
331
+ else:
332
+ query_states = self.q_proj(hidden_states)
333
+ key_states = self.k_proj(hidden_states)
334
+ value_states = self.v_proj(hidden_states)
335
+
336
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
337
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
338
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
339
+
340
+ past_key_value = getattr(self, "past_key_value", past_key_value)
341
+ cos, sin = self.rotary_emb(value_states, position_ids)
342
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
343
+
344
+ if past_key_value is not None:
345
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
346
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
347
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
348
+
349
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
350
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
351
+
352
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
353
+
354
+ if attention_mask is not None: # no matter the length, we just slice it
355
+ causal_mask = attention_mask
356
+ if cache_position is not None:
357
+ causal_mask = attention_mask[:, :, cache_position, : key_states.shape[-2]]
358
+ attn_weights = attn_weights + causal_mask
359
+
360
+ # upcast attention to fp32
361
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
362
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
363
+ attn_output = torch.matmul(attn_weights, value_states)
364
+
365
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
366
+ raise ValueError(f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}")
367
+
368
+ attn_output = attn_output.transpose(1, 2).contiguous()
369
+
370
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
371
+
372
+ if self.config.pretraining_tp > 1:
373
+ attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
374
+ o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
375
+ attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
376
+ else:
377
+ attn_output = self.o_proj(attn_output)
378
+
379
+ if not output_attentions:
380
+ attn_weights = None
381
+
382
+ return attn_output, attn_weights, past_key_value
383
+
384
+
385
+ class LlamaRingFlashAttention2(LlamaAttention):
386
+ """
387
+ Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stays
388
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
389
+ flash attention and deal with padding tokens in case the input contains any of them.
390
+ """
391
+
392
+ def __init__(self, *args, **kwargs):
393
+ super().__init__(*args, **kwargs)
394
+
395
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
396
+ # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
397
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
398
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
399
+
400
+ def forward(
401
+ self,
402
+ hidden_states: torch.Tensor,
403
+ attention_mask: Optional[torch.LongTensor] = None,
404
+ position_ids: Optional[torch.LongTensor] = None,
405
+ past_key_value: Optional[Cache] = None,
406
+ output_attentions: bool = False,
407
+ use_cache: bool = False,
408
+ cache_position: Optional[torch.LongTensor] = None,
409
+ **kwargs,
410
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
411
+ output_attentions = False
412
+
413
+ bsz, q_len, _ = hidden_states.size()
414
+
415
+ query_states = self.q_proj(hidden_states)
416
+ key_states = self.k_proj(hidden_states)
417
+ value_states = self.v_proj(hidden_states)
418
+
419
+ # Flash attention requires the input to have the shape
420
+ # batch_size x seq_length x head_dim x hidden_dim
421
+ # therefore we just need to keep the original shape
422
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
423
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
424
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
425
+
426
+ cos, sin = self.rotary_emb(value_states, position_ids)
427
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
428
+
429
+ past_key_value = getattr(self, "past_key_value", past_key_value)
430
+
431
+ if past_key_value is not None:
432
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
433
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
434
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
435
+
436
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
437
+ # to be able to avoid many of these transpose/reshape/view.
438
+ query_states = query_states.transpose(1, 2)
439
+ key_states = key_states.transpose(1, 2)
440
+ value_states = value_states.transpose(1, 2)
441
+
442
+ dropout_rate = self.attention_dropout if self.training else 0.0
443
+
444
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
445
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
446
+ # cast them back in the correct dtype just to be sure everything works as expected.
447
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
448
+ # in fp32. (LlamaRMSNorm handles it correctly)
449
+
450
+ input_dtype = query_states.dtype
451
+ if input_dtype == torch.float32:
452
+ if torch.is_autocast_enabled():
453
+ target_dtype = torch.get_autocast_gpu_dtype()
454
+ # Handle the case where the model is quantized
455
+ elif hasattr(self.config, "_pre_quantization_dtype"):
456
+ target_dtype = self.config._pre_quantization_dtype
457
+ else:
458
+ target_dtype = self.q_proj.weight.dtype
459
+
460
+ logger.warning_once(
461
+ f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}."
462
+ )
463
+
464
+ query_states = query_states.to(target_dtype)
465
+ key_states = key_states.to(target_dtype)
466
+ value_states = value_states.to(target_dtype)
467
+
468
+ attn_output = self._flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)
469
+
470
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
471
+ attn_output = self.o_proj(attn_output)
472
+
473
+ if not output_attentions:
474
+ attn_weights = None
475
+
476
+ return attn_output, attn_weights, past_key_value
477
+
478
+ def _flash_attention_forward(self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None):
479
+ """
480
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
481
+ first unpad the input, then computes the attention scores and pad the final attention scores.
482
+
483
+ Args:
484
+ query_states (`torch.Tensor`):
485
+ Input query states to be passed to Flash Attention API
486
+ key_states (`torch.Tensor`):
487
+ Input key states to be passed to Flash Attention API
488
+ value_states (`torch.Tensor`):
489
+ Input value states to be passed to Flash Attention API
490
+ attention_mask (`torch.Tensor`):
491
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
492
+ position of padding tokens and 1 for the position of non-padding tokens.
493
+ dropout (`int`, *optional*):
494
+ Attention dropout
495
+ softmax_scale (`float`, *optional*):
496
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
497
+ """
498
+ if not self._flash_attn_uses_top_left_mask:
499
+ causal = self.is_causal
500
+ else:
501
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
502
+ causal = self.is_causal and query_length != 1
503
+
504
+ # Contains at least one padding token in the sequence
505
+ if attention_mask is not None:
506
+ batch_size = query_states.shape[0]
507
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(query_states, key_states, value_states, attention_mask, query_length)
508
+
509
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
510
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
511
+
512
+ attn_output_unpad = zigzag_ring_flash_attn_varlen_func(
513
+ query_states,
514
+ key_states,
515
+ value_states,
516
+ cu_seqlens_q=cu_seqlens_q,
517
+ cu_seqlens_k=cu_seqlens_k,
518
+ max_seqlen_q=max_seqlen_in_batch_q,
519
+ max_seqlen_k=max_seqlen_in_batch_k,
520
+ dropout_p=dropout,
521
+ softmax_scale=softmax_scale,
522
+ causal=causal,
523
+ )
524
+
525
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
526
+ else:
527
+ # pack qkv
528
+ # query_states: (batch_size, seqlen, nheads, headdim)
529
+ # qkv: (batch_size, seqlen, 3, nheads, headdim)
530
+ qkv = torch.stack([query_states, key_states, value_states], dim=2)
531
+ attn_output = zigzag_ring_flash_attn_qkvpacked_func(qkv, dropout, softmax_scale, causal=causal)
532
+
533
+ return attn_output
534
+
535
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
536
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
537
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
538
+
539
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
540
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
541
+ if query_length == kv_seq_len:
542
+ query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k)
543
+ cu_seqlens_q = cu_seqlens_k
544
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
545
+ indices_q = indices_k
546
+ elif query_length == 1:
547
+ max_seqlen_in_batch_q = 1
548
+ cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device) # There is a memcpy here, that is very bad.
549
+ indices_q = cu_seqlens_q[:-1]
550
+ query_layer = query_layer.squeeze(1)
551
+ else:
552
+ # The -q_len: slice assumes left padding.
553
+ attention_mask = attention_mask[:, -query_length:]
554
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
555
+
556
+ return (
557
+ query_layer,
558
+ key_layer,
559
+ value_layer,
560
+ indices_q,
561
+ (cu_seqlens_q, cu_seqlens_k),
562
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
563
+ )
564
+
565
+
566
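# The `_get_unpad_data` helper that `_upad_input` above calls is defined elsewhere in this file and is
# not shown here. As a hedged, standalone illustration only, the usual recipe for deriving the
# `indices`, `cu_seqlens` and `max_seqlen_in_batch` values from a 0/1 padding mask looks like the
# sketch below; the real helper may differ in details.
def _get_unpad_data_sketch(attention_mask):
    # attention_mask: (batch_size, seq_len) with 1 for real tokens and 0 for padding.
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    # Flattened positions of the non-padding tokens across the whole batch.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = int(seqlens_in_batch.max())
    # Cumulative sequence lengths with a leading zero: [0, len_0, len_0 + len_1, ...].
    cu_seqlens = nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return indices, cu_seqlens, max_seqlen_in_batch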
+ class LlamaFlashAttention2(LlamaAttention):
567
+ """
568
+ Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stay
569
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
570
+ flash attention and deal with padding tokens in case the input contains any of them.
571
+ """
572
+
573
+ def __init__(self, *args, **kwargs):
574
+ super().__init__(*args, **kwargs)
575
+
576
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
577
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
578
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
579
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
580
+
581
+ def forward(
582
+ self,
583
+ hidden_states: torch.Tensor,
584
+ attention_mask: Optional[torch.LongTensor] = None,
585
+ position_ids: Optional[torch.LongTensor] = None,
586
+ past_key_value: Optional[Cache] = None,
587
+ output_attentions: bool = False,
588
+ use_cache: bool = False,
589
+ cache_position: Optional[torch.LongTensor] = None,
590
+ **kwargs,
591
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
592
+ output_attentions = False
593
+
594
+ bsz, q_len, _ = hidden_states.size()
595
+
596
+ query_states = self.q_proj(hidden_states)
597
+ key_states = self.k_proj(hidden_states)
598
+ value_states = self.v_proj(hidden_states)
599
+
600
+ # Flash attention requires the input to have the shape
601
+ # batch_size x seq_length x num_heads x head_dim
602
+ # therefore we just need to keep the original shape
603
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
604
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
605
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
606
+
607
+ cos, sin = self.rotary_emb(value_states, position_ids)
608
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
609
+
610
+ past_key_value = getattr(self, "past_key_value", past_key_value)
611
+
612
+ if past_key_value is not None:
613
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
614
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
615
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
616
+
617
+ # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
618
+ # to be able to avoid many of these transpose/reshape/view.
619
+ query_states = query_states.transpose(1, 2)
620
+ key_states = key_states.transpose(1, 2)
621
+ value_states = value_states.transpose(1, 2)
622
+
623
+ dropout_rate = self.attention_dropout if self.training else 0.0
624
+
625
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
626
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
627
+ # cast them back in the correct dtype just to be sure everything works as expected.
628
+ # This might slow down training & inference, so it is recommended not to cast the LayerNorms
629
+ # in fp32. (LlamaRMSNorm handles it correctly)
630
+
631
+ input_dtype = query_states.dtype
632
+ if input_dtype == torch.float32:
633
+ if torch.is_autocast_enabled():
634
+ target_dtype = torch.get_autocast_gpu_dtype()
635
+ # Handle the case where the model is quantized
636
+ elif hasattr(self.config, "_pre_quantization_dtype"):
637
+ target_dtype = self.config._pre_quantization_dtype
638
+ else:
639
+ target_dtype = self.q_proj.weight.dtype
640
+
641
+ logger.warning_once(
642
+ f"The input hidden states seem to have been silently cast to float32; this is likely because you have upcast embedding or layer norm layers to float32. We will cast the input back to {target_dtype}."
643
+ )
644
+
645
+ query_states = query_states.to(target_dtype)
646
+ key_states = key_states.to(target_dtype)
647
+ value_states = value_states.to(target_dtype)
648
+
649
+ attn_output = self._flash_attention_forward(query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)
650
+
651
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
652
+ attn_output = self.o_proj(attn_output)
653
+
654
+ if not output_attentions:
655
+ attn_weights = None
656
+
657
+ return attn_output, attn_weights, past_key_value
658
+
659
+ def _flash_attention_forward(self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None):
660
+ """
661
+ Calls the forward method of Flash Attention: if the input hidden states contain at least one padding token,
662
+ the input is first unpadded, the attention scores are computed, and the output is then padded back.
663
+
664
+ Args:
665
+ query_states (`torch.Tensor`):
666
+ Input query states to be passed to Flash Attention API
667
+ key_states (`torch.Tensor`):
668
+ Input key states to be passed to Flash Attention API
669
+ value_states (`torch.Tensor`):
670
+ Input value states to be passed to Flash Attention API
671
+ attention_mask (`torch.Tensor`):
672
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
673
+ position of padding tokens and 1 for the position of non-padding tokens.
674
+ dropout (`float`, *optional*):
675
+ Attention dropout
676
+ softmax_scale (`float`, *optional*):
677
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
678
+ """
679
+ if not self._flash_attn_uses_top_left_mask:
680
+ causal = self.is_causal
681
+ else:
682
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
683
+ causal = self.is_causal and query_length != 1
684
+
685
+ # Contains at least one padding token in the sequence
686
+ if attention_mask is not None:
687
+ batch_size = query_states.shape[0]
688
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(query_states, key_states, value_states, attention_mask, query_length)
689
+
690
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
691
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
692
+
693
+ attn_output_unpad = flash_attn_varlen_func(
694
+ query_states,
695
+ key_states,
696
+ value_states,
697
+ cu_seqlens_q=cu_seqlens_q,
698
+ cu_seqlens_k=cu_seqlens_k,
699
+ max_seqlen_q=max_seqlen_in_batch_q,
700
+ max_seqlen_k=max_seqlen_in_batch_k,
701
+ dropout_p=dropout,
702
+ softmax_scale=softmax_scale,
703
+ causal=causal,
704
+ )
705
+
706
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
707
+ else:
708
+ attn_output = flash_attn_func(query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal)
709
+
710
+ return attn_output
711
+
712
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
713
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
714
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
715
+
716
+ key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
717
+ value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k)
718
+ if query_length == kv_seq_len:
719
+ query_layer = index_first_axis(query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k)
720
+ cu_seqlens_q = cu_seqlens_k
721
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
722
+ indices_q = indices_k
723
+ elif query_length == 1:
724
+ max_seqlen_in_batch_q = 1
725
+ cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=query_layer.device) # There is a memcpy here, that is very bad.
726
+ indices_q = cu_seqlens_q[:-1]
727
+ query_layer = query_layer.squeeze(1)
728
+ else:
729
+ # The -q_len: slice assumes left padding.
730
+ attention_mask = attention_mask[:, -query_length:]
731
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
732
+
733
+ return (
734
+ query_layer,
735
+ key_layer,
736
+ value_layer,
737
+ indices_q,
738
+ (cu_seqlens_q, cu_seqlens_k),
739
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
740
+ )
741
+
742
+
743
+ class LlamaSdpaAttention(LlamaAttention):
744
+ """
745
+ Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
746
+ `LlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
747
+ SDPA API.
748
+ """
749
+
750
+ # Adapted from LlamaAttention.forward
751
+ def forward(
752
+ self,
753
+ hidden_states: torch.Tensor,
754
+ attention_mask: Optional[torch.Tensor] = None,
755
+ position_ids: Optional[torch.LongTensor] = None,
756
+ past_key_value: Optional[Cache] = None,
757
+ output_attentions: bool = False,
758
+ use_cache: bool = False,
759
+ cache_position: Optional[torch.LongTensor] = None,
760
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
761
+ if output_attentions:
762
+ # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
763
+ logger.warning_once(
764
+ "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
765
+ 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
766
+ )
767
+ return super().forward(
768
+ hidden_states=hidden_states,
769
+ attention_mask=attention_mask,
770
+ position_ids=position_ids,
771
+ past_key_value=past_key_value,
772
+ output_attentions=output_attentions,
773
+ use_cache=use_cache,
774
+ cache_position=cache_position,
775
+ )
776
+
777
+ bsz, q_len, _ = hidden_states.size()
778
+
779
+ query_states = self.q_proj(hidden_states)
780
+ key_states = self.k_proj(hidden_states)
781
+ value_states = self.v_proj(hidden_states)
782
+
783
+ query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
784
+ key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
785
+ value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
786
+
787
+ cos, sin = self.rotary_emb(value_states, position_ids)
788
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
789
+
790
+ # In case static cache is used, it is an instance attribute.
791
+ past_key_value = getattr(self, "past_key_value", past_key_value)
792
+
793
+ if past_key_value is not None:
794
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
795
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
796
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
797
+
798
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
799
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
800
+
801
+ causal_mask = attention_mask
802
+ if attention_mask is not None and cache_position is not None:
803
+ causal_mask = causal_mask[:, :, cache_position, : key_states.shape[-2]]
804
+
805
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
806
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
807
+ if query_states.device.type == "cuda" and causal_mask is not None:
808
+ query_states = query_states.contiguous()
809
+ key_states = key_states.contiguous()
810
+ value_states = value_states.contiguous()
811
+
812
+ attn_output = torch.nn.functional.scaled_dot_product_attention(
813
+ query_states,
814
+ key_states,
815
+ value_states,
816
+ attn_mask=causal_mask,
817
+ dropout_p=self.attention_dropout if self.training else 0.0,
818
+ )
819
+
820
+ attn_output = attn_output.transpose(1, 2).contiguous()
821
+ attn_output = attn_output.view(bsz, q_len, self.hidden_size)
822
+
823
+ attn_output = self.o_proj(attn_output)
824
+
825
+ return attn_output, None, past_key_value
826
+
827
+
828
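# The SDPA path above calls `repeat_kv` (defined earlier in this file, not shown here) to expand the
# grouped key/value heads so they line up one-to-one with the query heads. A minimal sketch of the
# usual implementation is included below for reference only; the real helper may differ in details.
def _repeat_kv_sketch(hidden_states, n_rep):
    # hidden_states: (batch, num_key_value_heads, seq_len, head_dim)
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis, broadcast it, then fold it into the head dimension.
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)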
+ try:
829
+ from ring_flash_attn import zigzag_ring_flash_attn_qkvpacked_func, zigzag_ring_flash_attn_varlen_func
830
+ except ImportError:
831
+ print("Please install the ring-flash-attn package")
832
+
833
+ LLAMA_ATTENTION_CLASSES = {
834
+ "eager": LlamaAttention,
835
+ "flash_attention_2": LlamaFlashAttention2,
836
+ "ring_flash_attention_2": LlamaRingFlashAttention2,
837
+ "sdpa": LlamaSdpaAttention,
838
+ }
839
+
840
+
841
+ class LlamaDecoderLayer(nn.Module):
842
+ def __init__(self, config: LlamaConfig, layer_idx: int):
843
+ super().__init__()
844
+ self.hidden_size = config.hidden_size
845
+
846
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
847
+
848
+ self.mlp = LlamaMLP(config)
849
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
850
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
851
+
852
+ def forward(
853
+ self,
854
+ hidden_states: torch.Tensor,
855
+ attention_mask: Optional[torch.Tensor] = None,
856
+ position_ids: Optional[torch.LongTensor] = None,
857
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
858
+ output_attentions: Optional[bool] = False,
859
+ use_cache: Optional[bool] = False,
860
+ cache_position: Optional[torch.LongTensor] = None,
861
+ **kwargs,
862
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
863
+ """
864
+ Args:
865
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
866
+ attention_mask (`torch.FloatTensor`, *optional*):
867
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
868
+ query_sequence_length, key_sequence_length)` if default attention is used.
869
+ output_attentions (`bool`, *optional*):
870
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
871
+ returned tensors for more detail.
872
+ use_cache (`bool`, *optional*):
873
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
874
+ (see `past_key_values`).
875
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
876
+ """
877
+ if "padding_mask" in kwargs:
878
+ warnings.warn("Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.")
879
+
880
+ residual = hidden_states
881
+
882
+ hidden_states = self.input_layernorm(hidden_states)
883
+
884
+ # Self Attention
885
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
886
+ hidden_states=hidden_states,
887
+ attention_mask=attention_mask,
888
+ position_ids=position_ids,
889
+ past_key_value=past_key_value,
890
+ output_attentions=output_attentions,
891
+ use_cache=use_cache,
892
+ cache_position=cache_position,
893
+ **kwargs,
894
+ )
895
+ hidden_states = residual + hidden_states
896
+
897
+ # Fully Connected
898
+ residual = hidden_states
899
+ hidden_states = self.post_attention_layernorm(hidden_states)
900
+ hidden_states = self.mlp(hidden_states)
901
+ hidden_states = residual + hidden_states
902
+
903
+ outputs = (hidden_states,)
904
+
905
+ if output_attentions:
906
+ outputs += (self_attn_weights,)
907
+
908
+ if use_cache:
909
+ outputs += (present_key_value,)
910
+
911
+ return outputs
912
+
913
+
914
+ LLAMA_START_DOCSTRING = r"""
915
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
916
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
917
+ etc.)
918
+
919
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
920
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
921
+ and behavior.
922
+
923
+ Parameters:
924
+ config ([`LlamaConfig`]):
925
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
926
+ load the weights associated with the model, only the configuration. Check out the
927
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
928
+ """
929
+
930
+
931
+ @add_start_docstrings(
932
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
933
+ LLAMA_START_DOCSTRING,
934
+ )
935
+ class LlamaPreTrainedModel(PreTrainedModel):
936
+ config_class = LlamaConfig
937
+ base_model_prefix = "model"
938
+ supports_gradient_checkpointing = True
939
+ _no_split_modules = ["LlamaDecoderLayer"]
940
+ _skip_keys_device_placement = ["past_key_values", "causal_mask"]
941
+ _supports_flash_attn_2 = True
942
+ _supports_sdpa = True
943
+ _supports_cache_class = True
944
+
945
+ def _init_weights(self, module):
946
+ std = self.config.initializer_range
947
+ if isinstance(module, nn.Linear):
948
+ module.weight.data.normal_(mean=0.0, std=std)
949
+ if module.bias is not None:
950
+ module.bias.data.zero_()
951
+ elif isinstance(module, nn.Embedding):
952
+ module.weight.data.normal_(mean=0.0, std=std)
953
+ if module.padding_idx is not None:
954
+ module.weight.data[module.padding_idx].zero_()
955
+
956
+ def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None):
957
+ if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache:
958
+ raise ValueError("`static` cache implementation is not compatible with `attn_implementation==flash_attention_2`; make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers")
959
+
960
+ if max_cache_len > self.model.causal_mask.shape[-1] or self.device != self.model.causal_mask.device:
961
+ causal_mask = torch.full((max_cache_len, max_cache_len), fill_value=True, device=self.device, dtype=torch.bool)
962
+ self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
963
+
964
+ for layer in self.model.layers:
965
+ device = layer.input_layernorm.weight.device
966
+ if hasattr(self.config, "_pre_quantization_dtype"):
967
+ dtype = self.config._pre_quantization_dtype
968
+ else:
969
+ dtype = layer.self_attn.o_proj.weight.dtype
970
+ layer.self_attn.past_key_value = cache_cls(self.config, max_batch_size, max_cache_len, device=device, dtype=dtype)
971
+
972
+ def _reset_cache(self):
973
+ for layer in self.model.layers:
974
+ layer.self_attn.past_key_value = None
975
+
976
+
977
+ LLAMA_INPUTS_DOCSTRING = r"""
978
+ Args:
979
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
980
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
981
+ it.
982
+
983
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
984
+ [`PreTrainedTokenizer.__call__`] for details.
985
+
986
+ [What are input IDs?](../glossary#input-ids)
987
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
988
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
989
+
990
+ - 1 for tokens that are **not masked**,
991
+ - 0 for tokens that are **masked**.
992
+
993
+ [What are attention masks?](../glossary#attention-mask)
994
+
995
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
996
+ [`PreTrainedTokenizer.__call__`] for details.
997
+
998
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
999
+ `past_key_values`).
1000
+
1001
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
1002
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
1003
+ information on the default strategy.
1004
+
1005
+ - 1 indicates the head is **not masked**,
1006
+ - 0 indicates the head is **masked**.
1007
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1008
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1009
+ config.n_positions - 1]`.
1010
+
1011
+ [What are position IDs?](../glossary#position-ids)
1012
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
1013
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
1014
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
1015
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
1016
+
1017
+ Two formats are allowed:
1018
+ - a [`~cache_utils.Cache`] instance;
1019
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
1020
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
1021
+ cache format.
1022
+
1023
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
1024
+ legacy cache format will be returned.
1025
+
1026
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
1027
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
1028
+ of shape `(batch_size, sequence_length)`.
1029
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
1030
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
1031
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
1032
+ model's internal embedding lookup matrix.
1033
+ use_cache (`bool`, *optional*):
1034
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
1035
+ `past_key_values`).
1036
+ output_attentions (`bool`, *optional*):
1037
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1038
+ tensors for more detail.
1039
+ output_hidden_states (`bool`, *optional*):
1040
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1041
+ more detail.
1042
+ return_dict (`bool`, *optional*):
1043
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
1044
+ """
1045
+
1046
+
1047
+ @add_start_docstrings(
1048
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
1049
+ LLAMA_START_DOCSTRING,
1050
+ )
1051
+ class LlamaModel(LlamaPreTrainedModel):
1052
+ """
1053
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
1054
+
1055
+ Args:
1056
+ config: LlamaConfig
1057
+ """
1058
+
1059
+ def __init__(self, config: LlamaConfig):
1060
+ super().__init__(config)
1061
+ self.padding_idx = config.pad_token_id
1062
+ self.vocab_size = config.vocab_size
1063
+
1064
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
1065
+ self.layers = nn.ModuleList([LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
1066
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
1067
+ self.gradient_checkpointing = False
1068
+
1069
+ # Register a causal mask to separate causal and padding mask creation. Merging happens in the attention class.
1070
+ # NOTE: This is not friendly with TorchScript, ONNX, ExportedProgram serialization for very large `max_position_embeddings`.
1071
+ causal_mask = torch.full((config.max_position_embeddings, config.max_position_embeddings), fill_value=True, dtype=torch.bool)
1072
+ self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
1073
+ # Initialize weights and apply final processing
1074
+ self.post_init()
1075
+
1076
+ def get_input_embeddings(self):
1077
+ return self.embed_tokens
1078
+
1079
+ def set_input_embeddings(self, value):
1080
+ self.embed_tokens = value
1081
+
1082
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1083
+ def forward(
1084
+ self,
1085
+ input_ids: torch.LongTensor = None,
1086
+ attention_mask: Optional[torch.Tensor] = None,
1087
+ position_ids: Optional[torch.LongTensor] = None,
1088
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1089
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1090
+ use_cache: Optional[bool] = None,
1091
+ output_attentions: Optional[bool] = None,
1092
+ output_hidden_states: Optional[bool] = None,
1093
+ return_dict: Optional[bool] = None,
1094
+ cache_position: Optional[torch.LongTensor] = None,
1095
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
1096
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1097
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1098
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
1099
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1100
+
1101
+ if (input_ids is None) ^ (inputs_embeds is not None):
1102
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one")
1103
+
1104
+ if self.gradient_checkpointing and self.training and use_cache:
1105
+ logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.")
1106
+ use_cache = False
1107
+
1108
+ if inputs_embeds is None:
1109
+ inputs_embeds = self.embed_tokens(input_ids)
1110
+
1111
+ past_seen_tokens = 0
1112
+ if use_cache: # kept for BC (cache positions)
1113
+ if not isinstance(past_key_values, StaticCache):
1114
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
1115
+ past_seen_tokens = past_key_values.get_seq_length()
1116
+
1117
+ if cache_position is None:
1118
+ if isinstance(past_key_values, StaticCache):
1119
+ raise ValueError("cache_position is a required argument when using StaticCache.")
1120
+ cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)
1121
+
1122
+ if position_ids is None:
1123
+ position_ids = cache_position.unsqueeze(0)
1124
+
1125
+ causal_mask = self._update_causal_mask(attention_mask, inputs_embeds)
1126
+
1127
+ # embed positions
1128
+ hidden_states = inputs_embeds
1129
+
1130
+ # decoder layers
1131
+ all_hidden_states = () if output_hidden_states else None
1132
+ all_self_attns = () if output_attentions else None
1133
+ next_decoder_cache = None
1134
+
1135
+ for decoder_layer in self.layers:
1136
+ if output_hidden_states:
1137
+ all_hidden_states += (hidden_states,)
1138
+
1139
+ if self.gradient_checkpointing and self.training:
1140
+ layer_outputs = self._gradient_checkpointing_func(
1141
+ decoder_layer.__call__,
1142
+ hidden_states,
1143
+ causal_mask,
1144
+ position_ids,
1145
+ past_key_values,
1146
+ output_attentions,
1147
+ use_cache,
1148
+ cache_position,
1149
+ )
1150
+ else:
1151
+ layer_outputs = decoder_layer(
1152
+ hidden_states,
1153
+ attention_mask=causal_mask,
1154
+ position_ids=position_ids,
1155
+ past_key_value=past_key_values,
1156
+ output_attentions=output_attentions,
1157
+ use_cache=use_cache,
1158
+ cache_position=cache_position,
1159
+ )
1160
+
1161
+ hidden_states = layer_outputs[0]
1162
+
1163
+ if use_cache:
1164
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
1165
+
1166
+ if output_attentions:
1167
+ all_self_attns += (layer_outputs[1],)
1168
+
1169
+ hidden_states = self.norm(hidden_states)
1170
+
1171
+ # add hidden states from the last decoder layer
1172
+ if output_hidden_states:
1173
+ all_hidden_states += (hidden_states,)
1174
+
1175
+ next_cache = None
1176
+ if use_cache:
1177
+ next_cache = next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache
1178
+ if not return_dict:
1179
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
1180
+ return BaseModelOutputWithPast(
1181
+ last_hidden_state=hidden_states,
1182
+ past_key_values=next_cache,
1183
+ hidden_states=all_hidden_states,
1184
+ attentions=all_self_attns,
1185
+ )
1186
+
1187
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
1188
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
1189
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1190
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1191
+ def _update_causal_mask(self, attention_mask, input_tensor):
1192
+ if self.config._attn_implementation == "flash_attention_2":
1193
+ if attention_mask is not None and 0.0 in attention_mask:
1194
+ return attention_mask
1195
+ return None
1196
+
1197
+ batch_size, seq_length = input_tensor.shape[:2]
1198
+ dtype = input_tensor.dtype
1199
+ device = input_tensor.device
1200
+
1201
+ # support going beyond cached `max_position_embedding`
1202
+ if seq_length > self.causal_mask.shape[-1]:
1203
+ causal_mask = torch.full((2 * self.causal_mask.shape[-1], 2 * self.causal_mask.shape[-1]), fill_value=1)
1204
+ self.register_buffer("causal_mask", torch.triu(causal_mask, diagonal=1), persistent=False)
1205
+
1206
+ # We use the current dtype to avoid any overflows
1207
+ min_dtype = torch.finfo(dtype).min
1208
+ causal_mask = self.causal_mask[None, None, :, :].repeat(batch_size, 1, 1, 1).to(dtype) * min_dtype
1209
+
1210
+ causal_mask = causal_mask.to(dtype=dtype, device=device)
1211
+ if attention_mask is not None and attention_mask.dim() == 2:
1212
+ mask_length = attention_mask.shape[-1]
1213
+ padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
1214
+ causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
1215
+
1216
+ if self.config._attn_implementation == "sdpa" and attention_mask is not None and attention_mask.device.type == "cuda":
1217
+ # TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).
1218
+ is_tracing = torch.jit.is_tracing() or isinstance(input_tensor, torch.fx.Proxy) or (hasattr(torch, "_dynamo") and torch._dynamo.is_compiling())
1219
+ if not is_tracing and torch.any(attention_mask != 1):
1220
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1221
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1222
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1223
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1224
+
1225
+ return causal_mask
1226
+
1227
+
1228
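# `_update_causal_mask` above converts the boolean `causal_mask` buffer registered in
# `LlamaModel.__init__` into the additive float mask consumed by the eager/sdpa attention paths:
# 0 where attention is allowed, the most negative representable value elsewhere. A small,
# standalone sketch of the causal part of that conversion (not used by the classes below):
def _additive_causal_mask_sketch(seq_len, dtype=torch.float32):
    min_dtype = torch.finfo(dtype).min
    # True strictly above the diagonal marks the future positions a query must not attend to.
    bool_mask = torch.triu(torch.full((seq_len, seq_len), True, dtype=torch.bool), diagonal=1)
    # 0.0 for allowed positions, min_dtype for masked ones.
    return bool_mask.to(dtype) * min_dtype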
+ class LlamaForCausalLM(LlamaPreTrainedModel):
1229
+ _tied_weights_keys = ["lm_head.weight"]
1230
+
1231
+ def __init__(self, config):
1232
+ super().__init__(config)
1233
+ self.model = LlamaModel(config)
1234
+ self.vocab_size = config.vocab_size
1235
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1236
+
1237
+ # Initialize weights and apply final processing
1238
+ self.post_init()
1239
+
1240
+ def get_input_embeddings(self):
1241
+ return self.model.embed_tokens
1242
+
1243
+ def set_input_embeddings(self, value):
1244
+ self.model.embed_tokens = value
1245
+
1246
+ def get_output_embeddings(self):
1247
+ return self.lm_head
1248
+
1249
+ def set_output_embeddings(self, new_embeddings):
1250
+ self.lm_head = new_embeddings
1251
+
1252
+ def set_decoder(self, decoder):
1253
+ self.model = decoder
1254
+
1255
+ def get_decoder(self):
1256
+ return self.model
1257
+
1258
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1259
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1260
+ def forward(
1261
+ self,
1262
+ input_ids: torch.LongTensor = None,
1263
+ attention_mask: Optional[torch.Tensor] = None,
1264
+ position_ids: Optional[torch.LongTensor] = None,
1265
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1266
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1267
+ labels: Optional[torch.LongTensor] = None,
1268
+ use_cache: Optional[bool] = None,
1269
+ output_attentions: Optional[bool] = None,
1270
+ output_hidden_states: Optional[bool] = None,
1271
+ return_dict: Optional[bool] = None,
1272
+ cache_position: Optional[torch.LongTensor] = None,
1273
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1274
+ r"""
1275
+ Args:
1276
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1277
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1278
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1279
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1280
+
1281
+ Returns:
1282
+
1283
+ Example:
1284
+
1285
+ ```python
1286
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
1287
+
1288
+ >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
1289
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
1290
+
1291
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1292
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1293
+
1294
+ >>> # Generate
1295
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1296
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1297
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1298
+ ```"""
1299
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1300
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1301
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1302
+
1303
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1304
+ outputs = self.model(
1305
+ input_ids=input_ids,
1306
+ attention_mask=attention_mask,
1307
+ position_ids=position_ids,
1308
+ past_key_values=past_key_values,
1309
+ inputs_embeds=inputs_embeds,
1310
+ use_cache=use_cache,
1311
+ output_attentions=output_attentions,
1312
+ output_hidden_states=output_hidden_states,
1313
+ return_dict=return_dict,
1314
+ cache_position=cache_position,
1315
+ )
1316
+
1317
+ hidden_states = outputs[0]
1318
+ if self.config.pretraining_tp > 1:
1319
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1320
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
1321
+ logits = torch.cat(logits, dim=-1)
1322
+ else:
1323
+ logits = self.lm_head(hidden_states)
1324
+ logits = logits.float()
1325
+
1326
+ loss = None
1327
+ if labels is not None:
1328
+ # Shift so that tokens < n predict n
1329
+ shift_logits = logits[..., :-1, :].contiguous()
1330
+ shift_labels = labels[..., 1:].contiguous()
1331
+ # Flatten the tokens
1332
+ loss_fct = CrossEntropyLoss()
1333
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1334
+ shift_labels = shift_labels.view(-1)
1335
+ # Enable model parallelism
1336
+ shift_labels = shift_labels.to(shift_logits.device)
1337
+ loss = loss_fct(shift_logits, shift_labels)
1338
+
1339
+ if not return_dict:
1340
+ output = (logits,) + outputs[1:]
1341
+ return (loss,) + output if loss is not None else output
1342
+
1343
+ return CausalLMOutputWithPast(
1344
+ loss=loss,
1345
+ logits=logits,
1346
+ past_key_values=outputs.past_key_values,
1347
+ hidden_states=outputs.hidden_states,
1348
+ attentions=outputs.attentions,
1349
+ )
1350
+
1351
+ def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs):
1352
+ past_length = 0
1353
+ if past_key_values is not None:
1354
+ if isinstance(past_key_values, Cache):
1355
+ cache_length = past_key_values.get_seq_length()
1356
+ past_length = past_key_values.seen_tokens
1357
+ max_cache_length = past_key_values.get_max_length()
1358
+ else:
1359
+ cache_length = past_length = past_key_values[0][0].shape[2]
1360
+ max_cache_length = None
1361
+
1362
+ # Keep only the unprocessed tokens:
1363
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
1364
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as
1365
+ # input)
1366
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
1367
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
1368
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
1369
+ # input_ids based on the past_length.
1370
+ elif past_length < input_ids.shape[1]:
1371
+ input_ids = input_ids[:, past_length:]
1372
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
1373
+
1374
+ # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
1375
+ if max_cache_length is not None and attention_mask is not None and cache_length + input_ids.shape[1] > max_cache_length:
1376
+ attention_mask = attention_mask[:, -max_cache_length:]
1377
+
1378
+ position_ids = kwargs.get("position_ids", None)
1379
+ if attention_mask is not None and position_ids is None:
1380
+ # create position_ids on the fly for batch generation
1381
+ position_ids = attention_mask.long().cumsum(-1) - 1
1382
+ position_ids.masked_fill_(attention_mask == 0, 1)
1383
+ if past_key_values:
1384
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1385
+
1386
+ if self.generation_config.cache_implementation == "static":
1387
+ # generation with static cache
1388
+ cache_position = kwargs.get("cache_position", None)
1389
+ if cache_position is None:
1390
+ past_length = 0
1391
+ else:
1392
+ past_length = cache_position[-1] + 1
1393
+ input_ids = input_ids[:, past_length:]
1394
+ position_ids = position_ids[:, past_length:]
1395
+
1396
+ # TODO @gante we should only keep a `cache_position` in generate, and do +=1.
1397
+ # same goes for position ids. Could also help with continued generation.
1398
+ input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
1399
+ cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
1400
+ position_ids = position_ids.contiguous() if position_ids is not None else None
1401
+
1402
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1403
+ if inputs_embeds is not None and past_key_values is None:
1404
+ model_inputs = {"inputs_embeds": inputs_embeds}
1405
+ else:
1406
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
1407
+ # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114
1408
+ # TODO: use `next_tokens` directly instead.
1409
+ model_inputs = {"input_ids": input_ids.contiguous()}
1410
+
1411
+ model_inputs.update(
1412
+ {
1413
+ "position_ids": position_ids,
1414
+ "cache_position": cache_position,
1415
+ "past_key_values": past_key_values,
1416
+ "use_cache": kwargs.get("use_cache"),
1417
+ "attention_mask": attention_mask,
1418
+ }
1419
+ )
1420
+ return model_inputs
1421
+
1422
+ @staticmethod
1423
+ def _reorder_cache(past_key_values, beam_idx):
1424
+ reordered_past = ()
1425
+ for layer_past in past_key_values:
1426
+ reordered_past += (tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),)
1427
+ return reordered_past
1428
+
1429
+
1430
+ @add_start_docstrings(
1431
+ """
1432
+ The LLaMa Model transformer with a sequence classification head on top (linear layer).
1433
+
1434
+ [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
1435
+ (e.g. GPT-2) do.
1436
+
1437
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1438
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1439
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1440
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1441
+ each row of the batch).
1442
+ """,
1443
+ LLAMA_START_DOCSTRING,
1444
+ )
1445
+ class LlamaForSequenceClassification(LlamaPreTrainedModel):
1446
+ def __init__(self, config):
1447
+ super().__init__(config)
1448
+ self.num_labels = config.num_labels
1449
+ self.model = LlamaModel(config)
1450
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1451
+
1452
+ # Initialize weights and apply final processing
1453
+ self.post_init()
1454
+
1455
+ def get_input_embeddings(self):
1456
+ return self.model.embed_tokens
1457
+
1458
+ def set_input_embeddings(self, value):
1459
+ self.model.embed_tokens = value
1460
+
1461
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1462
+ def forward(
1463
+ self,
1464
+ input_ids: torch.LongTensor = None,
1465
+ attention_mask: Optional[torch.Tensor] = None,
1466
+ position_ids: Optional[torch.LongTensor] = None,
1467
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1468
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1469
+ labels: Optional[torch.LongTensor] = None,
1470
+ use_cache: Optional[bool] = None,
1471
+ output_attentions: Optional[bool] = None,
1472
+ output_hidden_states: Optional[bool] = None,
1473
+ return_dict: Optional[bool] = None,
1474
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1475
+ r"""
1476
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1477
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1478
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1479
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1480
+ """
1481
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1482
+
1483
+ transformer_outputs = self.model(
1484
+ input_ids,
1485
+ attention_mask=attention_mask,
1486
+ position_ids=position_ids,
1487
+ past_key_values=past_key_values,
1488
+ inputs_embeds=inputs_embeds,
1489
+ use_cache=use_cache,
1490
+ output_attentions=output_attentions,
1491
+ output_hidden_states=output_hidden_states,
1492
+ return_dict=return_dict,
1493
+ )
1494
+ hidden_states = transformer_outputs[0]
1495
+ logits = self.score(hidden_states)
1496
+
1497
+ if input_ids is not None:
1498
+ batch_size = input_ids.shape[0]
1499
+ else:
1500
+ batch_size = inputs_embeds.shape[0]
1501
+
1502
+ if self.config.pad_token_id is None and batch_size != 1:
1503
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1504
+ if self.config.pad_token_id is None:
1505
+ sequence_lengths = -1
1506
+ else:
1507
+ if input_ids is not None:
1508
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1509
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1510
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1511
+ sequence_lengths = sequence_lengths.to(logits.device)
1512
+ else:
1513
+ sequence_lengths = -1
1514
+
1515
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1516
+
1517
+ loss = None
1518
+ if labels is not None:
1519
+ labels = labels.to(logits.device)
1520
+ if self.config.problem_type is None:
1521
+ if self.num_labels == 1:
1522
+ self.config.problem_type = "regression"
1523
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1524
+ self.config.problem_type = "single_label_classification"
1525
+ else:
1526
+ self.config.problem_type = "multi_label_classification"
1527
+
1528
+ if self.config.problem_type == "regression":
1529
+ loss_fct = MSELoss()
1530
+ if self.num_labels == 1:
1531
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1532
+ else:
1533
+ loss = loss_fct(pooled_logits, labels)
1534
+ elif self.config.problem_type == "single_label_classification":
1535
+ loss_fct = CrossEntropyLoss()
1536
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1537
+ elif self.config.problem_type == "multi_label_classification":
1538
+ loss_fct = BCEWithLogitsLoss()
1539
+ loss = loss_fct(pooled_logits, labels)
1540
+ if not return_dict:
1541
+ output = (pooled_logits,) + transformer_outputs[1:]
1542
+ return ((loss,) + output) if loss is not None else output
1543
+
1544
+ return SequenceClassifierOutputWithPast(
1545
+ loss=loss,
1546
+ logits=pooled_logits,
1547
+ past_key_values=transformer_outputs.past_key_values,
1548
+ hidden_states=transformer_outputs.hidden_states,
1549
+ attentions=transformer_outputs.attentions,
1550
+ )
1551
+
1552
+
1553
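# The pooling in `LlamaForSequenceClassification.forward` above selects the hidden state of the
# last non-padding token with an argmax/modulo pattern that stays ONNX-friendly. A small,
# standalone sketch of that step is shown here for illustration only (not used by this file):
def _pool_last_real_token_sketch(input_ids, logits, pad_token_id):
    # Index of the first pad token minus one gives the last real token; rows without any
    # padding yield argmax == 0, and the modulo wraps the resulting -1 to the final position.
    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    sequence_lengths = sequence_lengths % input_ids.shape[-1]
    return logits[torch.arange(input_ids.shape[0], device=logits.device), sequence_lengths]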
+ @add_start_docstrings(
1554
+ """
1555
+ The Llama Model transformer with a span classification head on top for extractive question-answering tasks like
1556
+ SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
1557
+ """,
1558
+ LLAMA_START_DOCSTRING,
1559
+ )
1560
+ class LlamaForQuestionAnswering(LlamaPreTrainedModel):
1561
+ base_model_prefix = "transformer"
1562
+
1563
+ # Copied from transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering.__init__ with Bloom->Llama
1564
+ def __init__(self, config):
1565
+ super().__init__(config)
1566
+ self.transformer = LlamaModel(config)
1567
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1568
+
1569
+ # Initialize weights and apply final processing
1570
+ self.post_init()
1571
+
1572
+ def get_input_embeddings(self):
1573
+ return self.transformer.embed_tokens
1574
+
1575
+ def set_input_embeddings(self, value):
1576
+ self.transformer.embed_tokens = value
1577
+
1578
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1579
+ def forward(
1580
+ self,
1581
+ input_ids: Optional[torch.LongTensor] = None,
1582
+ attention_mask: Optional[torch.FloatTensor] = None,
1583
+ position_ids: Optional[torch.LongTensor] = None,
1584
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1585
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1586
+ start_positions: Optional[torch.LongTensor] = None,
1587
+ end_positions: Optional[torch.LongTensor] = None,
1588
+ output_attentions: Optional[bool] = None,
1589
+ output_hidden_states: Optional[bool] = None,
1590
+ return_dict: Optional[bool] = None,
1591
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1592
+ r"""
1593
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1594
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1595
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1596
+ are not taken into account for computing the loss.
1597
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1598
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1599
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1600
+ are not taken into account for computing the loss.
1601
+ """
1602
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1603
+
1604
+ outputs = self.transformer(
1605
+ input_ids,
1606
+ attention_mask=attention_mask,
1607
+ position_ids=position_ids,
1608
+ past_key_values=past_key_values,
1609
+ inputs_embeds=inputs_embeds,
1610
+ output_attentions=output_attentions,
1611
+ output_hidden_states=output_hidden_states,
1612
+ return_dict=return_dict,
1613
+ )
1614
+
1615
+ sequence_output = outputs[0]
1616
+
1617
+ logits = self.qa_outputs(sequence_output)
1618
+ start_logits, end_logits = logits.split(1, dim=-1)
1619
+ start_logits = start_logits.squeeze(-1).contiguous()
1620
+ end_logits = end_logits.squeeze(-1).contiguous()
1621
+
1622
+ total_loss = None
1623
+ if start_positions is not None and end_positions is not None:
1624
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it away
1625
+ if len(start_positions.size()) > 1:
1626
+ start_positions = start_positions.squeeze(-1).to(start_logits.device)
1627
+ if len(end_positions.size()) > 1:
1628
+ end_positions = end_positions.squeeze(-1).to(end_logits.device)
1629
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
1630
+ ignored_index = start_logits.size(1)
1631
+ start_positions = start_positions.clamp(0, ignored_index)
1632
+ end_positions = end_positions.clamp(0, ignored_index)
1633
+
1634
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1635
+ start_loss = loss_fct(start_logits, start_positions)
1636
+ end_loss = loss_fct(end_logits, end_positions)
1637
+ total_loss = (start_loss + end_loss) / 2
1638
+
1639
+ if not return_dict:
1640
+ output = (start_logits, end_logits) + outputs[2:]
1641
+ return ((total_loss,) + output) if total_loss is not None else output
1642
+
1643
+ return QuestionAnsweringModelOutput(
1644
+ loss=total_loss,
1645
+ start_logits=start_logits,
1646
+ end_logits=end_logits,
1647
+ hidden_states=outputs.hidden_states,
1648
+ attentions=outputs.attentions,
1649
+ )
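The next-token objective in `LlamaForCausalLM.forward` above shifts logits and labels by one position so that the prediction at position i is scored against the token at position i + 1. A minimal standalone sketch of that alignment (tensor sizes are illustrative only, not taken from this file):

    import torch
    from torch.nn import CrossEntropyLoss

    logits = torch.randn(2, 6, 32000)                # (batch, seq_len, vocab_size)
    labels = torch.randint(0, 32000, (2, 6))         # (batch, seq_len)
    shift_logits = logits[..., :-1, :].contiguous()  # drop the last position
    shift_labels = labels[..., 1:].contiguous()      # drop the first token
    loss = CrossEntropyLoss()(shift_logits.view(-1, 32000), shift_labels.view(-1))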
VLMEvalKit-sudoku/llava/model/multimodal_resampler/__pycache__/perceiver.cpython-310.pyc ADDED
Binary file (4.83 kB). View file
 
VLMEvalKit-sudoku/llava/model/multimodal_resampler/qformer.py ADDED
@@ -0,0 +1,1160 @@
1
+ """
2
+ * Copyright (c) 2023, salesforce.com, inc.
3
+ * All rights reserved.
4
+ * SPDX-License-Identifier: BSD-3-Clause
5
+ * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
+ * By Junnan Li
7
+ * Based on huggingface code base
8
+ * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert
9
+ """
10
+
11
+ import math
12
+ import os
13
+ import warnings
14
+ from dataclasses import dataclass
15
+ from typing import Optional, Tuple, Dict, Any
16
+
17
+ import torch
18
+ from torch import Tensor, device, dtype, nn
19
+ import torch.utils.checkpoint
20
+ from torch import nn
21
+ from torch.nn import CrossEntropyLoss
22
+ import torch.nn.functional as F
23
+
24
+ from transformers.activations import ACT2FN
25
+ from transformers.file_utils import (
26
+ ModelOutput,
27
+ )
28
+ from transformers.modeling_outputs import (
29
+ BaseModelOutputWithPastAndCrossAttentions,
30
+ BaseModelOutputWithPoolingAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ MaskedLMOutput,
33
+ MultipleChoiceModelOutput,
34
+ NextSentencePredictorOutput,
35
+ QuestionAnsweringModelOutput,
36
+ SequenceClassifierOutput,
37
+ TokenClassifierOutput,
38
+ )
39
+ from transformers.modeling_utils import (
40
+ PreTrainedModel,
41
+ apply_chunking_to_forward,
42
+ find_pruneable_heads_and_indices,
43
+ prune_linear_layer,
44
+ )
45
+ from transformers.utils import logging
46
+ from transformers.models.bert.configuration_bert import BertConfig
47
+
48
+ logger = logging.get_logger(__name__)
49
+
50
+
51
+ def disabled_train(self, mode=True):
52
+ """Overwrite model.train with this function to make sure train/eval mode
53
+ does not change anymore."""
54
+ return self
55
+
56
+
57
+ class BertEmbeddings(nn.Module):
58
+ """Construct the embeddings from word and position embeddings."""
59
+
60
+ def __init__(self, config):
61
+ super().__init__()
62
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
63
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
64
+
65
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
66
+ # any TensorFlow checkpoint file
67
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
68
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
69
+
70
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
71
+ self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
72
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
73
+
74
+ self.config = config
75
+
76
+ def forward(
77
+ self,
78
+ input_ids=None,
79
+ position_ids=None,
80
+ query_embeds=None,
81
+ past_key_values_length=0,
82
+ ):
83
+ if input_ids is not None:
84
+ seq_length = input_ids.size()[1]
85
+ else:
86
+ seq_length = 0
87
+
88
+ if position_ids is None:
89
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length].clone()
90
+
91
+ if input_ids is not None:
92
+ embeddings = self.word_embeddings(input_ids)
93
+ if self.position_embedding_type == "absolute":
94
+ position_embeddings = self.position_embeddings(position_ids)
95
+ embeddings = embeddings + position_embeddings
96
+
97
+ if query_embeds is not None:
98
+ embeddings = torch.cat((query_embeds, embeddings), dim=1)
99
+ else:
100
+ embeddings = query_embeds
101
+
102
+ embeddings = self.LayerNorm(embeddings)
103
+ embeddings = self.dropout(embeddings)
104
+ return embeddings
105
+
106
+
107
+ class BertSelfAttention(nn.Module):
108
+ def __init__(self, config, is_cross_attention):
109
+ super().__init__()
110
+ self.config = config
111
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
112
+ raise ValueError("The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads))
113
+
114
+ self.num_attention_heads = config.num_attention_heads
115
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
116
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
117
+
118
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
119
+ if is_cross_attention:
120
+ self.key = nn.Linear(config.encoder_width, self.all_head_size)
121
+ self.value = nn.Linear(config.encoder_width, self.all_head_size)
122
+ else:
123
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
124
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
125
+
126
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
127
+ self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
128
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
129
+ self.max_position_embeddings = config.max_position_embeddings
130
+ self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
131
+ self.save_attention = False
132
+
133
+ def save_attn_gradients(self, attn_gradients):
134
+ self.attn_gradients = attn_gradients
135
+
136
+ def get_attn_gradients(self):
137
+ return self.attn_gradients
138
+
139
+ def save_attention_map(self, attention_map):
140
+ self.attention_map = attention_map
141
+
142
+ def get_attention_map(self):
143
+ return self.attention_map
144
+
145
+ def transpose_for_scores(self, x):
146
+ new_x_shape = x.size()[:-1] + (
147
+ self.num_attention_heads,
148
+ self.attention_head_size,
149
+ )
150
+ x = x.view(*new_x_shape)
151
+ return x.permute(0, 2, 1, 3)
152
+
153
+ def forward(
154
+ self,
155
+ hidden_states,
156
+ attention_mask=None,
157
+ head_mask=None,
158
+ encoder_hidden_states=None,
159
+ encoder_attention_mask=None,
160
+ past_key_value=None,
161
+ output_attentions=False,
162
+ ):
163
+
164
+ # If this is instantiated as a cross-attention module, the keys
165
+ # and values come from an encoder; the attention mask needs to be
166
+ # such that the encoder's padding tokens are not attended to.
167
+ is_cross_attention = encoder_hidden_states is not None
168
+
169
+ if is_cross_attention:
170
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
171
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
172
+ attention_mask = encoder_attention_mask
173
+ elif past_key_value is not None:
174
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
175
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
176
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
177
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
178
+ else:
179
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
180
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
181
+
182
+ mixed_query_layer = self.query(hidden_states)
183
+
184
+ query_layer = self.transpose_for_scores(mixed_query_layer)
185
+
186
+ past_key_value = (key_layer, value_layer)
187
+
188
+ # Take the dot product between "query" and "key" to get the raw attention scores.
189
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
190
+
191
+ if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
192
+ seq_length = hidden_states.size()[1]
193
+ position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
194
+ position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
195
+ distance = position_ids_l - position_ids_r
196
+ positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
197
+ positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
198
+
199
+ if self.position_embedding_type == "relative_key":
200
+ relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
201
+ attention_scores = attention_scores + relative_position_scores
202
+ elif self.position_embedding_type == "relative_key_query":
203
+ relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
204
+ relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
205
+ attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
206
+
207
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
208
+ if attention_mask is not None:
209
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
210
+ attention_scores = attention_scores + attention_mask
211
+
212
+ # Normalize the attention scores to probabilities.
213
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
214
+
215
+ if is_cross_attention and self.save_attention:
216
+ self.save_attention_map(attention_probs)
217
+ attention_probs.register_hook(self.save_attn_gradients)
218
+
219
+ # This is actually dropping out entire tokens to attend to, which might
220
+ # seem a bit unusual, but is taken from the original Transformer paper.
221
+ attention_probs_dropped = self.dropout(attention_probs)
222
+
223
+ # Mask heads if we want to
224
+ if head_mask is not None:
225
+ attention_probs_dropped = attention_probs_dropped * head_mask
226
+
227
+ context_layer = torch.matmul(attention_probs_dropped, value_layer)
228
+
229
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
230
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
231
+ context_layer = context_layer.view(*new_context_layer_shape)
232
+
233
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
234
+
235
+ outputs = outputs + (past_key_value,)
236
+ return outputs
237
+
238
+
239
+ class BertSelfOutput(nn.Module):
240
+ def __init__(self, config):
241
+ super().__init__()
242
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
243
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
244
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
245
+
246
+ def forward(self, hidden_states, input_tensor):
247
+ hidden_states = self.dense(hidden_states)
248
+ hidden_states = self.dropout(hidden_states)
249
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
250
+ return hidden_states
251
+
252
+
253
+ class BertAttention(nn.Module):
254
+ def __init__(self, config, is_cross_attention=False):
255
+ super().__init__()
256
+ self.self = BertSelfAttention(config, is_cross_attention)
257
+ self.output = BertSelfOutput(config)
258
+ self.pruned_heads = set()
259
+
260
+ def prune_heads(self, heads):
261
+ if len(heads) == 0:
262
+ return
263
+ heads, index = find_pruneable_heads_and_indices(
264
+ heads,
265
+ self.self.num_attention_heads,
266
+ self.self.attention_head_size,
267
+ self.pruned_heads,
268
+ )
269
+
270
+ # Prune linear layers
271
+ self.self.query = prune_linear_layer(self.self.query, index)
272
+ self.self.key = prune_linear_layer(self.self.key, index)
273
+ self.self.value = prune_linear_layer(self.self.value, index)
274
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
275
+
276
+ # Update hyper params and store pruned heads
277
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
278
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
279
+ self.pruned_heads = self.pruned_heads.union(heads)
280
+
281
+ def forward(
282
+ self,
283
+ hidden_states,
284
+ attention_mask=None,
285
+ head_mask=None,
286
+ encoder_hidden_states=None,
287
+ encoder_attention_mask=None,
288
+ past_key_value=None,
289
+ output_attentions=False,
290
+ ):
291
+ self_outputs = self.self(
292
+ hidden_states,
293
+ attention_mask,
294
+ head_mask,
295
+ encoder_hidden_states,
296
+ encoder_attention_mask,
297
+ past_key_value,
298
+ output_attentions,
299
+ )
300
+ attention_output = self.output(self_outputs[0], hidden_states)
301
+
302
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
303
+ return outputs
304
+
305
+
306
+ class BertIntermediate(nn.Module):
307
+ def __init__(self, config):
308
+ super().__init__()
309
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
310
+ if isinstance(config.hidden_act, str):
311
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
312
+ else:
313
+ self.intermediate_act_fn = config.hidden_act
314
+
315
+ def forward(self, hidden_states):
316
+ hidden_states = self.dense(hidden_states)
317
+ hidden_states = self.intermediate_act_fn(hidden_states)
318
+ return hidden_states
319
+
320
+
321
+ class BertOutput(nn.Module):
322
+ def __init__(self, config):
323
+ super().__init__()
324
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
325
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
326
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
327
+
328
+ def forward(self, hidden_states, input_tensor):
329
+ hidden_states = self.dense(hidden_states)
330
+ hidden_states = self.dropout(hidden_states)
331
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
332
+ return hidden_states
333
+
334
+
335
+ class BertLayer(nn.Module):
336
+ def __init__(self, config, layer_num):
337
+ super().__init__()
338
+ self.config = config
339
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
340
+ self.seq_len_dim = 1
341
+ self.attention = BertAttention(config)
342
+ self.layer_num = layer_num
343
+ if self.config.add_cross_attention and layer_num % self.config.cross_attention_freq == 0:
344
+ self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention)
345
+ self.has_cross_attention = True
346
+ else:
347
+ self.has_cross_attention = False
348
+ self.intermediate = BertIntermediate(config)
349
+ self.output = BertOutput(config)
350
+
351
+ self.intermediate_query = BertIntermediate(config)
352
+ self.output_query = BertOutput(config)
353
+
354
+ def forward(
355
+ self,
356
+ hidden_states,
357
+ attention_mask=None,
358
+ head_mask=None,
359
+ encoder_hidden_states=None,
360
+ encoder_attention_mask=None,
361
+ past_key_value=None,
362
+ output_attentions=False,
363
+ query_length=0,
364
+ ):
365
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
366
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
367
+ self_attention_outputs = self.attention(
368
+ hidden_states,
369
+ attention_mask,
370
+ head_mask,
371
+ output_attentions=output_attentions,
372
+ past_key_value=self_attn_past_key_value,
373
+ )
374
+ attention_output = self_attention_outputs[0]
375
+ outputs = self_attention_outputs[1:-1]
376
+
377
+ present_key_value = self_attention_outputs[-1]
378
+
379
+ if query_length > 0:
380
+ query_attention_output = attention_output[:, :query_length, :]
381
+
382
+ if self.has_cross_attention:
383
+ assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers"
384
+ cross_attention_outputs = self.crossattention(
385
+ query_attention_output,
386
+ attention_mask,
387
+ head_mask,
388
+ encoder_hidden_states,
389
+ encoder_attention_mask,
390
+ output_attentions=output_attentions,
391
+ )
392
+ query_attention_output = cross_attention_outputs[0]
393
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
394
+
395
+ layer_output = apply_chunking_to_forward(
396
+ self.feed_forward_chunk_query,
397
+ self.chunk_size_feed_forward,
398
+ self.seq_len_dim,
399
+ query_attention_output,
400
+ )
401
+ if attention_output.shape[1] > query_length:
402
+ layer_output_text = apply_chunking_to_forward(
403
+ self.feed_forward_chunk,
404
+ self.chunk_size_feed_forward,
405
+ self.seq_len_dim,
406
+ attention_output[:, query_length:, :],
407
+ )
408
+ layer_output = torch.cat([layer_output, layer_output_text], dim=1)
409
+ else:
410
+ layer_output = apply_chunking_to_forward(
411
+ self.feed_forward_chunk,
412
+ self.chunk_size_feed_forward,
413
+ self.seq_len_dim,
414
+ attention_output,
415
+ )
416
+ outputs = (layer_output,) + outputs
417
+
418
+ outputs = outputs + (present_key_value,)
419
+
420
+ return outputs
421
+
422
+ def feed_forward_chunk(self, attention_output):
423
+ intermediate_output = self.intermediate(attention_output)
424
+ layer_output = self.output(intermediate_output, attention_output)
425
+ return layer_output
426
+
427
+ def feed_forward_chunk_query(self, attention_output):
428
+ intermediate_output = self.intermediate_query(attention_output)
429
+ layer_output = self.output_query(intermediate_output, attention_output)
430
+ return layer_output
431
+
432
+
433
+ class BertEncoder(nn.Module):
434
+ def __init__(self, config):
435
+ super().__init__()
436
+ self.config = config
437
+ self.layer = nn.ModuleList([BertLayer(config, i) for i in range(config.num_hidden_layers)])
438
+
439
+ def forward(
440
+ self,
441
+ hidden_states,
442
+ attention_mask=None,
443
+ head_mask=None,
444
+ encoder_hidden_states=None,
445
+ encoder_attention_mask=None,
446
+ past_key_values=None,
447
+ use_cache=None,
448
+ output_attentions=False,
449
+ output_hidden_states=False,
450
+ return_dict=True,
451
+ query_length=0,
452
+ ):
453
+ all_hidden_states = () if output_hidden_states else None
454
+ all_self_attentions = () if output_attentions else None
455
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
456
+
457
+ next_decoder_cache = () if use_cache else None
458
+
459
+ for i in range(self.config.num_hidden_layers):
460
+ layer_module = self.layer[i]
461
+ if output_hidden_states:
462
+ all_hidden_states = all_hidden_states + (hidden_states,)
463
+
464
+ layer_head_mask = head_mask[i] if head_mask is not None else None
465
+ past_key_value = past_key_values[i] if past_key_values is not None else None
466
+
467
+ if getattr(self.config, "gradient_checkpointing", False) and self.training:
468
+
469
+ if use_cache:
470
+ logger.warn("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
471
+ use_cache = False
472
+
473
+ def create_custom_forward(module):
474
+ def custom_forward(*inputs):
475
+ return module(*inputs, past_key_value, output_attentions, query_length)
476
+
477
+ return custom_forward
478
+
479
+ layer_outputs = torch.utils.checkpoint.checkpoint(
480
+ create_custom_forward(layer_module),
481
+ hidden_states,
482
+ attention_mask,
483
+ layer_head_mask,
484
+ encoder_hidden_states,
485
+ encoder_attention_mask,
486
+ )
487
+ else:
488
+ layer_outputs = layer_module(
489
+ hidden_states,
490
+ attention_mask,
491
+ layer_head_mask,
492
+ encoder_hidden_states,
493
+ encoder_attention_mask,
494
+ past_key_value,
495
+ output_attentions,
496
+ query_length,
497
+ )
498
+
499
+ hidden_states = layer_outputs[0]
500
+ if use_cache:
501
+ next_decoder_cache += (layer_outputs[-1],)
502
+ if output_attentions:
503
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
504
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
505
+
506
+ if output_hidden_states:
507
+ all_hidden_states = all_hidden_states + (hidden_states,)
508
+
509
+ if not return_dict:
510
+ return tuple(
511
+ v
512
+ for v in [
513
+ hidden_states,
514
+ next_decoder_cache,
515
+ all_hidden_states,
516
+ all_self_attentions,
517
+ all_cross_attentions,
518
+ ]
519
+ if v is not None
520
+ )
521
+ return BaseModelOutputWithPastAndCrossAttentions(
522
+ last_hidden_state=hidden_states,
523
+ past_key_values=next_decoder_cache,
524
+ hidden_states=all_hidden_states,
525
+ attentions=all_self_attentions,
526
+ cross_attentions=all_cross_attentions,
527
+ )
528
+
529
+
530
+ class BertPooler(nn.Module):
531
+ def __init__(self, config):
532
+ super().__init__()
533
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
534
+ self.activation = nn.Tanh()
535
+
536
+ def forward(self, hidden_states):
537
+ # We "pool" the model by simply taking the hidden state corresponding
538
+ # to the first token.
539
+ first_token_tensor = hidden_states[:, 0]
540
+ pooled_output = self.dense(first_token_tensor)
541
+ pooled_output = self.activation(pooled_output)
542
+ return pooled_output
543
+
544
+
545
+ class BertPredictionHeadTransform(nn.Module):
546
+ def __init__(self, config):
547
+ super().__init__()
548
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
549
+ if isinstance(config.hidden_act, str):
550
+ self.transform_act_fn = ACT2FN[config.hidden_act]
551
+ else:
552
+ self.transform_act_fn = config.hidden_act
553
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
554
+
555
+ def forward(self, hidden_states):
556
+ hidden_states = self.dense(hidden_states)
557
+ hidden_states = self.transform_act_fn(hidden_states)
558
+ hidden_states = self.LayerNorm(hidden_states)
559
+ return hidden_states
560
+
561
+
562
+ class BertLMPredictionHead(nn.Module):
563
+ def __init__(self, config):
564
+ super().__init__()
565
+ self.transform = BertPredictionHeadTransform(config)
566
+
567
+ # The output weights are the same as the input embeddings, but there is
568
+ # an output-only bias for each token.
569
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
570
+
571
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
572
+
573
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
574
+ self.decoder.bias = self.bias
575
+
576
+ def forward(self, hidden_states):
577
+ hidden_states = self.transform(hidden_states)
578
+ hidden_states = self.decoder(hidden_states)
579
+ return hidden_states
580
+
581
+
582
+ class BertOnlyMLMHead(nn.Module):
583
+ def __init__(self, config):
584
+ super().__init__()
585
+ self.predictions = BertLMPredictionHead(config)
586
+
587
+ def forward(self, sequence_output):
588
+ prediction_scores = self.predictions(sequence_output)
589
+ return prediction_scores
590
+
591
+
592
+ class BertPreTrainedModel(PreTrainedModel):
593
+ """
594
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
595
+ models.
596
+ """
597
+
598
+ config_class = BertConfig
599
+ base_model_prefix = "bert"
600
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
601
+
602
+ def _init_weights(self, module):
603
+ """Initialize the weights"""
604
+ if isinstance(module, (nn.Linear, nn.Embedding)):
605
+ # Slightly different from the TF version which uses truncated_normal for initialization
606
+ # cf https://github.com/pytorch/pytorch/pull/5617
607
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
608
+ elif isinstance(module, nn.LayerNorm):
609
+ module.bias.data.zero_()
610
+ module.weight.data.fill_(1.0)
611
+ if isinstance(module, nn.Linear) and module.bias is not None:
612
+ module.bias.data.zero_()
613
+
614
+
615
+ class BertModel(BertPreTrainedModel):
616
+ """
617
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
618
+ cross-attention is added between the self-attention layers, following the architecture described in `Attention is
619
+ all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
620
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
621
+ To behave as a decoder, the model needs to be initialized with the :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
622
+ input to the forward pass.
623
+ """
624
+
625
+ def __init__(self, config, add_pooling_layer=False):
626
+ super().__init__(config)
627
+ self.config = config
628
+
629
+ self.embeddings = BertEmbeddings(config)
630
+
631
+ self.encoder = BertEncoder(config)
632
+
633
+ self.pooler = BertPooler(config) if add_pooling_layer else None
634
+
635
+ self.init_weights()
636
+
637
+ def get_input_embeddings(self):
638
+ return self.embeddings.word_embeddings
639
+
640
+ def set_input_embeddings(self, value):
641
+ self.embeddings.word_embeddings = value
642
+
643
+ def _prune_heads(self, heads_to_prune):
644
+ """
645
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
646
+ class PreTrainedModel
647
+ """
648
+ for layer, heads in heads_to_prune.items():
649
+ self.encoder.layer[layer].attention.prune_heads(heads)
650
+
651
+ def get_extended_attention_mask(
652
+ self,
653
+ attention_mask: Tensor,
654
+ input_shape: Tuple[int],
655
+ device: device,
656
+ is_decoder: bool,
657
+ has_query: bool = False,
658
+ ) -> Tensor:
659
+ """
660
+ Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
661
+
662
+ Arguments:
663
+ attention_mask (:obj:`torch.Tensor`):
664
+ Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
665
+ input_shape (:obj:`Tuple[int]`):
666
+ The shape of the input to the model.
667
+ device: (:obj:`torch.device`):
668
+ The device of the input to the model.
669
+
670
+ Returns:
671
+ :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
672
+ """
673
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
674
+ # ourselves in which case we just need to make it broadcastable to all heads.
675
+ if attention_mask.dim() == 3:
676
+ extended_attention_mask = attention_mask[:, None, :, :]
677
+ elif attention_mask.dim() == 2:
678
+ # Provided a padding mask of dimensions [batch_size, seq_length]
679
+ # - if the model is a decoder, apply a causal mask in addition to the padding mask
680
+ # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
681
+ if is_decoder:
682
+ batch_size, seq_length = input_shape
683
+
684
+ seq_ids = torch.arange(seq_length, device=device)
685
+ causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
686
+
687
+ # add a prefix ones mask to the causal mask
688
+ # causal and attention masks must have same type with pytorch version < 1.3
689
+ causal_mask = causal_mask.to(attention_mask.dtype)
690
+
691
+ if causal_mask.shape[1] < attention_mask.shape[1]:
692
+ prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
693
+ if has_query: # UniLM style attention mask
694
+ causal_mask = torch.cat(
695
+ [
696
+ torch.zeros(
697
+ (batch_size, prefix_seq_len, seq_length),
698
+ device=device,
699
+ dtype=causal_mask.dtype,
700
+ ),
701
+ causal_mask,
702
+ ],
703
+ axis=1,
704
+ )
705
+ causal_mask = torch.cat(
706
+ [
707
+ torch.ones(
708
+ (batch_size, causal_mask.shape[1], prefix_seq_len),
709
+ device=device,
710
+ dtype=causal_mask.dtype,
711
+ ),
712
+ causal_mask,
713
+ ],
714
+ axis=-1,
715
+ )
716
+ extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
717
+ else:
718
+ extended_attention_mask = attention_mask[:, None, None, :]
719
+ else:
720
+ raise ValueError("Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(input_shape, attention_mask.shape))
721
+
722
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
723
+ # masked positions, this operation will create a tensor which is 0.0 for
724
+ # positions we want to attend and -10000.0 for masked positions.
725
+ # Since we are adding it to the raw scores before the softmax, this is
726
+ # effectively the same as removing these entirely.
727
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
728
+ extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
729
+ return extended_attention_mask
730
+
731
+ def forward(
732
+ self,
733
+ input_ids=None,
734
+ attention_mask=None,
735
+ position_ids=None,
736
+ head_mask=None,
737
+ query_embeds=None,
738
+ encoder_hidden_states=None,
739
+ encoder_attention_mask=None,
740
+ past_key_values=None,
741
+ use_cache=None,
742
+ output_attentions=None,
743
+ output_hidden_states=None,
744
+ return_dict=None,
745
+ is_decoder=False,
746
+ ):
747
+ r"""
748
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
749
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
750
+ the model is configured as a decoder.
751
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
752
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
753
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
754
+ - 1 for tokens that are **not masked**,
755
+ - 0 for tokens that are **masked**.
756
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
757
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
758
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
759
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
760
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
761
+ use_cache (:obj:`bool`, `optional`):
762
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
763
+ decoding (see :obj:`past_key_values`).
764
+ """
765
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
766
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
767
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
768
+
769
+ # use_cache = use_cache if use_cache is not None else self.config.use_cache
770
+
771
+ if input_ids is None:
772
+ assert query_embeds is not None, "You have to specify query_embeds when input_ids is None"
773
+
774
+ # past_key_values_length
775
+ past_key_values_length = past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0
776
+
777
+ query_length = query_embeds.shape[1] if query_embeds is not None else 0
778
+
779
+ embedding_output = self.embeddings(
780
+ input_ids=input_ids,
781
+ position_ids=position_ids,
782
+ query_embeds=query_embeds,
783
+ past_key_values_length=past_key_values_length,
784
+ )
785
+
786
+ input_shape = embedding_output.size()[:-1]
787
+ batch_size, seq_length = input_shape
788
+ device = embedding_output.device
789
+
790
+ if attention_mask is None:
791
+ attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
792
+
793
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
794
+ # ourselves in which case we just need to make it broadcastable to all heads.
795
+ if is_decoder:
796
+ extended_attention_mask = self.get_extended_attention_mask(
797
+ attention_mask,
798
+ input_ids.shape,
799
+ device,
800
+ is_decoder,
801
+ has_query=(query_embeds is not None),
802
+ )
803
+ else:
804
+ extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device, is_decoder)
805
+
806
+ # If a 2D or 3D attention mask is provided for the cross-attention
807
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
808
+ if encoder_hidden_states is not None:
809
+ if type(encoder_hidden_states) == list:
810
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
811
+ else:
812
+ (
813
+ encoder_batch_size,
814
+ encoder_sequence_length,
815
+ _,
816
+ ) = encoder_hidden_states.size()
817
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
818
+
819
+ if type(encoder_attention_mask) == list:
820
+ encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
821
+ elif encoder_attention_mask is None:
822
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
823
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
824
+ else:
825
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
826
+ else:
827
+ encoder_extended_attention_mask = None
828
+
829
+ # Prepare head mask if needed
830
+ # 1.0 in head_mask indicate we keep the head
831
+ # attention_probs has shape bsz x n_heads x N x N
832
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
833
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
834
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
835
+
836
+ encoder_outputs = self.encoder(
837
+ embedding_output,
838
+ attention_mask=extended_attention_mask,
839
+ head_mask=head_mask,
840
+ encoder_hidden_states=encoder_hidden_states,
841
+ encoder_attention_mask=encoder_extended_attention_mask,
842
+ past_key_values=past_key_values,
843
+ use_cache=use_cache,
844
+ output_attentions=output_attentions,
845
+ output_hidden_states=output_hidden_states,
846
+ return_dict=return_dict,
847
+ query_length=query_length,
848
+ )
849
+ sequence_output = encoder_outputs[0]
850
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
851
+
852
+ if not return_dict:
853
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
854
+
855
+ return BaseModelOutputWithPoolingAndCrossAttentions(
856
+ last_hidden_state=sequence_output,
857
+ pooler_output=pooled_output,
858
+ past_key_values=encoder_outputs.past_key_values,
859
+ hidden_states=encoder_outputs.hidden_states,
860
+ attentions=encoder_outputs.attentions,
861
+ cross_attentions=encoder_outputs.cross_attentions,
862
+ )
863
+
864
+
865
+ class BertLMHeadModel(BertPreTrainedModel):
866
+
867
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
868
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
869
+
870
+ def __init__(self, config):
871
+ super().__init__(config)
872
+
873
+ self.bert = BertModel(config, add_pooling_layer=False)
874
+ self.cls = BertOnlyMLMHead(config)
875
+
876
+ self.init_weights()
877
+
878
+ def get_output_embeddings(self):
879
+ return self.cls.predictions.decoder
880
+
881
+ def set_output_embeddings(self, new_embeddings):
882
+ self.cls.predictions.decoder = new_embeddings
883
+
884
+ def forward(
885
+ self,
886
+ input_ids=None,
887
+ attention_mask=None,
888
+ position_ids=None,
889
+ head_mask=None,
890
+ query_embeds=None,
891
+ encoder_hidden_states=None,
892
+ encoder_attention_mask=None,
893
+ labels=None,
894
+ past_key_values=None,
895
+ use_cache=True,
896
+ output_attentions=None,
897
+ output_hidden_states=None,
898
+ return_dict=None,
899
+ return_logits=False,
900
+ is_decoder=True,
901
+ reduction="mean",
902
+ ):
903
+ r"""
904
+ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
905
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
906
+ the model is configured as a decoder.
907
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
908
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
909
+ the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
910
+ - 1 for tokens that are **not masked**,
911
+ - 0 for tokens that are **masked**.
912
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
913
+ Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
914
+ ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
915
+ ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
916
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
917
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
918
+ If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
919
+ (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
920
+ instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
921
+ use_cache (:obj:`bool`, `optional`):
922
+ If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
923
+ decoding (see :obj:`past_key_values`).
924
+ Returns:
925
+ Example::
926
+ >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
927
+ >>> import torch
928
+ >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
929
+ >>> config = BertConfig.from_pretrained("bert-base-cased")
930
+ >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config)
931
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
932
+ >>> outputs = model(**inputs)
933
+ >>> prediction_logits = outputs.logits
934
+ """
935
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
936
+ if labels is not None:
937
+ use_cache = False
938
+ if past_key_values is not None:
939
+ query_embeds = None
940
+
941
+ outputs = self.bert(
942
+ input_ids,
943
+ attention_mask=attention_mask,
944
+ position_ids=position_ids,
945
+ head_mask=head_mask,
946
+ query_embeds=query_embeds,
947
+ encoder_hidden_states=encoder_hidden_states,
948
+ encoder_attention_mask=encoder_attention_mask,
949
+ past_key_values=past_key_values,
950
+ use_cache=use_cache,
951
+ output_attentions=output_attentions,
952
+ output_hidden_states=output_hidden_states,
953
+ return_dict=return_dict,
954
+ is_decoder=is_decoder,
955
+ )
956
+
957
+ sequence_output = outputs[0]
958
+ if query_embeds is not None:
959
+ sequence_output = outputs[0][:, query_embeds.shape[1] :, :]
960
+
961
+ prediction_scores = self.cls(sequence_output)
962
+
963
+ if return_logits:
964
+ return prediction_scores[:, :-1, :].contiguous()
965
+
966
+ lm_loss = None
967
+ if labels is not None:
968
+ # we are doing next-token prediction; shift prediction scores and input ids by one
969
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
970
+ labels = labels[:, 1:].contiguous()
971
+ loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1)
972
+ lm_loss = loss_fct(
973
+ shifted_prediction_scores.view(-1, self.config.vocab_size),
974
+ labels.view(-1),
975
+ )
976
+ if reduction == "none":
977
+ lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1)
978
+
979
+ if not return_dict:
980
+ output = (prediction_scores,) + outputs[2:]
981
+ return ((lm_loss,) + output) if lm_loss is not None else output
982
+
983
+ return CausalLMOutputWithCrossAttentions(
984
+ loss=lm_loss,
985
+ logits=prediction_scores,
986
+ past_key_values=outputs.past_key_values,
987
+ hidden_states=outputs.hidden_states,
988
+ attentions=outputs.attentions,
989
+ cross_attentions=outputs.cross_attentions,
990
+ )
991
+
992
+ def prepare_inputs_for_generation(self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs):
993
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
994
+ if attention_mask is None:
995
+ attention_mask = input_ids.new_ones(input_ids.shape)
996
+ query_mask = input_ids.new_ones(query_embeds.shape[:-1])
997
+ attention_mask = torch.cat([query_mask, attention_mask], dim=-1)
998
+
999
+ # cut decoder_input_ids if past is used
1000
+ if past is not None:
1001
+ input_ids = input_ids[:, -1:]
1002
+
1003
+ return {
1004
+ "input_ids": input_ids,
1005
+ "query_embeds": query_embeds,
1006
+ "attention_mask": attention_mask,
1007
+ "past_key_values": past,
1008
+ "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None),
1009
+ "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None),
1010
+ "is_decoder": True,
1011
+ }
1012
+
1013
+ def _reorder_cache(self, past, beam_idx):
1014
+ reordered_past = ()
1015
+ for layer_past in past:
1016
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
1017
+ return reordered_past
1018
+
1019
+
1020
+ class BertForMaskedLM(BertPreTrainedModel):
1021
+
1022
+ _keys_to_ignore_on_load_unexpected = [r"pooler"]
1023
+ _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
1024
+
1025
+ def __init__(self, config):
1026
+ super().__init__(config)
1027
+
1028
+ self.bert = BertModel(config, add_pooling_layer=False)
1029
+ self.cls = BertOnlyMLMHead(config)
1030
+
1031
+ self.init_weights()
1032
+
1033
+ def get_output_embeddings(self):
1034
+ return self.cls.predictions.decoder
1035
+
1036
+ def set_output_embeddings(self, new_embeddings):
1037
+ self.cls.predictions.decoder = new_embeddings
1038
+
1039
+ def forward(
1040
+ self,
1041
+ input_ids=None,
1042
+ attention_mask=None,
1043
+ position_ids=None,
1044
+ head_mask=None,
1045
+ query_embeds=None,
1046
+ encoder_hidden_states=None,
1047
+ encoder_attention_mask=None,
1048
+ labels=None,
1049
+ output_attentions=None,
1050
+ output_hidden_states=None,
1051
+ return_dict=None,
1052
+ return_logits=False,
1053
+ is_decoder=False,
1054
+ ):
1055
+ r"""
1056
+ labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
1057
+ Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
1058
+ config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
1059
+ (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
1060
+ """
1061
+
1062
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1063
+
1064
+ outputs = self.bert(
1065
+ input_ids,
1066
+ attention_mask=attention_mask,
1067
+ position_ids=position_ids,
1068
+ head_mask=head_mask,
1069
+ query_embeds=query_embeds,
1070
+ encoder_hidden_states=encoder_hidden_states,
1071
+ encoder_attention_mask=encoder_attention_mask,
1072
+ output_attentions=output_attentions,
1073
+ output_hidden_states=output_hidden_states,
1074
+ return_dict=return_dict,
1075
+ is_decoder=is_decoder,
1076
+ )
1077
+
1078
+ sequence_output = outputs[0]
1079
+ if query_embeds is not None:
+ sequence_output = sequence_output[:, query_embeds.shape[1] :, :]
1080
+ prediction_scores = self.cls(sequence_output)
1081
+
1082
+ if return_logits:
1083
+ return prediction_scores
1084
+
1085
+ masked_lm_loss = None
1086
+ if labels is not None:
1087
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
1088
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
1089
+
1090
+ if not return_dict:
1091
+ output = (prediction_scores,) + outputs[2:]
1092
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
1093
+
1094
+ return MaskedLMOutput(
1095
+ loss=masked_lm_loss,
1096
+ logits=prediction_scores,
1097
+ hidden_states=outputs.hidden_states,
1098
+ attentions=outputs.attentions,
1099
+ )
1100
+
1101
+
1102
+ class Qformer(nn.Module):
1103
+ def __init__(self, model_args, vision_tower):
1104
+ super().__init__()
1105
+
1106
+ self.depth = model_args.mm_qformer_depth
1107
+ self.num_latents = model_args.mm_qformer_latents
1108
+ self.pretrained = model_args.mm_qformer_pretrained
1109
+
1110
+ self.Qformer, self.query_tokens, self.ln_vision = self.build_Qformer(vision_tower.hidden_size, self.depth, self.num_latents)
1111
+
1112
+ if self.pretrained is not None:
1113
+ pretrained_dict = torch.load(self.pretrained, map_location="cpu")["model"]
1114
+ pretrained_dict = {k: v for k, v in pretrained_dict.items() if not k.startswith("t5_proj")}
1115
+ self.load_state_dict(pretrained_dict)
1116
+
1117
+ def build_Qformer(self, vision_width, cross_attention_freq, num_query_token):
1118
+ encoder_config = BertConfig.from_pretrained("bert-base-uncased")
1119
+ encoder_config.encoder_width = vision_width
1120
+ # insert cross-attention layer every other block
1121
+ encoder_config.add_cross_attention = True
1122
+ encoder_config.cross_attention_freq = cross_attention_freq
1123
+ encoder_config.query_length = num_query_token
1124
+ Qformer = BertLMHeadModel(config=encoder_config)
1125
+ query_tokens = nn.Parameter(torch.zeros(1, num_query_token, encoder_config.hidden_size))
1126
+ query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
1127
+ Qformer.cls = None
1128
+ Qformer.bert.embeddings.word_embeddings = None
1129
+ Qformer.bert.embeddings.position_embeddings = None
1130
+ for layer in Qformer.bert.encoder.layer:
1131
+ layer.output = None
1132
+ layer.intermediate = None
1133
+ return Qformer, query_tokens, nn.LayerNorm(vision_width)
1134
+
1135
+ def forward(self, image_features, *args, **kwargs):
1136
+ x = self.ln_vision(image_features)
1137
+ image_atts = torch.ones(x.size()[:-1], dtype=torch.long).to(x.device)
1138
+
1139
+ query_tokens = self.query_tokens.expand(x.shape[0], -1, -1)
1140
+ query_output = self.Qformer.bert(
1141
+ query_embeds=query_tokens,
1142
+ encoder_hidden_states=x,
1143
+ encoder_attention_mask=image_atts,
1144
+ return_dict=True,
1145
+ )
1146
+
1147
+ return query_output.last_hidden_state
1148
+
1149
+ @property
1150
+ def hidden_size(self):
1151
+ return 768
1152
+
1153
+ @property
1154
+ def config(self):
1155
+ return {
1156
+ "mm_resampler_type": "qformer",
1157
+ "mm_qformer_depth": self.depth,
1158
+ "mm_qformer_latents": self.num_latents,
1159
+ "mm_qformer_pretrained": self.pretrained,
1160
+ }
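A minimal usage sketch of the Qformer resampler defined above (illustrative only, not part of the committed file): the SimpleNamespace objects stand in for the model_args and vision_tower attributes the constructor reads, and the feature shapes are assumed examples.

    from types import SimpleNamespace
    import torch
    from llava.model.multimodal_resampler.qformer import Qformer

    vision = SimpleNamespace(hidden_size=1024)   # hypothetical vision tower width
    args = SimpleNamespace(mm_qformer_depth=2, mm_qformer_latents=32, mm_qformer_pretrained=None)

    resampler = Qformer(args, vision)            # BERT-style Q-Former with 32 learned query tokens
    feats = torch.randn(2, 576, 1024)            # [batch, num_patches, vision hidden size]
    out = resampler(feats)                       # query tokens cross-attend to the image patches
    print(out.shape)                             # torch.Size([2, 32, 768]) -- bert-base hidden size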
VLMEvalKit-sudoku/llava/model/multimodal_resampler/spatial_pool.py ADDED
@@ -0,0 +1,45 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import math
4
+
5
+
6
+ class SpatialPool(nn.Module):
7
+ def __init__(self, model_args, vision_tower):
8
+ super().__init__()
9
+
10
+ self.mode = model_args.mm_spatial_pool_mode
11
+ self.stride = model_args.mm_spatial_pool_stride
12
+ self.out_channels = getattr(model_args, "mm_spatial_pool_out_channels", vision_tower.hidden_size)
13
+
14
+ if self.mode == "average":
15
+ self.pool = nn.AvgPool2d(kernel_size=self.stride, stride=self.stride)
16
+ elif self.mode == "max":
17
+ self.pool = nn.MaxPool2d(kernel_size=self.stride, stride=self.stride)
18
+ elif self.mode == "conv":
19
+ self.pool = nn.Conv2d(in_channels=vision_tower.hidden_size, out_channels=self.out_channels, kernel_size=self.stride, stride=self.stride)
20
+ else:
21
+ raise ValueError(f"Unknown pooling mode: {self.pool}.")
22
+
23
+ def forward(self, image_features, images, *args, **kwargs):
24
+ ori_W = int(math.sqrt(image_features.shape[1] * images.shape[3] // images.shape[2]))
25
+ ori_H = int(ori_W * images.shape[2] // images.shape[3])
26
+
27
+ B, _, F = image_features.shape
28
+
29
+ image_features_spatial = image_features.view(B, ori_H, ori_W, F).permute(0, 3, 1, 2)
30
+ image_features_spatial_pool = self.pool(image_features_spatial)
31
+
32
+ return image_features_spatial_pool.flatten(2).transpose(1, 2).contiguous()
33
+
34
+ @property
35
+ def config(self):
36
+ return {
37
+ "mm_resampler_type": "spatial_pool",
38
+ "mm_spatial_pool_stride": self.stride,
39
+ "mm_spatial_pool_mode": self.mode,
40
+ "mm_spatial_pool_out_channels": self.out_channels,
41
+ }
42
+
43
+ @property
44
+ def hidden_size(self):
45
+ return self.out_channels
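A rough shape sketch for the SpatialPool resampler above (illustrative only, assuming a square 24x24 patch grid); with stride s the number of visual tokens shrinks by roughly a factor of s**2.

    from types import SimpleNamespace
    import torch
    from llava.model.multimodal_resampler.spatial_pool import SpatialPool

    vision = SimpleNamespace(hidden_size=1024)   # hypothetical vision tower width
    args = SimpleNamespace(mm_spatial_pool_mode="average", mm_spatial_pool_stride=2)

    pool = SpatialPool(args, vision)
    feats = torch.randn(2, 576, 1024)            # 24x24 patches from a square image
    imgs = torch.randn(2, 3, 336, 336)           # only the aspect ratio of the raw images is used
    print(pool(feats, imgs).shape)               # torch.Size([2, 144, 1024]) with stride 2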
VLMEvalKit-sudoku/llava/serve/cli.py ADDED
@@ -0,0 +1,111 @@
1
+ import argparse
2
+ import torch
3
+
4
+ from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
5
+ from llava.conversation import conv_templates, SeparatorStyle
6
+ from llava.model.builder import load_pretrained_model
7
+ from llava.utils import disable_torch_init
8
+ from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
9
+
10
+ from PIL import Image
11
+
12
+ import requests
13
+ from PIL import Image
14
+ from io import BytesIO
15
+ from transformers import TextStreamer
16
+
17
+
18
+ def load_image(image_file):
19
+ if image_file.startswith("http") or image_file.startswith("https"):
20
+ response = requests.get(image_file)
21
+ image = Image.open(BytesIO(response.content)).convert("RGB")
22
+ else:
23
+ image = Image.open(image_file).convert("RGB")
24
+ return image
25
+
26
+
27
+ def main(args):
28
+ # Model
29
+ disable_torch_init()
30
+
31
+ model_name = get_model_name_from_path(args.model_path)
32
+ tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
33
+
34
+ if "llama-2" in model_name.lower():
35
+ conv_mode = "llava_llama_2"
36
+ elif "v1" in model_name.lower():
37
+ conv_mode = "llava_v1"
38
+ elif "mpt" in model_name.lower():
39
+ conv_mode = "mpt"
40
+ else:
41
+ conv_mode = "llava_v0"
42
+
43
+ if args.conv_mode is not None and conv_mode != args.conv_mode:
44
+ print("[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}".format(conv_mode, args.conv_mode, args.conv_mode))
45
+ else:
46
+ args.conv_mode = conv_mode
47
+
48
+ conv = conv_templates[args.conv_mode].copy()
49
+ if "mpt" in model_name.lower():
50
+ roles = ("user", "assistant")
51
+ else:
52
+ roles = conv.roles
53
+
54
+ image = load_image(args.image_file)
55
+ image_tensor = image_processor.preprocess(image, return_tensors="pt")["pixel_values"].half().cuda()
56
+
57
+ while True:
58
+ try:
59
+ inp = input(f"{roles[0]}: ")
60
+ except EOFError:
61
+ inp = ""
62
+ if not inp:
63
+ print("exit...")
64
+ break
65
+
66
+ print(f"{roles[1]}: ", end="")
67
+
68
+ if image is not None:
69
+ # first message
70
+ if model.config.mm_use_im_start_end:
71
+ inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + "\n" + inp
72
+ else:
73
+ inp = DEFAULT_IMAGE_TOKEN + "\n" + inp
74
+ conv.append_message(conv.roles[0], inp)
75
+ image = None
76
+ else:
77
+ # later messages
78
+ conv.append_message(conv.roles[0], inp)
79
+ conv.append_message(conv.roles[1], None)
80
+ prompt = conv.get_prompt()
81
+
82
+ input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt").unsqueeze(0).cuda()
83
+ stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
84
+ keywords = [stop_str]
85
+ stopping_criteria = KeywordsStoppingCriteria(keywords, tokenizer, input_ids)
86
+ streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
87
+
88
+ with torch.inference_mode():
89
+ output_ids = model.generate(input_ids, images=image_tensor, do_sample=True, temperature=0.2, max_new_tokens=1024, streamer=streamer, use_cache=True, stopping_criteria=[stopping_criteria])
90
+
91
+ outputs = tokenizer.decode(output_ids[0, input_ids.shape[1] :]).strip()
92
+ conv.messages[-1][-1] = outputs
93
+
94
+ if args.debug:
95
+ print("\n", {"prompt": prompt, "outputs": outputs}, "\n")
96
+
97
+
98
+ if __name__ == "__main__":
99
+ parser = argparse.ArgumentParser()
100
+ parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
101
+ parser.add_argument("--model-base", type=str, default=None)
102
+ parser.add_argument("--image-file", type=str, required=True)
103
+ parser.add_argument("--num-gpus", type=int, default=1)
104
+ parser.add_argument("--conv-mode", type=str, default=None)
105
+ parser.add_argument("--temperature", type=float, default=0.2)
106
+ parser.add_argument("--max-new-tokens", type=int, default=512)
107
+ parser.add_argument("--load-8bit", action="store_true")
108
+ parser.add_argument("--load-4bit", action="store_true")
109
+ parser.add_argument("--debug", action="store_true")
110
+ args = parser.parse_args()
111
+ main(args)
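Judging from the argparse flags above, a typical invocation of this CLI would look something like `python -m llava.serve.cli --model-path <checkpoint-or-hub-id> --image-file <path-or-url> [--load-4bit]` (the paths are placeholders); the script streams the assistant's reply with TextStreamer and exits when an empty prompt is entered.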
VLMEvalKit-sudoku/llava/serve/controller.py ADDED
@@ -0,0 +1,287 @@
1
+ """
2
+ A controller manages distributed workers.
3
+ It sends worker addresses to clients.
4
+ """
5
+
6
+ import argparse
7
+ import asyncio
8
+ import dataclasses
9
+ from enum import Enum, auto
10
+ import json
11
+ import logging
12
+ import time
13
+ from typing import List, Union
14
+ import threading
15
+
16
+ from fastapi import FastAPI, Request
17
+ from fastapi.responses import StreamingResponse
18
+ import numpy as np
19
+ import requests
20
+ import uvicorn
21
+
22
+ from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION
23
+ from llava.utils import build_logger, server_error_msg
24
+
25
+
26
+ logger = build_logger("controller", "controller.log")
27
+
28
+
29
+ class DispatchMethod(Enum):
30
+ LOTTERY = auto()
31
+ SHORTEST_QUEUE = auto()
32
+
33
+ @classmethod
34
+ def from_str(cls, name):
35
+ if name == "lottery":
36
+ return cls.LOTTERY
37
+ elif name == "shortest_queue":
38
+ return cls.SHORTEST_QUEUE
39
+ else:
40
+ raise ValueError(f"Invalid dispatch method")
41
+
42
+
43
+ @dataclasses.dataclass
44
+ class WorkerInfo:
45
+ model_names: List[str]
46
+ speed: int
47
+ queue_length: int
48
+ check_heart_beat: bool
49
+ last_heart_beat: str
50
+
51
+
52
+ def heart_beat_controller(controller):
53
+ while True:
54
+ time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION)
55
+ controller.remove_stable_workers_by_expiration()
56
+
57
+
58
+ class Controller:
59
+ def __init__(self, dispatch_method: str):
60
+ # Dict[str -> WorkerInfo]
61
+ self.worker_info = {}
62
+ self.dispatch_method = DispatchMethod.from_str(dispatch_method)
63
+
64
+ self.heart_beat_thread = threading.Thread(target=heart_beat_controller, args=(self,))
65
+ self.heart_beat_thread.start()
66
+
67
+ logger.info("Init controller")
68
+
69
+ def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict):
70
+ if worker_name not in self.worker_info:
71
+ logger.info(f"Register a new worker: {worker_name}")
72
+ else:
73
+ logger.info(f"Register an existing worker: {worker_name}")
74
+
75
+ if not worker_status:
76
+ worker_status = self.get_worker_status(worker_name)
77
+ if not worker_status:
78
+ return False
79
+
80
+ self.worker_info[worker_name] = WorkerInfo(worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], check_heart_beat, time.time())
81
+
82
+ logger.info(f"Register done: {worker_name}, {worker_status}")
83
+ return True
84
+
85
+ def get_worker_status(self, worker_name: str):
86
+ try:
87
+ r = requests.post(worker_name + "/worker_get_status", timeout=5)
88
+ except requests.exceptions.RequestException as e:
89
+ logger.error(f"Get status fails: {worker_name}, {e}")
90
+ return None
91
+
92
+ if r.status_code != 200:
93
+ logger.error(f"Get status fails: {worker_name}, {r}")
94
+ return None
95
+
96
+ return r.json()
97
+
98
+ def remove_worker(self, worker_name: str):
99
+ del self.worker_info[worker_name]
100
+
101
+ def refresh_all_workers(self):
102
+ old_info = dict(self.worker_info)
103
+ self.worker_info = {}
104
+
105
+ for w_name, w_info in old_info.items():
106
+ if not self.register_worker(w_name, w_info.check_heart_beat, None):
107
+ logger.info(f"Remove stale worker: {w_name}")
108
+
109
+ def list_models(self):
110
+ model_names = set()
111
+
112
+ for w_name, w_info in self.worker_info.items():
113
+ model_names.update(w_info.model_names)
114
+
115
+ return list(model_names)
116
+
117
+ def get_worker_address(self, model_name: str):
118
+ if self.dispatch_method == DispatchMethod.LOTTERY:
119
+ worker_names = []
120
+ worker_speeds = []
121
+ for w_name, w_info in self.worker_info.items():
122
+ if model_name in w_info.model_names:
123
+ worker_names.append(w_name)
124
+ worker_speeds.append(w_info.speed)
125
+ worker_speeds = np.array(worker_speeds, dtype=np.float32)
126
+ norm = np.sum(worker_speeds)
127
+ if norm < 1e-4:
128
+ return ""
129
+ worker_speeds = worker_speeds / norm
130
+ if True: # Directly return address
131
+ pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
132
+ worker_name = worker_names[pt]
133
+ return worker_name
134
+
135
+ # Check status before returning
136
+ while True:
137
+ pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
138
+ worker_name = worker_names[pt]
139
+
140
+ if self.get_worker_status(worker_name):
141
+ break
142
+ else:
143
+ self.remove_worker(worker_name)
144
+ worker_speeds[pt] = 0
145
+ norm = np.sum(worker_speeds)
146
+ if norm < 1e-4:
147
+ return ""
148
+ worker_speeds = worker_speeds / norm
149
+ continue
150
+ return worker_name
151
+ elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
152
+ worker_names = []
153
+ worker_qlen = []
154
+ for w_name, w_info in self.worker_info.items():
155
+ if model_name in w_info.model_names:
156
+ worker_names.append(w_name)
157
+ worker_qlen.append(w_info.queue_length / w_info.speed)
158
+ if len(worker_names) == 0:
159
+ return ""
160
+ min_index = np.argmin(worker_qlen)
161
+ w_name = worker_names[min_index]
162
+ self.worker_info[w_name].queue_length += 1
163
+ logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
164
+ return w_name
165
+ else:
166
+ raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")
167
+
168
+ def receive_heart_beat(self, worker_name: str, queue_length: int):
169
+ if worker_name not in self.worker_info:
170
+ logger.info(f"Receive unknown heart beat. {worker_name}")
171
+ return False
172
+
173
+ self.worker_info[worker_name].queue_length = queue_length
174
+ self.worker_info[worker_name].last_heart_beat = time.time()
175
+ logger.info(f"Receive heart beat. {worker_name}")
176
+ return True
177
+
178
+ def remove_stale_workers_by_expiration(self):
179
+ expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
180
+ to_delete = []
181
+ for worker_name, w_info in self.worker_info.items():
182
+ if w_info.check_heart_beat and w_info.last_heart_beat < expire:
183
+ to_delete.append(worker_name)
184
+
185
+ for worker_name in to_delete:
186
+ self.remove_worker(worker_name)
187
+
188
+ def worker_api_generate_stream(self, params):
189
+ worker_addr = self.get_worker_address(params["model"])
190
+ if not worker_addr:
191
+ logger.info(f"no worker: {params['model']}")
192
+ ret = {
193
+ "text": server_error_msg,
194
+ "error_code": 2,
195
+ }
196
+ yield json.dumps(ret).encode() + b"\0"
+ return  # no worker can serve this model; stop before posting to an empty address
197
+
198
+ try:
199
+ response = requests.post(worker_addr + "/worker_generate_stream", json=params, stream=True, timeout=5)
200
+ for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
201
+ if chunk:
202
+ yield chunk + b"\0"
203
+ except requests.exceptions.RequestException as e:
204
+ logger.info(f"worker timeout: {worker_addr}")
205
+ ret = {
206
+ "text": server_error_msg,
207
+ "error_code": 3,
208
+ }
209
+ yield json.dumps(ret).encode() + b"\0"
210
+
211
+ # Let the controller act as a worker to achieve hierarchical
212
+ # management. This can be used to connect isolated sub networks.
213
+ def worker_api_get_status(self):
214
+ model_names = set()
215
+ speed = 0
216
+ queue_length = 0
217
+
218
+ for w_name in self.worker_info:
219
+ worker_status = self.get_worker_status(w_name)
220
+ if worker_status is not None:
221
+ model_names.update(worker_status["model_names"])
222
+ speed += worker_status["speed"]
223
+ queue_length += worker_status["queue_length"]
224
+
225
+ return {
226
+ "model_names": list(model_names),
227
+ "speed": speed,
228
+ "queue_length": queue_length,
229
+ }
230
+
231
+
232
+ app = FastAPI()
233
+
234
+
235
+ @app.post("/register_worker")
236
+ async def register_worker(request: Request):
237
+ data = await request.json()
238
+ controller.register_worker(data["worker_name"], data["check_heart_beat"], data.get("worker_status", None))
239
+
240
+
241
+ @app.post("/refresh_all_workers")
242
+ async def refresh_all_workers():
243
+ models = controller.refresh_all_workers()
244
+
245
+
246
+ @app.post("/list_models")
247
+ async def list_models():
248
+ models = controller.list_models()
249
+ return {"models": models}
250
+
251
+
252
+ @app.post("/get_worker_address")
253
+ async def get_worker_address(request: Request):
254
+ data = await request.json()
255
+ addr = controller.get_worker_address(data["model"])
256
+ return {"address": addr}
257
+
258
+
259
+ @app.post("/receive_heart_beat")
260
+ async def receive_heart_beat(request: Request):
261
+ data = await request.json()
262
+ exist = controller.receive_heart_beat(data["worker_name"], data["queue_length"])
263
+ return {"exist": exist}
264
+
265
+
266
+ @app.post("/worker_generate_stream")
267
+ async def worker_api_generate_stream(request: Request):
268
+ params = await request.json()
269
+ generator = controller.worker_api_generate_stream(params)
270
+ return StreamingResponse(generator)
271
+
272
+
273
+ @app.post("/worker_get_status")
274
+ async def worker_api_get_status(request: Request):
275
+ return controller.worker_api_get_status()
276
+
277
+
278
+ if __name__ == "__main__":
279
+ parser = argparse.ArgumentParser()
280
+ parser.add_argument("--host", type=str, default="localhost")
281
+ parser.add_argument("--port", type=int, default=21001)
282
+ parser.add_argument("--dispatch-method", type=str, choices=["lottery", "shortest_queue"], default="shortest_queue")
283
+ args = parser.parse_args()
284
+ logger.info(f"args: {args}")
285
+
286
+ controller = Controller(args.dispatch_method)
287
+ uvicorn.run(app, host=args.host, port=args.port, log_level="info")
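A minimal sketch of how a worker and a client might talk to the controller above over its HTTP API, assuming the default host/port from the argument parser (localhost:21001); the worker address and model name are placeholders:

    import requests

    controller = "http://localhost:21001"

    # A (hypothetical) worker announces itself; the status dict mirrors /worker_get_status
    # (model_names, speed, queue_length).
    requests.post(controller + "/register_worker", json={
        "worker_name": "http://localhost:40000",  # placeholder worker address
        "check_heart_beat": True,
        "worker_status": {"model_names": ["llava-v1.5-7b"], "speed": 1, "queue_length": 0},
    })

    # A client then lists the served models and asks which worker to send requests to.
    models = requests.post(controller + "/list_models").json()["models"]
    addr = requests.post(controller + "/get_worker_address", json={"model": models[0]}).json()["address"]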
VLMEvalKit-sudoku/llava/serve/test_message.py ADDED
@@ -0,0 +1,59 @@
1
+ import argparse
2
+ import json
3
+
4
+ import requests
5
+
6
+ from llava.conversation import default_conversation
7
+
8
+
9
+ def main():
10
+ if args.worker_address:
11
+ worker_addr = args.worker_address
12
+ else:
13
+ controller_addr = args.controller_address
14
+ ret = requests.post(controller_addr + "/refresh_all_workers")
15
+ ret = requests.post(controller_addr + "/list_models")
16
+ models = ret.json()["models"]
17
+ models.sort()
18
+ print(f"Models: {models}")
19
+
20
+ ret = requests.post(controller_addr + "/get_worker_address", json={"model": args.model_name})
21
+ worker_addr = ret.json()["address"]
22
+ print(f"worker_addr: {worker_addr}")
23
+
24
+ if worker_addr == "":
25
+ return
26
+
27
+ conv = default_conversation.copy()
28
+ conv.append_message(conv.roles[0], args.message)
29
+ prompt = conv.get_prompt()
30
+
31
+ headers = {"User-Agent": "LLaVA Client"}
32
+ pload = {
33
+ "model": args.model_name,
34
+ "prompt": prompt,
35
+ "max_new_tokens": args.max_new_tokens,
36
+ "temperature": 0.7,
37
+ "stop": conv.sep,
38
+ }
39
+ response = requests.post(worker_addr + "/worker_generate_stream", headers=headers, json=pload, stream=True)
40
+
41
+ print(prompt.replace(conv.sep, "\n"), end="")
42
+ for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
43
+ if chunk:
44
+ data = json.loads(chunk.decode("utf-8"))
45
+ output = data["text"].split(conv.sep)[-1]
46
+ print(output, end="\r")
47
+ print("")
48
+
49
+
50
+ if __name__ == "__main__":
51
+ parser = argparse.ArgumentParser()
52
+ parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
53
+ parser.add_argument("--worker-address", type=str)
54
+ parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
55
+ parser.add_argument("--max-new-tokens", type=int, default=32)
56
+ parser.add_argument("--message", type=str, default="Tell me a story with more than 1000 words.")
57
+ args = parser.parse_args()
58
+
59
+ main()
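Assuming the module path implied by the file location above (llava/serve/test_message.py), this script doubles as a quick end-to-end smoke test against a running controller; the model name must match one that a worker has actually registered:

    python -m llava.serve.test_message --controller-address http://localhost:21001 --model-name llava-v1.5-7b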
VLMEvalKit-sudoku/llava/train/__pycache__/llava_trainer.cpython-310.pyc ADDED
Binary file (22.4 kB). View file
 
VLMEvalKit-sudoku/llava/train/__pycache__/train.cpython-310.pyc ADDED
Binary file (57.5 kB). View file
 
VLMEvalKit-sudoku/llava/train/llama_flash_attn_monkey_patch.py ADDED
@@ -0,0 +1,87 @@
1
+ from typing import Optional, Tuple
2
+ import warnings
3
+
4
+ import torch
5
+
6
+ import transformers
7
+ from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv
8
+
9
+ try:
10
+ from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
11
+ except ImportError:
12
+ from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
13
+ from flash_attn.bert_padding import unpad_input, pad_input
14
+
15
+
16
+ def forward(
17
+ self,
18
+ hidden_states: torch.Tensor,
19
+ attention_mask: Optional[torch.Tensor] = None,
20
+ position_ids: Optional[torch.Tensor] = None,
21
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
22
+ output_attentions: bool = False,
23
+ use_cache: bool = False,
24
+ padding_mask: Optional[torch.Tensor] = None,
25
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
26
+ if output_attentions:
27
+ warnings.warn("Output attentions is not supported for patched `LlamaAttention`, returning `None` instead.")
28
+
29
+ bsz, q_len, _ = hidden_states.size()
30
+
31
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
32
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
33
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) # shape: (b, num_heads, s, head_dim)
34
+
35
+ kv_seq_len = key_states.shape[-2]
36
+ if past_key_value is not None:
37
+ kv_seq_len += past_key_value[0].shape[-2]
38
+
39
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
40
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
41
+
42
+ if past_key_value is not None:
43
+ # reuse k, v
44
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
45
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
46
+
47
+ past_key_value = (key_states, value_states) if use_cache else None
48
+
49
+ # repeat k/v heads if n_kv_heads < n_heads
50
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
51
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
52
+
53
+ # Transform the data into the format required by flash attention
54
+ qkv = torch.stack([query_states, key_states, value_states], dim=2)
55
+ qkv = qkv.transpose(1, 3) # shape: [b, s, 3, num_heads, head_dim]
56
+ key_padding_mask = attention_mask
57
+
58
+ if key_padding_mask is None:
59
+ qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim)
60
+ cu_q_lens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device)
61
+ max_s = q_len
62
+ output = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
63
+ output = output.view(bsz, q_len, -1)
64
+ else:
65
+ qkv = qkv.reshape(bsz, q_len, -1)
66
+ qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask)
67
+ qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
68
+ output_unpad = flash_attn_unpadded_qkvpacked_func(qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True)
69
+ output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim)
70
+ output = pad_input(output_unpad, indices, bsz, q_len)
71
+
72
+ return self.o_proj(output), None, past_key_value
73
+
74
+
75
+ # Disable the transformation of the attention mask in LlamaModel as the flash attention
76
+ # requires the attention mask to be the same as the key_padding_mask
77
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
78
+ # [bsz, seq_len]
79
+ return attention_mask
80
+
81
+
82
+ def replace_llama_attn_with_flash_attn():
83
+ cuda_major, cuda_minor = torch.cuda.get_device_capability()
84
+ if cuda_major < 8:
85
+ warnings.warn("Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward. " "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593")
86
+ transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = _prepare_decoder_attention_mask
87
+ transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
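A minimal usage sketch, assuming a transformers release old enough to still expose LlamaModel._prepare_decoder_attention_mask (which this patch overrides); the checkpoint path is a placeholder:

    import torch
    from transformers import LlamaForCausalLM

    from llava.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn

    # Patch LlamaAttention.forward and the attention-mask preparation first, then load the
    # model so every decoder layer picks up the flash-attention path.
    replace_llama_attn_with_flash_attn()
    model = LlamaForCausalLM.from_pretrained("path/to/llama-checkpoint", torch_dtype=torch.float16)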
VLMEvalKit-sudoku/llava/train/llava_trainer.py ADDED
@@ -0,0 +1,557 @@
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ import datetime
5
+
6
+ from accelerate import Accelerator
7
+ from accelerate.utils import InitProcessGroupKwargs, GradientAccumulationPlugin, DataLoaderConfiguration
8
+ from torch.utils.data import Dataset, Sampler, DataLoader
9
+
10
+ from trl.trainer import DPOTrainer
11
+ from trl.trainer.utils import DPODataCollatorWithPadding
12
+
13
+ from transformers import Trainer
14
+
15
+
16
+ from transformers.utils import is_sagemaker_mp_enabled, logging, is_accelerate_available, is_datasets_available
17
+ logger = logging.get_logger(__name__)
18
+ from transformers.trainer_pt_utils import get_parameter_names
19
+ from transformers.trainer_utils import has_length
20
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
21
+
22
+ from transformers.trainer_utils import seed_worker
23
+ from transformers.trainer_pt_utils import get_length_grouped_indices as get_length_grouped_indices_hf
24
+ from transformers.trainer_pt_utils import AcceleratorConfig
25
+ from typing import List, Optional
26
+ from datetime import timedelta
27
+
28
+ if is_accelerate_available():
29
+ from accelerate import Accelerator, skip_first_batches, InitProcessGroupKwargs
30
+
31
+ if is_datasets_available():
32
+ import datasets
33
+
34
+ from llava.utils import rank0_print
35
+
36
+
37
+ def maybe_zero_3(param, ignore_status=False, name=None):
38
+ from deepspeed import zero
39
+ from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
40
+
41
+ if hasattr(param, "ds_id"):
42
+ if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
43
+ if not ignore_status:
44
+ print(name, "no ignore status")
45
+ with zero.GatheredParameters([param]):
46
+ param = param.data.detach().cpu().clone()
47
+ else:
48
+ param = param.detach().cpu().clone()
49
+ return param
50
+
51
+
52
+ def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
53
+ to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
54
+ to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()}
55
+ return to_return
56
+
57
+
58
+ def split_to_even_chunks(indices, lengths, num_chunks):
59
+ """
60
+ Split a list of indices into `num_chunks` chunks of roughly equal total length.
61
+ """
62
+
63
+ if len(indices) % num_chunks != 0:
64
+ return [indices[i::num_chunks] for i in range(num_chunks)]
65
+
66
+ num_indices_per_chunk = len(indices) // num_chunks
67
+
68
+ chunks = [[] for _ in range(num_chunks)]
69
+ chunks_lengths = [0 for _ in range(num_chunks)]
70
+ for index in indices:
71
+ shortest_chunk = chunks_lengths.index(min(chunks_lengths))
72
+ chunks[shortest_chunk].append(index)
73
+ chunks_lengths[shortest_chunk] += lengths[index]
74
+ if len(chunks[shortest_chunk]) == num_indices_per_chunk:
75
+ chunks_lengths[shortest_chunk] = float("inf")
76
+
77
+ return chunks
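+ # Worked example: split_to_even_chunks([0, 1, 2, 3], lengths=[5, 1, 4, 2], num_chunks=2)
+ # greedily assigns each index to the currently shortest chunk and returns [[0, 3], [1, 2]].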
78
+
79
+
80
+ def get_variable_length_grouped_indices(lengths, batch_size, world_size, megabatch_mult=8, generator=None):
81
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
82
+ indices = torch.randperm(len(lengths), generator=generator)
83
+ sorted_indices = sorted(range(len(lengths)), key=lambda i: lengths[i], reverse=True)
84
+ megabatch_size = world_size * batch_size * megabatch_mult
85
+ megabatches = [sorted_indices[i : i + megabatch_size] for i in range(0, len(lengths), megabatch_size)]
86
+ megabatches = [sorted(megabatch, key=lambda i: indices[i], reverse=True) for megabatch in megabatches]
87
+ shuffled_indices = [i for megabatch in megabatches for i in megabatch]
88
+ world_batch_size = world_size * batch_size
89
+ batches = [shuffled_indices[i : i + world_batch_size] for i in range(0, len(lengths), world_batch_size)]
90
+ batch_indices = torch.randperm(len(batches), generator=generator)
91
+ batches = [batches[i] for i in batch_indices]
92
+
93
+ return [i for batch in batches for i in batch]
94
+
95
+
96
+ def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None):
97
+ """
98
+ Return a list of indices so that each slice of `batch_size` consecutive indices corresponds to elements of similar
99
+ lengths. To do this, the indices are:
100
+
101
+ - randomly permuted
102
+ - grouped in mega-batches of size `world_size * batch_size`
103
+ - reordered by length within each mega-batch
104
+
105
+ The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of
106
+ maximum length placed first, so that an OOM happens sooner rather than later.
107
+ """
108
+
109
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
110
+ assert all(l != 0 for l in lengths), "Should not have zero length."
111
+ if all(l > 0 for l in lengths) or all(l < 0 for l in lengths):
112
+ # all samples are in the same modality
113
+ return get_length_grouped_indices(lengths, batch_size, world_size, generator=generator)
114
+ mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
115
+ lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])
116
+
117
+ mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)]
118
+ lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=None)]
119
+ megabatch_size = world_size * batch_size
120
+ mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
121
+ lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]
122
+
123
+ last_mm = mm_megabatches[-1]
124
+ last_lang = lang_megabatches[-1]
125
+ additional_batch = last_mm + last_lang
126
+ megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
127
+ megabatch_indices = torch.randperm(len(megabatches), generator=generator)
128
+ megabatches = [megabatches[i] for i in megabatch_indices]
129
+
130
+ if len(additional_batch) > 0:
131
+ megabatches.append(sorted(additional_batch))
132
+
133
+ return [i for megabatch in megabatches for i in megabatch]
134
+
135
+
136
+ def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True):
137
+ """
138
+ Return a list of indices so that each slice of `batch_size` consecutive indices corresponds to elements of similar
139
+ lengths. To do this, the indices are:
140
+
141
+ - randomly permuted
142
+ - grouped in mega-batches of size `world_size * batch_size`
143
+ - reordered by length within each mega-batch
144
+
145
+ The result is the concatenation of all mega-batches, with the batch of `batch_size` containing the element of
146
+ maximum length placed first, so that an OOM happens sooner rather than later.
147
+ """
148
+
149
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
150
+ indices = torch.randperm(len(lengths), generator=generator)
151
+ megabatch_size = world_size * batch_size
152
+ megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
153
+ megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]
154
+ megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches]
155
+
156
+ return [i for megabatch in megabatches for batch in megabatch for i in batch]
157
+
158
+
159
+ def get_length_grouped_indices_auto_single(lengths, batch_size, world_size, generator=None):
160
+ indices = get_length_grouped_indices_hf(lengths, batch_size * world_size, generator=generator)
161
+
162
+ megabatch_size = world_size * batch_size
163
+ megabatches = [indices[i : i + megabatch_size] for i in range(0, len(lengths), megabatch_size)]
164
+ megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]
165
+ megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches]
166
+
167
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
168
+ batch_indices = torch.randperm(len(megabatches), generator=generator)
169
+ megabatches = [megabatches[i] for i in batch_indices]
170
+
171
+ return [i for megabatch in megabatches for batch in megabatch for i in batch]
172
+
173
+
174
+ def get_modality_length_grouped_indices_auto(lengths, batch_size, world_size, generator=None):
175
+ # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
176
+ assert all(l != 0 for l in lengths), "Should not have zero length."
177
+ if all(l > 0 for l in lengths) or all(l < 0 for l in lengths):
178
+ # all samples are in the same modality
179
+ return get_length_grouped_indices_auto_single(lengths, batch_size, world_size, generator=generator)
180
+ mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
181
+ lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])
182
+
183
+ mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices_auto_single(mm_lengths, batch_size, world_size, generator=None)]
184
+ lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices_auto_single(lang_lengths, batch_size, world_size, generator=None)]
185
+ megabatch_size = world_size * batch_size
186
+ mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
187
+ lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]
188
+
189
+ last_mm = mm_megabatches[-1]
190
+ last_lang = lang_megabatches[-1]
191
+ additional_batch = last_mm + last_lang
192
+ megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
193
+ megabatch_indices = torch.randperm(len(megabatches), generator=generator)
194
+ megabatches = [megabatches[i] for i in megabatch_indices]
195
+
196
+ # FIXME: Hard code to avoid last batch mixed with different modalities
197
+ # if len(additional_batch) > 0:
198
+ # megabatches.append(sorted(additional_batch))
199
+
200
+ return [i for megabatch in megabatches for i in megabatch]
201
+
202
+
203
+ class LengthGroupedSampler(Sampler):
204
+ r"""
205
+ Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
206
+ keeping a bit of randomness.
207
+ """
208
+
209
+ def __init__(
210
+ self,
211
+ batch_size: int,
212
+ world_size: int,
213
+ lengths: Optional[List[int]] = None,
214
+ generator=None,
215
+ variable_length: bool = False,
216
+ group_by_modality: bool = False,
217
+ group_by_modality_auto: bool = False,
218
+ ):
219
+ if lengths is None:
220
+ raise ValueError("Lengths must be provided.")
221
+
222
+ self.batch_size = batch_size
223
+ self.world_size = world_size
224
+ self.lengths = lengths
225
+ self.generator = generator
226
+ self.variable_length = variable_length
227
+ self.group_by_modality = group_by_modality
228
+ self.group_by_modality_auto = group_by_modality_auto
229
+
230
+ def __len__(self):
231
+ return len(self.lengths)
232
+
233
+ def __iter__(self):
234
+ if self.variable_length:
235
+ assert not self.group_by_modality, "Variable length grouping is not supported with modality grouping."
236
+ indices = get_variable_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
237
+ else:
238
+ if self.group_by_modality:
239
+ indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
240
+ elif self.group_by_modality_auto:
241
+ indices = get_modality_length_grouped_indices_auto(self.lengths, self.batch_size, self.world_size, generator=self.generator)
242
+ else:
243
+ indices = get_length_grouped_indices_auto_single(self.lengths, self.batch_size, self.world_size, generator=self.generator)
244
+ return iter(indices)
245
+
246
+
247
+ class LLaVATrainer(Trainer):
248
+
249
+ def create_accelerator_and_postprocess(self):
250
+ grad_acc_kwargs = {"num_steps": self.args.gradient_accumulation_steps}
251
+ grad_acc_kwargs["sync_with_dataloader"] = False
252
+ gradient_accumulation_plugin = GradientAccumulationPlugin(**grad_acc_kwargs)
253
+
254
+ accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
255
+ rank0_print("Setting NCCL timeout to INF to avoid running errors.")
256
+
257
+ dataloader_config = DataLoaderConfiguration()
258
+
259
+ # create accelerator object
260
+ # self.accelerator = Accelerator(dispatch_batches=self.args.dispatch_batches, split_batches=self.args.split_batches, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, kwargs_handlers=[accelerator_kwargs])
261
+ self.accelerator = Accelerator(dataloader_config=dataloader_config, deepspeed_plugin=self.args.deepspeed_plugin, gradient_accumulation_plugin=gradient_accumulation_plugin, kwargs_handlers=[accelerator_kwargs])
262
+ # some Trainer classes need to use `gather` instead of `gather_for_metrics`, thus we store a flag
263
+ self.gather_function = self.accelerator.gather_for_metrics
264
+
265
+ # deepspeed and accelerate flags covering both trainer args and accelerate launcher
266
+ self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
267
+ self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
268
+
269
+ # post accelerator creation setup
270
+ if self.is_fsdp_enabled:
271
+ fsdp_plugin = self.accelerator.state.fsdp_plugin
272
+ fsdp_plugin.limit_all_gathers = self.args.fsdp_config.get("limit_all_gathers", fsdp_plugin.limit_all_gathers)
273
+ if is_accelerate_available("0.23.0"):
274
+ fsdp_plugin.activation_checkpointing = self.args.fsdp_config.get("activation_checkpointing", fsdp_plugin.activation_checkpointing)
275
+ if fsdp_plugin.activation_checkpointing and self.args.gradient_checkpointing:
276
+ raise ValueError("The activation_checkpointing in FSDP config and the gradient_checkpointing in training arg " "can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic " "when using FSDP.")
277
+
278
+ if self.is_deepspeed_enabled and getattr(self.args, "hf_deepspeed_config", None) is None:
279
+ self.propagate_args_to_deepspeed()
280
+
281
+ def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
282
+ if self.train_dataset is None or not has_length(self.train_dataset):
283
+ return None
284
+
285
+ if self.args.group_by_length:
286
+ lengths = self.train_dataset.lengths
287
+ return LengthGroupedSampler(
288
+ # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
289
+ self.args.train_batch_size,
290
+ # world_size=self.args.world_size,
291
+ world_size=self.args.world_size * self.args.gradient_accumulation_steps, # TODO: seems that this may work?
292
+ lengths=lengths,
293
+ )
294
+ elif self.args.group_by_modality_length:
295
+ lengths = self.train_dataset.modality_lengths
296
+ return LengthGroupedSampler(
297
+ # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
298
+ self.args.train_batch_size,
299
+ # world_size=self.args.world_size,
300
+ world_size=self.args.world_size * self.args.gradient_accumulation_steps, # TODO: seems that this may work?
301
+ lengths=lengths,
302
+ group_by_modality=True,
303
+ )
304
+ elif self.args.group_by_modality_length_auto:
305
+ lengths = self.train_dataset.modality_lengths
306
+ return LengthGroupedSampler(
307
+ # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
308
+ self.args.train_batch_size,
309
+ # world_size=self.args.world_size,
310
+ world_size=self.args.world_size * self.args.gradient_accumulation_steps, # TODO: seems that this may work?
311
+ lengths=lengths,
312
+ group_by_modality_auto=True,
313
+ )
314
+ elif self.args.group_by_varlen:
315
+ lengths = self.train_dataset.lengths
316
+ return LengthGroupedSampler(
317
+ self.args.train_batch_size * self.args.gradient_accumulation_steps,
318
+ # self.args.train_batch_size, # TODO: seems that we should have gradient_accumulation_steps
319
+ # world_size=self.args.world_size,
320
+ world_size=self.args.world_size * self.args.gradient_accumulation_steps, # TODO: seems that this may work?
321
+ lengths=lengths,
322
+ variable_length=True,
323
+ )
324
+ else:
325
+ return super()._get_train_sampler()
326
+
327
+ def get_train_dataloader(self) -> DataLoader:
328
+ """
329
+ Returns the training [`~torch.utils.data.DataLoader`].
330
+
331
+ Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
332
+ training if necessary) otherwise.
333
+
334
+ Subclass and override this method if you want to inject some custom behavior.
335
+ """
336
+ if self.train_dataset is None:
337
+ raise ValueError("Trainer: training requires a train_dataset.")
338
+
339
+ train_dataset = self.train_dataset
340
+ data_collator = self.data_collator
341
+ if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
342
+ train_dataset = self._remove_unused_columns(train_dataset, description="training")
343
+ else:
344
+ data_collator = self._get_collator_with_removed_columns(data_collator, description="training")
345
+
346
+ dataloader_params = {
347
+ "batch_size": self._train_batch_size,
348
+ "collate_fn": data_collator,
349
+ "num_workers": self.args.dataloader_num_workers,
350
+ "pin_memory": self.args.dataloader_pin_memory,
351
+ "persistent_workers": self.args.dataloader_persistent_workers,
352
+ }
353
+
354
+ if not isinstance(train_dataset, torch.utils.data.IterableDataset):
355
+ dataloader_params["sampler"] = self._get_train_sampler()
356
+ dataloader_params["drop_last"] = self.args.dataloader_drop_last
357
+ dataloader_params["worker_init_fn"] = seed_worker
358
+ dataloader_params["prefetch_factor"] = self.args.dataloader_num_workers * 2 if self.args.dataloader_num_workers != 0 else None
359
+
360
+ dataloader = self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params))
361
+
362
+ return dataloader
363
+
364
+ def create_optimizer(self):
365
+ """
366
+ Setup the optimizer.
367
+
368
+ We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
369
+ Trainer's init through `optimizers`, or subclass and override this method in a subclass.
370
+ """
371
+ if is_sagemaker_mp_enabled():
372
+ return super().create_optimizer()
373
+
374
+ opt_model = self.model
375
+
376
+ if self.optimizer is None:
377
+ decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
378
+ decay_parameters = [name for name in decay_parameters if "bias" not in name]
379
+ lr_mapper = {}
380
+ lr_mapper_merger = {}
381
+ if self.args.mm_projector_lr is not None:
382
+ lr_mapper["mm_projector"] = self.args.mm_projector_lr
383
+ if self.args.mm_vision_tower_lr is not None:
384
+ lr_mapper["vision_tower"] = self.args.mm_vision_tower_lr
385
+ if self.args.mm_vision_tower_merger_lr is not None:
386
+ lr_mapper_merger["merger"] = self.args.mm_vision_tower_merger_lr
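+ # Parameters whose names contain an lr_mapper keyword ("mm_projector", "vision_tower", or "merger")
+ # are placed in their own optimizer groups with the learning rates configured above; all remaining
+ # trainable parameters use the base LR and are split only by whether they receive weight decay.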
387
+ if len(lr_mapper) > 0:
388
+ special_lr_parameters = [name for name, _ in opt_model.named_parameters() if any(module_keyword in name for module_keyword in lr_mapper)]
389
+ optimizer_grouped_parameters = [
390
+ {
391
+ "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in special_lr_parameters and p.requires_grad)],
392
+ "weight_decay": self.args.weight_decay,
393
+ },
394
+ {
395
+ "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in special_lr_parameters and p.requires_grad)],
396
+ "weight_decay": 0.0,
397
+ },
398
+ ]
399
+ for module_keyword, lr in lr_mapper.items():
400
+ if lr_mapper_merger:
401
+ module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name and "merger" not in name]
402
+ else:
403
+ module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name]
404
+ optimizer_grouped_parameters.extend(
405
+ [
406
+ {
407
+ "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in module_parameters and p.requires_grad)],
408
+ "weight_decay": self.args.weight_decay,
409
+ "lr": lr,
410
+ },
411
+ {
412
+ "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in module_parameters and p.requires_grad)],
413
+ "weight_decay": 0.0,
414
+ "lr": lr,
415
+ },
416
+ ]
417
+ )
418
+ for module_keyword, lr in lr_mapper_merger.items():
419
+ module_parameters = [name for name, _ in opt_model.named_parameters() if module_keyword in name]
420
+ optimizer_grouped_parameters.extend(
421
+ [
422
+ {
423
+ "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in module_parameters and p.requires_grad)],
424
+ "weight_decay": self.args.weight_decay,
425
+ "lr": lr,
426
+ },
427
+ {
428
+ "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in module_parameters and p.requires_grad)],
429
+ "weight_decay": 0.0,
430
+ "lr": lr,
431
+ },
432
+ ]
433
+ )
434
+ else:
435
+ optimizer_grouped_parameters = [
436
+ {
437
+ "params": [p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)],
438
+ "weight_decay": self.args.weight_decay,
439
+ },
440
+ {
441
+ "params": [p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)],
442
+ "weight_decay": 0.0,
443
+ },
444
+ ]
445
+
446
+ optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
447
+
448
+ self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
449
+ if optimizer_cls.__name__ == "Adam8bit":
450
+ import bitsandbytes
451
+
452
+ manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
453
+
454
+ skipped = 0
455
+ for module in opt_model.modules():
456
+ if isinstance(module, nn.Embedding):
457
+ skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
458
+ logger.info(f"skipped {module}: {skipped/2**20}M params")
459
+ manager.register_module_override(module, "weight", {"optim_bits": 32})
460
+ logger.debug(f"bitsandbytes: will optimize {module} in fp32")
461
+ logger.info(f"skipped: {skipped/2**20}M params")
462
+
463
+ return self.optimizer
464
+
465
+ def _save_checkpoint(self, model, trial):
466
+ if getattr(self.args, "tune_mm_mlp_adapter", False) or (
467
+ hasattr(self.args, "mm_tunable_parts") and (len(self.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in self.args.mm_tunable_parts or "mm_vision_resampler" in self.args.mm_tunable_parts))
468
+ ):
469
+ from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
470
+
471
+ checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
472
+
473
+ run_dir = self._get_output_dir(trial=trial)
474
+ output_dir = os.path.join(run_dir, checkpoint_folder)
475
+
476
+ # Only save Adapter
477
+ keys_to_match = ["mm_projector", "vision_resampler"]
478
+ if getattr(self.args, "use_im_start_end", False):
479
+ keys_to_match.extend(["embed_tokens", "embed_in"])
480
+
481
+ weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)
482
+
483
+ if self.args.local_rank == 0 or self.args.local_rank == -1:
484
+ self.model.config.save_pretrained(output_dir)
485
+ torch.save(weight_to_save, os.path.join(output_dir, "mm_projector.bin"))
486
+ else:
487
+ super(LLaVATrainer, self)._save_checkpoint(model, trial)
488
+
489
+ def _save(self, output_dir: Optional[str] = None, state_dict=None):
490
+ if getattr(self.args, "tune_mm_mlp_adapter", False):
491
+ pass
492
+ else:
493
+ super(LLaVATrainer, self)._save(output_dir, state_dict)
494
+
495
+
496
+ class LLaVADPOTrainer(DPOTrainer):
497
+ def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
498
+ if self.train_dataset is None or not has_length(self.train_dataset):
499
+ return None
500
+
501
+ if self.args.group_by_modality_length:
502
+ lengths = self.train_dataset.modality_lengths
503
+ return LengthGroupedSampler(
504
+ # self.args.train_batch_size * self.args.gradient_accumulation_steps, # TODO: seems that we should not have gradient_accumulation_steps
505
+ self.args.train_batch_size,
506
+ world_size=self.args.world_size,
507
+ lengths=lengths,
508
+ group_by_modality=True,
509
+ )
510
+ else:
511
+ return super()._get_train_sampler()
512
+
513
+ def _save_checkpoint(self, model, trial):
514
+ if getattr(self.args, "tune_mm_mlp_adapter", False) or (
515
+ hasattr(self.args, "mm_tunable_parts") and (len(self.args.mm_tunable_parts.split(",")) == 1 and ("mm_mlp_adapter" in self.args.mm_tunable_parts or "mm_vision_resampler" in self.args.mm_tunable_parts))
516
+ ):
517
+ from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
518
+
519
+ checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
520
+
521
+ run_dir = self._get_output_dir(trial=trial)
522
+ output_dir = os.path.join(run_dir, checkpoint_folder)
523
+
524
+ # Only save Adapter
525
+ keys_to_match = ["mm_projector", "vision_resampler"]
526
+ if getattr(self.args, "use_im_start_end", False):
527
+ keys_to_match.extend(["embed_tokens", "embed_in"])
528
+
529
+ weight_to_save = get_mm_adapter_state_maybe_zero_3(self.model.named_parameters(), keys_to_match)
530
+
531
+ if self.args.local_rank == 0 or self.args.local_rank == -1:
532
+ self.model.config.save_pretrained(output_dir)
533
+ torch.save(weight_to_save, os.path.join(output_dir, "mm_projector.bin"))
534
+ else:
535
+ # super(LLaVADPOTrainer, self)._save_checkpoint(model, trial)
536
+ # print(type(model))
537
+ # from transformers.modeling_utils import unwrap_model
538
+ # print(type(unwrap_model(model)))
539
+ # print(unwrap_model(model).config)
540
+ if self.args.lora_enable:
541
+ from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
542
+
543
+ checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
544
+ run_dir = self._get_output_dir(trial=trial)
545
+ output_dir = os.path.join(run_dir, checkpoint_folder)
546
+ from transformers.modeling_utils import unwrap_model
547
+
548
+ unwrapped_model = unwrap_model(model)
549
+ self.save_my_lora_ckpt(output_dir, self.args, unwrapped_model)
550
+ else:
551
+ super(LLaVADPOTrainer, self)._save_checkpoint(model, trial)
552
+
553
+ def _save(self, output_dir: Optional[str] = None, state_dict=None):
554
+ if getattr(self.args, "tune_mm_mlp_adapter", False):
555
+ pass
556
+ else:
557
+ super(LLaVADPOTrainer, self)._save(output_dir, state_dict)
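A small, self-contained sketch of how the LengthGroupedSampler defined above is constructed and consumed, assuming the llava.train.llava_trainer module path implied by the file header; the lengths are made-up toy values (positive = image+text sample, negative = text-only, the sign convention used by get_modality_length_grouped_indices):

    import torch
    from llava.train.llava_trainer import LengthGroupedSampler

    lengths = [120, 95, -40, -38, 110, -45, 130, -50]  # toy per-sample token counts

    sampler = LengthGroupedSampler(
        batch_size=2,
        world_size=2,
        lengths=lengths,
        generator=torch.Generator().manual_seed(0),
        group_by_modality=True,
    )
    order = list(sampler)  # a permutation of range(len(lengths)); see get_modality_length_grouped_indices above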
VLMEvalKit-sudoku/llava/train/train.py ADDED
The diff for this file is too large to render. See raw diff
 
VLMEvalKit-sudoku/vlmeval/__init__.py ADDED
@@ -0,0 +1,21 @@
1
+ import ssl
2
+ ssl._create_default_https_context = ssl._create_unverified_context
3
+ # Temporarily bypass SSL certificate verification to download files from oss.
4
+
5
+ try:
6
+ import torch
7
+ except ImportError:
8
+ pass
9
+
10
+ from .smp import *
11
+ load_env()
12
+
13
+ from .api import *
14
+ from .dataset import *
15
+ from .utils import *
16
+ from .vlm import *
17
+ from .config import *
18
+ from .tools import cli
19
+
20
+
21
+ __version__ = '0.2rc1'
VLMEvalKit-sudoku/vlmeval/api/__pycache__/base.cpython-310.pyc ADDED
Binary file (9.69 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/bluelm_api.cpython-310.pyc ADDED
Binary file (8.09 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/cloudwalk.cpython-310.pyc ADDED
Binary file (3.73 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/doubao_vl_api.cpython-310.pyc ADDED
Binary file (7.35 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/gpt.cpython-310.pyc ADDED
Binary file (8.79 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/mug_u.cpython-310.pyc ADDED
Binary file (7.48 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/qwen_api.cpython-310.pyc ADDED
Binary file (2.69 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/sensechat_vision.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/siliconflow.cpython-310.pyc ADDED
Binary file (7.41 kB). View file
 
VLMEvalKit-sudoku/vlmeval/api/__pycache__/taiyi.cpython-310.pyc ADDED
Binary file (6.51 kB). View file
 
VLMEvalKit-sudoku/vlmeval/config.py ADDED
@@ -0,0 +1,1659 @@
1
+ from vlmeval.vlm import *
2
+ from vlmeval.api import *
3
+ from functools import partial
4
+ import os
5
+
6
+ PandaGPT_ROOT = None
7
+ MiniGPT4_ROOT = None
8
+ TransCore_ROOT = None
9
+ Yi_ROOT = None
10
+ OmniLMM_ROOT = None
11
+ Mini_Gemini_ROOT = None
12
+ VXVERSE_ROOT = None
13
+ VideoChat2_ROOT = None
14
+ VideoChatGPT_ROOT = None
15
+ PLLaVA_ROOT = None
16
+ RBDash_ROOT = None
17
+ VITA_ROOT = None
18
+ LLAVA_V1_7B_MODEL_PTH = "Please set your local path to LLaVA-7B-v1.1 here, the model weight is obtained by merging LLaVA delta weight based on vicuna-7b-v1.1 in https://github.com/haotian-liu/LLaVA/blob/main/docs/MODEL_ZOO.md with vicuna-7b-v1.1. "
19
+
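+ # Each registry dict in this file maps a display name to a zero-argument constructor:
+ # functools.partial binds the model class to its default weights and settings, so e.g.
+ # video_models["PLLaVA-7B"]() only instantiates (and loads) that one model when it is requested.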
20
+ video_models = {
21
+ "Video-LLaVA-7B": partial(VideoLLaVA, model_path="LanguageBind/Video-LLaVA-7B"),
22
+ "Video-LLaVA-7B-HF": partial(
23
+ VideoLLaVA_HF, model_path="LanguageBind/Video-LLaVA-7B-hf"
24
+ ),
25
+ "VideoChat2-HD": partial(
26
+ VideoChat2_HD,
27
+ model_path="OpenGVLab/VideoChat2_HD_stage4_Mistral_7B",
28
+ root=VideoChat2_ROOT,
29
+ config_file="./vlmeval/vlm/video_llm/configs/videochat2_hd.json",
30
+ ),
31
+ "Chat-UniVi-7B": partial(Chatunivi, model_path="Chat-UniVi/Chat-UniVi"),
32
+ "Chat-UniVi-7B-v1.5": partial(
33
+ Chatunivi, model_path="Chat-UniVi/Chat-UniVi-7B-v1.5"
34
+ ),
35
+ "LLaMA-VID-7B": partial(
36
+ LLaMAVID, model_path="YanweiLi/llama-vid-7b-full-224-video-fps-1"
37
+ ),
38
+ "Video-ChatGPT": partial(
39
+ VideoChatGPT, model_path="MBZUAI/Video-ChatGPT-7B", dir_root=VideoChatGPT_ROOT
40
+ ),
41
+ "PLLaVA-7B": partial(PLLaVA, model_path="ermu2001/pllava-7b", dir_root=PLLaVA_ROOT),
42
+ "PLLaVA-13B": partial(
43
+ PLLaVA, model_path="ermu2001/pllava-13b", dir_root=PLLaVA_ROOT
44
+ ),
45
+ "PLLaVA-34B": partial(
46
+ PLLaVA, model_path="ermu2001/pllava-34b", dir_root=PLLaVA_ROOT
47
+ ),
48
+ }
49
+
50
+ ungrouped = {
51
+ 'llava_uhd_resampler_query_49': partial(LLaVA_UHD_SIGLIP2_SLICE, model_path='https://huggingface.co/ZzzHelloWorld/llava-uhd-final/tree/main'),
52
+ 'llava_uhd_final': partial(LLaVA_UHD_SIGLIP2, model_path='https://huggingface.co/ZzzHelloWorld/llava_uhd_resampler_query_49'),
53
+ }
54
+
55
+ o1_key = os.environ.get('O1_API_KEY', None)
56
+ o1_base = os.environ.get('O1_API_BASE', None)
57
+ o1_apis = {
58
+ 'o1': partial(
59
+ GPT4V,
60
+ model="o1-2024-12-17",
61
+ key=o1_key,
62
+ api_base=o1_base,
63
+ temperature=0,
64
+ img_detail='high',
65
+ retry=3,
66
+ timeout=1800,
67
+ max_tokens=16384,
68
+ verbose=False,
69
+
70
+ ),
71
+ 'o3': partial(
72
+ GPT4V,
73
+ model="o3-2025-04-16",
74
+ key=o1_key,
75
+ api_base=o1_base,
76
+ temperature=0,
77
+ img_detail='high',
78
+ retry=3,
79
+ timeout=1800,
80
+ max_tokens=16384,
81
+ verbose=False,
82
+ ),
83
+ 'o4-mini': partial(
84
+ GPT4V,
85
+ model="o4-mini-2025-04-16",
86
+ key=o1_key,
87
+ api_base=o1_base,
88
+ temperature=0,
89
+ img_detail='high',
90
+ retry=3,
91
+ timeout=1800,
92
+ max_tokens=16384,
93
+ verbose=False,
94
+ ),
95
+ }
96
+
97
+ api_models = {
98
+ # GPT
99
+ "GPT4V": partial(
100
+ GPT4V,
101
+ model="gpt-4-1106-vision-preview",
102
+ temperature=0,
103
+ img_size=512,
104
+ img_detail="low",
105
+ retry=10,
106
+ verbose=False,
107
+ ),
108
+ "GPT4V_HIGH": partial(
109
+ GPT4V,
110
+ model="gpt-4-1106-vision-preview",
111
+ temperature=0,
112
+ img_size=-1,
113
+ img_detail="high",
114
+ retry=10,
115
+ verbose=False,
116
+ ),
117
+ "GPT4V_20240409": partial(
118
+ GPT4V,
119
+ model="gpt-4-turbo-2024-04-09",
120
+ temperature=0,
121
+ img_size=512,
122
+ img_detail="low",
123
+ retry=10,
124
+ verbose=False,
125
+ ),
126
+ "GPT4V_20240409_HIGH": partial(
127
+ GPT4V,
128
+ model="gpt-4-turbo-2024-04-09",
129
+ temperature=0,
130
+ img_size=-1,
131
+ img_detail="high",
132
+ retry=10,
133
+ verbose=False,
134
+ ),
135
+ "GPT4o": partial(
136
+ GPT4V,
137
+ model="gpt-4o-2024-05-13",
138
+ temperature=0,
139
+ img_size=512,
140
+ img_detail="low",
141
+ retry=10,
142
+ verbose=False,
143
+ ),
144
+ "GPT4o_HIGH": partial(
145
+ GPT4V,
146
+ model="gpt-4o-2024-05-13",
147
+ temperature=0,
148
+ img_size=-1,
149
+ img_detail="high",
150
+ retry=10,
151
+ verbose=False,
152
+ ),
153
+ "GPT4o_20240806": partial(
154
+ GPT4V,
155
+ model="gpt-4o-2024-08-06",
156
+ temperature=0,
157
+ img_size=-1,
158
+ img_detail="high",
159
+ retry=10,
160
+ verbose=False,
161
+ ),
162
+ "GPT4o_20241120": partial(
163
+ GPT4V,
164
+ model="gpt-4o-2024-11-20",
165
+ temperature=0,
166
+ img_size=-1,
167
+ img_detail="high",
168
+ retry=10,
169
+ verbose=False,
170
+ ),
171
+ "ChatGPT4o": partial(
172
+ GPT4V,
173
+ model="chatgpt-4o-latest",
174
+ temperature=0,
175
+ img_size=-1,
176
+ img_detail="high",
177
+ retry=10,
178
+ verbose=False,
179
+ ),
180
+ "GPT4o_MINI": partial(
181
+ GPT4V,
182
+ model="gpt-4o-mini-2024-07-18",
183
+ temperature=0,
184
+ img_size=-1,
185
+ img_detail="high",
186
+ retry=10,
187
+ verbose=False,
188
+ ),
189
+ "GPT4.5": partial(
190
+ GPT4V,
191
+ model='gpt-4.5-preview-2025-02-27',
192
+ temperature=0,
193
+ timeout=600,
194
+ img_size=-1,
195
+ img_detail='high',
196
+ retry=10,
197
+ verbose=False,
198
+ ),
199
+ "gpt-4.1-2025-04-14": partial(
200
+ GPT4V,
201
+ model="gpt-4.1-2025-04-14",
202
+ temperature=0,
203
+ img_size=-1,
204
+ img_detail="high",
205
+ retry=10,
206
+ verbose=False,
207
+ ),
208
+ "gpt-4.1-mini-2025-04-14": partial(
209
+ GPT4V,
210
+ model="gpt-4.1-mini-2025-04-14",
211
+ temperature=0,
212
+ img_size=-1,
213
+ img_detail="high",
214
+ retry=10,
215
+ verbose=False,
216
+ ),
217
+ "gpt-4.1-nano-2025-04-14": partial(
218
+ GPT4V,
219
+ model="gpt-4.1-nano-2025-04-14",
220
+ temperature=0,
221
+ img_size=-1,
222
+ img_detail="high",
223
+ retry=10,
224
+ verbose=False,
225
+ ),
226
+ "gpt-5-2025-08-07": partial(
227
+ GPT4V,
228
+ model="gpt-5-2025-08-07",
229
+ img_detail="high",
230
+ retry=3,
231
+ verbose=False,
232
+ max_tokens=2**14,
233
+ timeout=300,
234
+ ),
235
+ "gpt-5-mini-2025-08-07": partial(
236
+ GPT4V,
237
+ model="gpt-5-mini-2025-08-07",
238
+ img_detail="high",
239
+ retry=3,
240
+ verbose=False,
241
+ max_tokens=2**14,
242
+ timeout=300,
243
+ ),
244
+ "gpt-5-nano-2025-08-07": partial(
245
+ GPT4V,
246
+ model="gpt-5-nano-2025-08-07",
247
+ img_detail="high",
248
+ retry=3,
249
+ verbose=False,
250
+ max_tokens=2**14,
251
+ timeout=300,
252
+ ),
253
+ # Gemini
254
+ "GeminiPro1-0": partial(
255
+ Gemini, model="gemini-1.0-pro", temperature=0, retry=10
256
+ ), # now GeminiPro1-0 is only supported by vertex backend
257
+ "GeminiPro1-5": partial(
258
+ Gemini, model="gemini-1.5-pro", temperature=0, retry=10
259
+ ),
260
+ "GeminiFlash1-5": partial(
261
+ Gemini, model="gemini-1.5-flash", temperature=0, retry=10
262
+ ),
263
+ "GeminiPro1-5-002": partial(
264
+ GPT4V, model="gemini-1.5-pro-002", temperature=0, retry=10
265
+ ), # Internal Use Only
266
+ "GeminiFlash1-5-002": partial(
267
+ GPT4V, model="gemini-1.5-flash-002", temperature=0, retry=10
268
+ ), # Internal Use Only
269
+ "GeminiFlash2-0": partial(
270
+ Gemini, model="gemini-2.0-flash", temperature=0, retry=10
271
+ ),
272
+ "GeminiFlashLite2-0": partial(
273
+ Gemini, model="gemini-2.0-flash-lite", temperature=0, retry=10
274
+ ),
275
+ "GeminiFlash2-5": partial(
276
+ Gemini, model="gemini-2.5-flash", temperature=0, retry=10
277
+ ),
278
+ "GeminiPro2-5": partial(
279
+ Gemini, model="gemini-2.5-pro", temperature=0, retry=10
280
+ ),
281
+
282
+ # Qwen-VL
283
+ "QwenVLPlus": partial(QwenVLAPI, model="qwen-vl-plus", temperature=0, retry=10),
284
+ "QwenVLMax": partial(QwenVLAPI, model="qwen-vl-max", temperature=0, retry=10),
285
+ "QwenVLMax-250408": partial(QwenVLAPI, model="qwen-vl-max-2025-04-08", temperature=0, retry=10),
286
+
287
+ # Reka
288
+ "RekaEdge": partial(Reka, model="reka-edge-20240208"),
289
+ "RekaFlash": partial(Reka, model="reka-flash-20240226"),
290
+ "RekaCore": partial(Reka, model="reka-core-20240415"),
291
+ # Step1V
292
+ "Step1V": partial(
293
+ GPT4V,
294
+ model="step-1v-32k",
295
+ api_base="https://api.stepfun.com/v1/chat/completions",
296
+ temperature=0,
297
+ retry=10,
298
+ img_size=-1,
299
+ img_detail="high",
300
+ ),
301
+ "Step1.5V-mini": partial(
302
+ GPT4V,
303
+ model="step-1.5v-mini",
304
+ api_base="https://api.stepfun.com/v1/chat/completions",
305
+ temperature=0,
306
+ retry=10,
307
+ img_size=-1,
308
+ img_detail="high",
309
+ ),
310
+ "Step1o": partial(
311
+ GPT4V,
312
+ model="step-1o-vision-32k",
313
+ api_base="https://api.stepfun.com/v1/chat/completions",
314
+ temperature=0,
315
+ retry=10,
316
+ img_size=-1,
317
+ img_detail="high",
318
+ ),
319
+ # Yi-Vision
320
+ "Yi-Vision": partial(
321
+ GPT4V,
322
+ model="yi-vision",
323
+ api_base="https://api.lingyiwanwu.com/v1/chat/completions",
324
+ temperature=0,
325
+ retry=10,
326
+ ),
327
+ # Claude
328
+ "Claude3V_Opus": partial(
329
+ Claude3V, model="claude-3-opus-20240229", temperature=0, retry=10, verbose=False
330
+ ),
331
+ "Claude3V_Sonnet": partial(
332
+ Claude3V,
333
+ model="claude-3-sonnet-20240229",
334
+ temperature=0,
335
+ retry=10,
336
+ verbose=False,
337
+ ),
338
+ "Claude3V_Haiku": partial(
339
+ Claude3V,
340
+ model="claude-3-haiku-20240307",
341
+ temperature=0,
342
+ retry=10,
343
+ verbose=False,
344
+ ),
345
+ "Claude3-5V_Sonnet": partial(
346
+ Claude3V,
347
+ model="claude-3-5-sonnet-20240620",
348
+ temperature=0,
349
+ retry=10,
350
+ verbose=False,
351
+ ),
352
+ "Claude3-5V_Sonnet_20241022": partial(
353
+ Claude3V,
354
+ model="claude-3-5-sonnet-20241022",
355
+ temperature=0,
356
+ retry=10,
357
+ verbose=False,
358
+ ),
359
+ "Claude3-7V_Sonnet": partial(
360
+ Claude3V,
361
+ model="claude-3-7-sonnet-20250219",
362
+ temperature=0,
363
+ retry=10,
364
+ verbose=False,
365
+ ),
366
+ "Claude4_Opus": partial(
367
+ Claude3V,
368
+ model="claude-4-opus-20250514",
369
+ temperature=0,
370
+ retry=10,
371
+ verbose=False,
372
+ timeout=1800
373
+ ),
374
+ "Claude4_Sonnet": partial(
375
+ Claude3V,
376
+ model="claude-4-sonnet-20250514",
377
+ temperature=0,
378
+ retry=10,
379
+ verbose=False,
380
+ timeout=1800
381
+ ),
382
+ # GLM4V
383
+ "GLM4V": partial(GLMVisionAPI, model="glm4v-biz-eval", temperature=0, retry=10),
384
+ "GLM4V_PLUS": partial(GLMVisionAPI, model="glm-4v-plus", temperature=0, retry=10),
385
+ "GLM4V_PLUS_20250111": partial(
386
+ GLMVisionAPI, model="glm-4v-plus-0111", temperature=0, retry=10
387
+ ),
388
+ # MiniMax abab
389
+ "abab6.5s": partial(
390
+ GPT4V,
391
+ model="abab6.5s-chat",
392
+ api_base="https://api.minimax.chat/v1/chat/completions",
393
+ temperature=0,
394
+ retry=10,
395
+ ),
396
+ "abab7-preview": partial(
397
+ GPT4V,
398
+ model="abab7-chat-preview",
399
+ api_base="https://api.minimax.chat/v1/chat/completions",
400
+ temperature=0,
401
+ retry=10,
402
+ ),
403
+ # CongRong
404
+ "CongRong-v1.5": partial(CWWrapper, model="cw-congrong-v1.5", temperature=0, retry=10),
405
+ "CongRong-v2.0": partial(CWWrapper, model="cw-congrong-v2.0", temperature=0, retry=10),
406
+ # SenseNova
407
+ "SenseNova-V6-Pro": partial(
408
+ SenseChatVisionAPI, model="SenseNova-V6-Pro", temperature=0, retry=10
409
+ ),
410
+ "SenseNova-V6-Reasoner": partial(
411
+ SenseChatVisionAPI, model="SenseNova-V6-Reasoner", temperature=0, retry=10
412
+ ),
413
+ "SenseNova-V6-5-Pro": partial(
414
+ SenseChatVisionAPI, model="SenseNova-V6-5-Pro", retry=10
415
+ ),
416
+ "HunYuan-Vision": partial(
417
+ HunyuanVision, model="hunyuan-vision", temperature=0, retry=10
418
+ ),
419
+ "HunYuan-Standard-Vision": partial(
420
+ HunyuanVision, model="hunyuan-standard-vision", temperature=0, retry=10
421
+ ),
422
+ "HunYuan-Large-Vision": partial(
423
+ HunyuanVision, model="hunyuan-large-vision", temperature=0, retry=10
424
+ ),
425
+ "BailingMM-Lite-1203": partial(
426
+ bailingMMAPI, model="BailingMM-Lite-1203", temperature=0, retry=10
427
+ ),
428
+ "BailingMM-Pro-0120": partial(
429
+ bailingMMAPI, model="BailingMM-Pro-0120", temperature=0, retry=10
430
+ ),
431
+ # BlueLM-2.5
432
+ "BlueLM-2.5-3B": partial(BlueLM_API, model="BlueLM-2.5-3B", temperature=0, retry=3),
433
+ # JiuTian-VL
434
+ "JTVL": partial(JTVLChatAPI, model="jt-vl-chat", temperature=0, retry=10),
435
+ "Taiyi": partial(TaiyiAPI, model="taiyi", temperature=0, retry=10),
436
+ # TeleMM
437
+ "TeleMM": partial(TeleMMAPI, model="TeleAI/TeleMM", temperature=0, retry=10),
438
+ "Qwen2.5-VL-32B-Instruct-SiliconFlow": partial(
439
+ SiliconFlowAPI, model="Qwen/Qwen2.5-VL-32B-Instruct", temperature=0, retry=10),
440
+ # lmdeploy api
441
+ "lmdeploy": partial(
442
+ LMDeployAPI,
443
+ api_base="http://0.0.0.0:23333/v1/chat/completions",
444
+ temperature=0,
445
+ retry=10,
446
+ ),
447
+ "lmdeploy_internvl_78B_MPO": partial(
448
+ LMDeployAPI,
449
+ api_base="http://0.0.0.0:23333/v1/chat/completions",
450
+ temperature=0,
451
+ retry=10,
452
+ timeout=100,
453
+ ),
454
+ "lmdeploy_qvq_72B_preview": partial(
455
+ LMDeployAPI,
456
+ api_base="http://0.0.0.0:23333/v1/chat/completions",
457
+ temperature=0,
458
+ retry=10,
459
+ timeout=300,
460
+ ),
461
+ 'Taichu-VLR-3B': partial(
462
+ TaichuVLRAPI,
463
+ model='taichu_vlr_3b',
464
+ url="https://platform.wair.ac.cn/maas/v1/chat/completions"
465
+ ),
466
+ 'Taichu-VLR-7B': partial(
467
+ TaichuVLRAPI,
468
+ model='taichu_vlr_7b',
469
+ url="https://platform.wair.ac.cn/maas/v1/chat/completions"
470
+ ),
471
+ # doubao_vl
472
+ "DoubaoVL": partial(
473
+ DoubaoVL, model="Doubao-1.5-vision-pro", temperature=0, retry=3, verbose=False
474
+ ),
475
+ "Seed1.5-VL": partial(
476
+ DoubaoVL,
477
+ model="doubao-1-5-thinking-vision-pro-250428",
478
+ temperature=0,
479
+ retry=3,
480
+ verbose=False,
481
+ max_tokens=16384,
482
+ ),
483
+ "Seed1.6": partial(
484
+ DoubaoVL,
485
+ model="doubao-seed-1.6-250615",
486
+ temperature=0,
487
+ retry=3,
488
+ verbose=False,
489
+ max_tokens=16384,
490
+ ),
491
+ "Seed1.6-Flash": partial(
492
+ DoubaoVL,
493
+ model="doubao-seed-1.6-flash-250615",
494
+ temperature=0,
495
+ retry=3,
496
+ verbose=False,
497
+ max_tokens=16384,
498
+ ),
499
+ "Seed1.6-Thinking": partial(
500
+ DoubaoVL,
501
+ model="doubao-seed-1.6-thinking-250615",
502
+ temperature=0,
503
+ retry=3,
504
+ verbose=False,
505
+ max_tokens=16384,
506
+ ),
507
+ # Shopee MUG-U
508
+ 'MUG-U-7B': partial(
509
+ MUGUAPI,
510
+ model='MUG-U',
511
+ temperature=0,
512
+ retry=10,
513
+ verbose=False,
514
+ timeout=300),
515
+ # grok
516
+ "grok-vision-beta": partial(
517
+ GPT4V,
518
+ model="grok-vision-beta",
519
+ api_base="https://api.x.ai/v1/chat/completions",
520
+ temperature=0,
521
+ retry=10,
522
+ ),
523
+ "grok-2-vision-1212": partial(
524
+ GPT4V,
525
+ model="grok-2-vision",
526
+ api_base="https://api.x.ai/v1/chat/completions",
527
+ temperature=0,
528
+ retry=10,
529
+ ),
530
+ "grok-4-0709": partial(
531
+ GPT4V,
532
+ model="grok-4-0709",
533
+ api_base="https://api.x.ai/v1/chat/completions",
534
+ temperature=0,
535
+ retry=3,
536
+ timeout=1200,
537
+ max_tokens=16384
538
+ ),
539
+ # kimi
540
+ "moonshot-v1-8k": partial(
541
+ GPT4V,
542
+ model="moonshot-v1-8k-vision-preview",
543
+ api_base="https://api.moonshot.cn/v1/chat/completions",
544
+ temperature=0,
545
+ retry=10,
546
+ ),
547
+ "moonshot-v1-32k": partial(
548
+ GPT4V,
549
+ model="moonshot-v1-32k-vision-preview",
550
+ api_base="https://api.moonshot.cn/v1/chat/completions",
551
+ temperature=0,
552
+ retry=10,
553
+ ),
554
+ "moonshot-v1-128k": partial(
555
+ GPT4V,
556
+ model="moonshot-v1-128k-vision-preview",
557
+ api_base="https://api.moonshot.cn/v1/chat/completions",
558
+ temperature=0,
559
+ retry=10,
560
+ ),
561
+ 'ernie4.5-turbo': partial(
562
+ GPT4V,
563
+ model='ernie-4.5-turbo-vl-32k',
564
+ temperature=0,
565
+ retry=3,
566
+ max_tokens=12000,
567
+ ),
568
+ 'ernie4.5-a3b': partial(
569
+ GPT4V,
570
+ model='ernie-4.5-vl-28b-a3b',
571
+ temperature=0,
572
+ retry=3,
573
+ max_tokens=8000,
574
+ )
575
+ }
576
+
577
+ import copy as cp
578
+ api_models['gpt-5'] = cp.deepcopy(api_models['gpt-5-2025-08-07'])
579
+ api_models['gpt-5-mini'] = cp.deepcopy(api_models['gpt-5-mini-2025-08-07'])
580
+ api_models['gpt-5-nano'] = cp.deepcopy(api_models['gpt-5-nano-2025-08-07'])
581
+
582
+ emu_series = {
583
+ "emu2_chat": partial(Emu, model_path="BAAI/Emu2-Chat"),
584
+ "emu3_chat": partial(Emu3_chat, model_path="BAAI/Emu3-Chat"),
585
+ "emu3_gen": partial(Emu3_gen, model_path="BAAI/Emu3-Gen"),
586
+ }
587
+
588
+ granite_vision_series = {
589
+ 'granite_vision_3.1_2b_preview': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.1-2b-preview"),
590
+ 'granite_vision_3.2_2b': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.2-2b"),
591
+ 'granite_vision_3.3_2b': partial(GraniteVision3, model_path="ibm-granite/granite-vision-3.3-2b"),
592
+ }
593
+
594
+ mmalaya_series = {
595
+ "MMAlaya": partial(MMAlaya, model_path="DataCanvas/MMAlaya"),
596
+ "MMAlaya2": partial(MMAlaya2, model_path="DataCanvas/MMAlaya2"),
597
+ }
598
+
599
+ minicpm_series = {
600
+ "MiniCPM-V": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V"),
601
+ "MiniCPM-V-2": partial(MiniCPM_V, model_path="openbmb/MiniCPM-V-2"),
602
+ "MiniCPM-Llama3-V-2_5": partial(
603
+ MiniCPM_Llama3_V, model_path="openbmb/MiniCPM-Llama3-V-2_5"
604
+ ),
605
+ "MiniCPM-V-2_6": partial(MiniCPM_V_2_6, model_path="openbmb/MiniCPM-V-2_6"),
606
+ "MiniCPM-o-2_6": partial(MiniCPM_o_2_6, model_path="openbmb/MiniCPM-o-2_6"),
607
+ "MiniCPM-V-4": partial(MiniCPM_V_4, model_path="openbmb/MiniCPM-V-4"),
608
+ "MiniCPM-V-4_5": partial(MiniCPM_V_4_5, model_path="openbmb/MiniCPM-V-4_5"),
609
+ }
610
+
611
+ xtuner_series = {
612
+ "llava-internlm2-7b": partial(
613
+ LLaVA_XTuner,
614
+ llm_path="internlm/internlm2-chat-7b",
615
+ llava_path="xtuner/llava-internlm2-7b",
616
+ visual_select_layer=-2,
617
+ prompt_template="internlm2_chat",
618
+ ),
619
+ "llava-internlm2-20b": partial(
620
+ LLaVA_XTuner,
621
+ llm_path="internlm/internlm2-chat-20b",
622
+ llava_path="xtuner/llava-internlm2-20b",
623
+ visual_select_layer=-2,
624
+ prompt_template="internlm2_chat",
625
+ ),
626
+ "llava-internlm-7b": partial(
627
+ LLaVA_XTuner,
628
+ llm_path="internlm/internlm-chat-7b",
629
+ llava_path="xtuner/llava-internlm-7b",
630
+ visual_select_layer=-2,
631
+ prompt_template="internlm_chat",
632
+ ),
633
+ "llava-v1.5-7b-xtuner": partial(
634
+ LLaVA_XTuner,
635
+ llm_path="lmsys/vicuna-7b-v1.5",
636
+ llava_path="xtuner/llava-v1.5-7b-xtuner",
637
+ visual_select_layer=-2,
638
+ prompt_template="vicuna",
639
+ ),
640
+ "llava-v1.5-13b-xtuner": partial(
641
+ LLaVA_XTuner,
642
+ llm_path="lmsys/vicuna-13b-v1.5",
643
+ llava_path="xtuner/llava-v1.5-13b-xtuner",
644
+ visual_select_layer=-2,
645
+ prompt_template="vicuna",
646
+ ),
647
+ "llava-llama-3-8b": partial(
648
+ LLaVA_XTuner,
649
+ llm_path="xtuner/llava-llama-3-8b-v1_1",
650
+ llava_path="xtuner/llava-llama-3-8b-v1_1",
651
+ visual_select_layer=-2,
652
+ prompt_template="llama3_chat",
653
+ ),
654
+ }
655
+
656
+ qwen_series = {
657
+ "qwen_base": partial(QwenVL, model_path="Qwen/Qwen-VL"),
658
+ "qwen_chat": partial(QwenVLChat, model_path="Qwen/Qwen-VL-Chat"),
659
+ "monkey": partial(Monkey, model_path="echo840/Monkey"),
660
+ "monkey-chat": partial(MonkeyChat, model_path="echo840/Monkey-Chat"),
661
+ "minimonkey": partial(MiniMonkey, model_path="mx262/MiniMonkey"),
662
+ }
663
+
664
+ thyme_series = {
665
+ "Thyme-7B": partial(Thyme, model_path="Kwai-Keye/Thyme-RL")
666
+ }
667
+
668
+ llava_series = {
669
+ "llava_v1.5_7b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-7b"),
670
+ "llava_v1.5_13b": partial(LLaVA, model_path="liuhaotian/llava-v1.5-13b"),
671
+ "llava_v1_7b": partial(LLaVA, model_path=LLAVA_V1_7B_MODEL_PTH),
672
+ "sharegpt4v_7b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-7B"),
673
+ "sharegpt4v_13b": partial(LLaVA, model_path="Lin-Chen/ShareGPT4V-13B"),
674
+ "llava_next_vicuna_7b": partial(
675
+ LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-7b-hf"
676
+ ),
677
+ "llava_next_vicuna_13b": partial(
678
+ LLaVA_Next, model_path="llava-hf/llava-v1.6-vicuna-13b-hf"
679
+ ),
680
+ "llava_next_mistral_7b": partial(
681
+ LLaVA_Next, model_path="llava-hf/llava-v1.6-mistral-7b-hf"
682
+ ),
683
+ "llava_next_yi_34b": partial(LLaVA_Next, model_path="llava-hf/llava-v1.6-34b-hf"),
684
+ "llava_next_llama3": partial(
685
+ LLaVA_Next, model_path="llava-hf/llama3-llava-next-8b-hf"
686
+ ),
687
+ "llava_next_72b": partial(LLaVA_Next, model_path="llava-hf/llava-next-72b-hf"),
688
+ "llava_next_110b": partial(LLaVA_Next, model_path="llava-hf/llava-next-110b-hf"),
689
+ "llava_next_qwen_32b": partial(
690
+ LLaVA_Next2, model_path="lmms-lab/llava-next-qwen-32b"
691
+ ),
692
+ "llava_next_interleave_7b": partial(
693
+ LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-hf"
694
+ ),
695
+ "llava_next_interleave_7b_dpo": partial(
696
+ LLaVA_Next, model_path="llava-hf/llava-interleave-qwen-7b-dpo-hf"
697
+ ),
698
+ "llava-onevision-qwen2-0.5b-ov-hf": partial(
699
+ LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
700
+ ),
701
+ "llava-onevision-qwen2-0.5b-si-hf": partial(
702
+ LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-0.5b-si-hf"
703
+ ),
704
+ "llava-onevision-qwen2-7b-ov-hf": partial(
705
+ LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-ov-hf"
706
+ ),
707
+ "llava-onevision-qwen2-7b-si-hf": partial(
708
+ LLaVA_OneVision_HF, model_path="llava-hf/llava-onevision-qwen2-7b-si-hf"
709
+ ),
710
+ "llava_onevision_qwen2_0.5b_si": partial(
711
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-si"
712
+ ),
713
+ "llava_onevision_qwen2_7b_si": partial(
714
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-si"
715
+ ),
716
+ "llava_onevision_qwen2_72b_si": partial(
717
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-si"
718
+ ),
719
+ "llava_onevision_qwen2_0.5b_ov": partial(
720
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-0.5b-ov"
721
+ ),
722
+ "llava_onevision_qwen2_7b_ov": partial(
723
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-7b-ov"
724
+ ),
725
+ "llava_onevision_qwen2_72b_ov": partial(
726
+ LLaVA_OneVision, model_path="lmms-lab/llava-onevision-qwen2-72b-ov-sft"
727
+ ),
728
+ "Aquila-VL-2B": partial(LLaVA_OneVision, model_path="BAAI/Aquila-VL-2B-llava-qwen"),
729
+ "llava_video_qwen2_7b": partial(
730
+ LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-7B-Qwen2"
731
+ ),
732
+ "llava_video_qwen2_72b": partial(
733
+ LLaVA_OneVision, model_path="lmms-lab/LLaVA-Video-72B-Qwen2"
734
+ ),
735
+ }
736
+
737
+ varco_vision_series = {
738
+ "varco-vision-hf": partial(
739
+ LLaVA_OneVision_HF, model_path="NCSOFT/VARCO-VISION-14B-HF"
740
+ ),
741
+ "varco-vision-2-1.7b": partial(
742
+ VarcoVision, model_path="NCSOFT/VARCO-VISION-2.0-1.7B"
743
+ ),
744
+ "varco-vision-2-14b": partial(
745
+ VarcoVision, model_path="NCSOFT/VARCO-VISION-2.0-14B"
746
+ ),
747
+ }
748
+
749
+ vita_series = {
750
+ "vita": partial(VITA, model_path="VITA-MLLM/VITA", root=VITA_ROOT),
751
+ "vita_qwen2": partial(VITAQwen2, model_path="VITA-MLLM/VITA-1.5", root=VITA_ROOT),
752
+ }
753
+
754
+ long_vita_series = {
755
+ "Long-VITA-16K": partial(
756
+ LongVITA, model_path="VITA-MLLM/Long-VITA-16K_HF", max_num_frame=128
757
+ ),
758
+ "Long-VITA-128K": partial(
759
+ LongVITA, model_path="VITA-MLLM/Long-VITA-128K_HF", max_num_frame=256
760
+ ),
761
+ "Long-VITA-1M": partial(
762
+ LongVITA, model_path="VITA-MLLM/Long-VITA-1M_HF", max_num_frame=256
763
+ ),
764
+ }
765
+
766
+ internvl = {
767
+ "InternVL-Chat-V1-1": partial(
768
+ InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-1", version="V1.1"
769
+ ),
770
+ "InternVL-Chat-V1-2": partial(
771
+ InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2", version="V1.2"
772
+ ),
773
+ "InternVL-Chat-V1-2-Plus": partial(
774
+ InternVLChat, model_path="OpenGVLab/InternVL-Chat-V1-2-Plus", version="V1.2"
775
+ ),
776
+ "InternVL-Chat-V1-5": partial(
777
+ InternVLChat,
778
+ model_path="OpenGVLab/InternVL-Chat-V1-5",
779
+ version="V1.5",
780
+ )
781
+ }
782
+
783
+ mini_internvl = {
784
+ "Mini-InternVL-Chat-2B-V1-5": partial(
785
+ InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-2B-V1-5", version="V1.5"
786
+ ),
787
+ "Mini-InternVL-Chat-4B-V1-5": partial(
788
+ InternVLChat, model_path="OpenGVLab/Mini-InternVL-Chat-4B-V1-5", version="V1.5"
789
+ ),
790
+ }
791
+
792
+ internvl2 = {
793
+ "InternVL2-1B": partial(
794
+ InternVLChat, model_path="OpenGVLab/InternVL2-1B", version="V2.0"
795
+ ),
796
+ "InternVL2-2B": partial(
797
+ InternVLChat, model_path="OpenGVLab/InternVL2-2B", version="V2.0"
798
+ ),
799
+ "InternVL2-4B": partial(
800
+ InternVLChat, model_path="OpenGVLab/InternVL2-4B", version="V2.0"
801
+ ),
802
+ "InternVL2-8B": partial(
803
+ InternVLChat, model_path="OpenGVLab/InternVL2-8B", version="V2.0"
804
+ ),
805
+ "InternVL2-26B": partial(
806
+ InternVLChat, model_path="OpenGVLab/InternVL2-26B", version="V2.0"
807
+ ),
808
+ "InternVL2-40B": partial(
809
+ InternVLChat, model_path="OpenGVLab/InternVL2-40B", version="V2.0"
810
+ ),
811
+ "InternVL2-76B": partial(
812
+ InternVLChat, model_path="OpenGVLab/InternVL2-Llama3-76B", version="V2.0"
813
+ ),
814
+ "InternVL2-8B-MPO": partial(
815
+ InternVLChat, model_path="OpenGVLab/InternVL2-8B-MPO", version="V2.0"
816
+ ),
817
+ "InternVL2-8B-MPO-CoT": partial(
818
+ InternVLChat,
819
+ model_path="OpenGVLab/InternVL2-8B-MPO",
820
+ version="V2.0",
821
+ use_mpo_prompt=True,
822
+ ),
823
+ }
824
+
825
+ internvl2_5 = {
826
+ "InternVL2_5-1B": partial(
827
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-1B", version="V2.0"
828
+ ),
829
+ "InternVL2_5-2B": partial(
830
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-2B", version="V2.0"
831
+ ),
832
+ "QTuneVL1-2B": partial(
833
+ InternVLChat, model_path="hanchaow/QTuneVL1-2B", version="V2.0"
834
+ ),
835
+ "InternVL2_5-4B": partial(
836
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-4B", version="V2.0"
837
+ ),
838
+ "InternVL2_5-8B": partial(
839
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0"
840
+ ),
841
+ "InternVL2_5-26B": partial(
842
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-26B", version="V2.0"
843
+ ),
844
+ "InternVL2_5-38B": partial(
845
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-38B", version="V2.0"
846
+ ),
847
+ "InternVL2_5-78B": partial(
848
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-78B", version="V2.0"
849
+ ),
850
+ # InternVL2.5 series with Best-of-N evaluation
851
+ "InternVL2_5-8B-BoN-8": partial(
852
+ InternVLChat, model_path="OpenGVLab/InternVL2_5-8B", version="V2.0",
853
+ best_of_n=8, reward_model_path="OpenGVLab/VisualPRM-8B",
854
+ ),
855
+ }
856
+
857
+ internvl2_5_mpo = {
858
+ "InternVL2_5-1B-MPO": partial(
859
+ InternVLChat,
860
+ model_path="OpenGVLab/InternVL2_5-1B-MPO",
861
+ version="V2.0",
862
+ use_mpo_prompt=True,
863
+ ),
864
+ "InternVL2_5-2B-MPO": partial(
865
+ InternVLChat,
866
+ model_path="OpenGVLab/InternVL2_5-2B-MPO",
867
+ version="V2.0",
868
+ use_mpo_prompt=True,
869
+ ),
870
+ "InternVL2_5-4B-MPO": partial(
871
+ InternVLChat,
872
+ model_path="OpenGVLab/InternVL2_5-4B-MPO",
873
+ version="V2.0",
874
+ use_mpo_prompt=True,
875
+ ),
876
+ "InternVL2_5-8B-MPO": partial(
877
+ InternVLChat,
878
+ model_path="OpenGVLab/InternVL2_5-8B-MPO",
879
+ version="V2.0",
880
+ use_mpo_prompt=True,
881
+ ),
882
+ "InternVL2_5-26B-MPO": partial(
883
+ InternVLChat,
884
+ model_path="OpenGVLab/InternVL2_5-26B-MPO",
885
+ version="V2.0",
886
+ use_mpo_prompt=True,
887
+ ),
888
+ "InternVL2_5-38B-MPO": partial(
889
+ InternVLChat,
890
+ model_path="OpenGVLab/InternVL2_5-38B-MPO",
891
+ version="V2.0",
892
+ use_mpo_prompt=True,
893
+ ),
894
+ "InternVL2_5-78B-MPO": partial(
895
+ InternVLChat,
896
+ model_path="OpenGVLab/InternVL2_5-78B-MPO",
897
+ version="V2.0",
898
+ use_mpo_prompt=True,
899
+ ),
900
+ "InternVL2_5-8B-GUI": partial(
901
+ InternVLChat,
902
+ model_path="/fs-computility/mllm1/shared/zhaoxiangyu/models/internvl2_5_8b_internlm2_5_7b_dynamic_res_stage1",
903
+ version="V2.0",
904
+ max_new_tokens=512,
905
+ screen_parse=False,
906
+ ),
907
+ "InternVL3-7B-GUI": partial(
908
+ InternVLChat,
909
+ model_path="/fs-computility/mllm1/shared/zhaoxiangyu/GUI/checkpoints/internvl3_7b_dynamic_res_stage1_56/",
910
+ version="V2.0",
911
+ max_new_tokens=512,
912
+ screen_parse=False,
913
+ ),
914
+ }
915
+
916
+ internvl3 = {
917
+ "InternVL3-1B": partial(
918
+ InternVLChat, model_path="OpenGVLab/InternVL3-1B", version="V2.0"
919
+ ),
920
+ "InternVL3-2B": partial(
921
+ InternVLChat, model_path="OpenGVLab/InternVL3-2B", version="V2.0"
922
+ ),
923
+ "InternVL3-8B": partial(
924
+ InternVLChat, model_path="OpenGVLab/InternVL3-8B", version="V2.0",
925
+ ),
926
+ "InternVL3-9B": partial(
927
+ InternVLChat, model_path="OpenGVLab/InternVL3-9B", version="V2.0"
928
+ ),
929
+ "InternVL3-14B": partial(
930
+ InternVLChat, model_path="OpenGVLab/InternVL3-14B", version="V2.0"
931
+ ),
932
+ "InternVL3-38B": partial(
933
+ InternVLChat, model_path="OpenGVLab/InternVL3-38B", version="V2.0"
934
+ ),
935
+ "InternVL3-78B": partial(
936
+ InternVLChat, model_path="OpenGVLab/InternVL3-78B", version="V2.0"
937
+ ),
938
+ }
939
+
940
+ internvl3_5 = {
941
+ "InternVL3_5-1B": partial(
942
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-1B", version="V2.0"
943
+ ),
944
+ "InternVL3_5-2B": partial(
945
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-2B", version="V2.0"
946
+ ),
947
+ "InternVL3_5-4B": partial(
948
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-4B", version="V2.0"
949
+ ),
950
+ "InternVL3_5-8B": partial(
951
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-8B", version="V2.0"
952
+ ),
953
+ "InternVL3_5-14B": partial(
954
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-14B", version="V2.0"
955
+ ),
956
+ "InternVL3_5-GPT-OSS-20B-A4B-Preview": partial(
957
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview", version="V2.0"
958
+ ),
959
+ "InternVL3_5-30B-A3B": partial(
960
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-30B-A3B", version="V2.0"
961
+ ),
962
+ "InternVL3_5-38B": partial(
963
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-38B", version="V2.0"
964
+ ),
965
+ "InternVL3_5-241B-A28B": partial(
966
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-241B-A28B", version="V2.0"
967
+ ),
968
+
969
+ "InternVL3_5-1B-Thinking": partial(
970
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-1B", use_lmdeploy=True,
971
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
972
+ ),
973
+ "InternVL3_5-2B-Thinking": partial(
974
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-2B", use_lmdeploy=True,
975
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
976
+ ),
977
+ "InternVL3_5-4B-Thinking": partial(
978
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-4B", use_lmdeploy=True,
979
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
980
+ ),
981
+ "InternVL3_5-8B-Thinking": partial(
982
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-8B", use_lmdeploy=True,
983
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
984
+ ),
985
+ "InternVL3_5-14B-Thinking": partial(
986
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-14B", use_lmdeploy=True,
987
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
988
+ ),
989
+ "InternVL3_5-GPT-OSS-20B-A4B-Preview-Thinking": partial(
990
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-GPT-OSS-20B-A4B-Preview", use_lmdeploy=True,
991
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
992
+ ),
993
+ "InternVL3_5-30B-A3B-Thinking": partial(
994
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-30B-A3B", use_lmdeploy=True,
995
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
996
+ ),
997
+ "InternVL3_5-38B-Thinking": partial(
998
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-38B", use_lmdeploy=True,
999
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
1000
+ ),
1001
+ "InternVL3_5-241B-A28B-Thinking": partial(
1002
+ InternVLChat, model_path="OpenGVLab/InternVL3_5-241B-A28B", use_lmdeploy=True,
1003
+ max_new_tokens=2**16, cot_prompt_version="r1", do_sample=True, version="V2.0"
1004
+ ),
1005
+ }
1006
+
1007
+ sail_series = {
1008
+ "SAIL-VL-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-2B"),
1009
+ "SAIL-VL-1.5-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d5-2B", use_msac = True),
1010
+ "SAIL-VL-1.5-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d5-8B", use_msac = True),
1011
+ "SAIL-VL-1.6-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d6-8B", use_msac = True),
1012
+ "SAIL-VL-1.7-Thinking-2B-2507": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d7-Thinking-2B-2507", use_msac = True, use_cot=True, max_new_tokens=4096),
1013
+ "SAIL-VL-1.7-Thinking-8B-2507": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL-1d7-Thinking-8B-2507", use_msac = True, use_cot=True, max_new_tokens=4096),
1014
+ "SAIL-VL2-2B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL2-2B", use_msac = True),
1015
+ "SAIL-VL2-8B": partial(SailVL, model_path="BytedanceDouyinContent/SAIL-VL2-8B", use_msac = True),
1016
+ }
1017
+
1018
+ ristretto_series = {
1019
+ "Ristretto-3B": partial(Ristretto, model_path="LiAutoAD/Ristretto-3B"),
1020
+ }
1021
+
1022
+ yivl_series = {
1023
+ "Yi_VL_6B": partial(Yi_VL, model_path="01-ai/Yi-VL-6B", root=Yi_ROOT),
1024
+ "Yi_VL_34B": partial(Yi_VL, model_path="01-ai/Yi-VL-34B", root=Yi_ROOT),
1025
+ }
1026
+
1027
+ xcomposer_series = {
1028
+ "XComposer": partial(XComposer, model_path="internlm/internlm-xcomposer-vl-7b"),
1029
+ "sharecaptioner": partial(ShareCaptioner, model_path="Lin-Chen/ShareCaptioner"),
1030
+ "XComposer2": partial(XComposer2, model_path="internlm/internlm-xcomposer2-vl-7b"),
1031
+ "XComposer2_1.8b": partial(
1032
+ XComposer2, model_path="internlm/internlm-xcomposer2-vl-1_8b"
1033
+ ),
1034
+ "XComposer2_4KHD": partial(
1035
+ XComposer2_4KHD, model_path="internlm/internlm-xcomposer2-4khd-7b"
1036
+ ),
1037
+ "XComposer2d5": partial(
1038
+ XComposer2d5, model_path="internlm/internlm-xcomposer2d5-7b"
1039
+ ),
1040
+ }
1041
+
1042
+ minigpt4_series = {
1043
+ "MiniGPT-4-v2": partial(MiniGPT4, mode="v2", root=MiniGPT4_ROOT),
1044
+ "MiniGPT-4-v1-7B": partial(MiniGPT4, mode="v1_7b", root=MiniGPT4_ROOT),
1045
+ "MiniGPT-4-v1-13B": partial(MiniGPT4, mode="v1_13b", root=MiniGPT4_ROOT),
1046
+ }
1047
+
1048
+ idefics_series = {
1049
+ "idefics_9b_instruct": partial(
1050
+ IDEFICS, model_path="HuggingFaceM4/idefics-9b-instruct"
1051
+ ),
1052
+ "idefics_80b_instruct": partial(
1053
+ IDEFICS, model_path="HuggingFaceM4/idefics-80b-instruct"
1054
+ ),
1055
+ "idefics2_8b": partial(IDEFICS2, model_path="HuggingFaceM4/idefics2-8b"),
1056
+ # Idefics3 follows the Idefics2 pattern
1057
+ "Idefics3-8B-Llama3": partial(
1058
+ IDEFICS2, model_path="HuggingFaceM4/Idefics3-8B-Llama3"
1059
+ ),
1060
+ }
1061
+
1062
+ smolvlm_series = {
1063
+ "SmolVLM-256M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-256M-Instruct"),
1064
+ "SmolVLM-500M": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-500M-Instruct"),
1065
+ "SmolVLM": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct"),
1066
+ "SmolVLM-DPO": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Instruct-DPO"),
1067
+ "SmolVLM-Synthetic": partial(SmolVLM, model_path="HuggingFaceTB/SmolVLM-Synthetic"),
1068
+ "SmolVLM2-256M": partial(
1069
+ SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-256M-Video-Instruct"
1070
+ ),
1071
+ "SmolVLM2-500M": partial(
1072
+ SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-500M-Video-Instruct"
1073
+ ),
1074
+ "SmolVLM2": partial(SmolVLM2, model_path="HuggingFaceTB/SmolVLM2-2.2B-Instruct"),
1075
+ }
1076
+
1077
+ instructblip_series = {
1078
+ "instructblip_7b": partial(InstructBLIP, name="instructblip_7b"),
1079
+ "instructblip_13b": partial(InstructBLIP, name="instructblip_13b"),
1080
+ }
1081
+
1082
+ deepseekvl_series = {
1083
+ "deepseek_vl_7b": partial(DeepSeekVL, model_path="deepseek-ai/deepseek-vl-7b-chat"),
1084
+ "deepseek_vl_1.3b": partial(
1085
+ DeepSeekVL, model_path="deepseek-ai/deepseek-vl-1.3b-chat"
1086
+ ),
1087
+ }
1088
+
1089
+ deepseekvl2_series = {
1090
+ "deepseek_vl2_tiny": partial(
1091
+ DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-tiny"
1092
+ ),
1093
+ "deepseek_vl2_small": partial(
1094
+ DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2-small"
1095
+ ),
1096
+ "deepseek_vl2": partial(DeepSeekVL2, model_path="deepseek-ai/deepseek-vl2"),
1097
+ }
1098
+
1099
+ janus_series = {
1100
+ "Janus-1.3B": partial(Janus, model_path="deepseek-ai/Janus-1.3B"),
1101
+ "Janus-Pro-1B": partial(Janus, model_path="deepseek-ai/Janus-Pro-1B"),
1102
+ "Janus-Pro-7B": partial(Janus, model_path="deepseek-ai/Janus-Pro-7B"),
1103
+ }
1104
+
1105
+ cogvlm_series = {
1106
+ "cogvlm-grounding-generalist": partial(
1107
+ CogVlm,
1108
+ model_path="THUDM/cogvlm-grounding-generalist-hf",
1109
+ tokenizer_name="lmsys/vicuna-7b-v1.5",
1110
+ ),
1111
+ "cogvlm-chat": partial(
1112
+ CogVlm, model_path="THUDM/cogvlm-chat-hf", tokenizer_name="lmsys/vicuna-7b-v1.5"
1113
+ ),
1114
+ "cogvlm2-llama3-chat-19B": partial(
1115
+ CogVlm, model_path="THUDM/cogvlm2-llama3-chat-19B"
1116
+ ),
1117
+ "glm-4v-9b": partial(GLM4v, model_path="THUDM/glm-4v-9b"),
1118
+ "GLM4_1VThinking-9b": partial(GLMThinking, model_path="THUDM/GLM-4.1V-9B-Thinking"),
1119
+ "GLM4_5V": partial(GLMThinking, model_path="THUDM/GLM-4.5V"),
1120
+ }
1121
+
1122
+ wemm_series = {
1123
+ "WeMM": partial(WeMM, model_path="feipengma/WeMM"),
1124
+ }
1125
+
1126
+ cambrian_series = {
1127
+ "cambrian_8b": partial(Cambrian, model_path="nyu-visionx/cambrian-8b"),
1128
+ "cambrian_13b": partial(Cambrian, model_path="nyu-visionx/cambrian-13b"),
1129
+ "cambrian_34b": partial(Cambrian, model_path="nyu-visionx/cambrian-34b"),
1130
+ }
1131
+
1132
+ chameleon_series = {
1133
+ "chameleon_7b": partial(Chameleon, model_path="facebook/chameleon-7b"),
1134
+ "chameleon_30b": partial(Chameleon, model_path="facebook/chameleon-30b"),
1135
+ }
1136
+
1137
+ vila_series = {
1138
+ "VILA1.5-3b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-3b"),
1139
+ "Llama-3-VILA1.5-8b": partial(
1140
+ VILA, model_path="Efficient-Large-Model/Llama-3-VILA1.5-8b"
1141
+ ),
1142
+ "VILA1.5-13b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-13b"),
1143
+ "VILA1.5-40b": partial(VILA, model_path="Efficient-Large-Model/VILA1.5-40b"),
1144
+ "NVILA-8B": partial(NVILA, model_path="Efficient-Large-Model/NVILA-8B"),
1145
+ "NVILA-15B": partial(NVILA, model_path="Efficient-Large-Model/NVILA-15B"),
1146
+ }
1147
+
1148
+ ovis_series = {
1149
+ "Ovis1.5-Llama3-8B": partial(Ovis, model_path="AIDC-AI/Ovis1.5-Llama3-8B"),
1150
+ "Ovis1.5-Gemma2-9B": partial(Ovis, model_path="AIDC-AI/Ovis1.5-Gemma2-9B"),
1151
+ "Ovis1.6-Gemma2-9B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Gemma2-9B"),
1152
+ "Ovis1.6-Llama3.2-3B": partial(Ovis1_6, model_path="AIDC-AI/Ovis1.6-Llama3.2-3B"),
1153
+ "Ovis1.6-Gemma2-27B": partial(
1154
+ Ovis1_6_Plus, model_path="AIDC-AI/Ovis1.6-Gemma2-27B"
1155
+ ),
1156
+ "Ovis2-1B": partial(Ovis2, model_path="AIDC-AI/Ovis2-1B"),
1157
+ "Ovis2-2B": partial(Ovis2, model_path="AIDC-AI/Ovis2-2B"),
1158
+ "Ovis2-4B": partial(Ovis2, model_path="AIDC-AI/Ovis2-4B"),
1159
+ "Ovis2-8B": partial(Ovis2, model_path="AIDC-AI/Ovis2-8B"),
1160
+ "Ovis2-16B": partial(Ovis2, model_path="AIDC-AI/Ovis2-16B"),
1161
+ "Ovis2-34B": partial(Ovis2, model_path="AIDC-AI/Ovis2-34B"),
1162
+ "Ovis-U1-3B": partial(OvisU1, model_path="AIDC-AI/Ovis-U1-3B"),
1163
+ }
1164
+
1165
+ mantis_series = {
1166
+ "Mantis-8B-siglip-llama3": partial(
1167
+ Mantis, model_path="TIGER-Lab/Mantis-8B-siglip-llama3"
1168
+ ),
1169
+ "Mantis-8B-clip-llama3": partial(
1170
+ Mantis, model_path="TIGER-Lab/Mantis-8B-clip-llama3"
1171
+ ),
1172
+ "Mantis-8B-Idefics2": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Idefics2"),
1173
+ "Mantis-8B-Fuyu": partial(Mantis, model_path="TIGER-Lab/Mantis-8B-Fuyu"),
1174
+ }
1175
+
1176
+ phi3_series = {
1177
+ "Phi-3-Vision": partial(
1178
+ Phi3Vision, model_path="microsoft/Phi-3-vision-128k-instruct"
1179
+ ),
1180
+ "Phi-3.5-Vision": partial(
1181
+ Phi3_5Vision, model_path="microsoft/Phi-3.5-vision-instruct"
1182
+ ),
1183
+ }
1184
+
1185
+ phi4_series = {
1186
+ 'Phi-4-Vision': partial(Phi4Multimodal, model_path='microsoft/Phi-4-multimodal-instruct'),
1187
+ }
1188
+
1189
+ xgen_mm_series = {
1190
+ "xgen-mm-phi3-interleave-r-v1.5": partial(
1191
+ XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-interleave-r-v1.5"
1192
+ ),
1193
+ "xgen-mm-phi3-dpo-r-v1.5": partial(
1194
+ XGenMM, model_path="Salesforce/xgen-mm-phi3-mini-instruct-dpo-r-v1.5"
1195
+ ),
1196
+ }
1197
+
1198
+ hawkvl_series = {
1199
+ "HawkVL-2B": partial(
1200
+ HawkVL,
1201
+ model_path="xjtupanda/HawkVL-2B",
1202
+ min_pixels=4 * 28 * 28,
1203
+ max_pixels=6800 * 28 * 28,
1204
+ use_custom_prompt=True
1205
+ )
1206
+ }
1207
+
1208
+ qwen2vl_series = {
1209
+ "Qwen-VL-Max-20250813": partial(
1210
+ Qwen2VLAPI,
1211
+ model="qwen-vl-max-2025-08-13",
1212
+ min_pixels=1280 * 28 * 28,
1213
+ max_pixels=16384 * 28 * 28,
1214
+ max_length=8192,
1215
+ ),
1216
+ "Qwen-VL-Max-0809": partial(
1217
+ Qwen2VLAPI,
1218
+ model="qwen-vl-max-0809",
1219
+ min_pixels=1280 * 28 * 28,
1220
+ max_pixels=16384 * 28 * 28,
1221
+ ),
1222
+ "Qwen-VL-Plus-0809": partial(
1223
+ Qwen2VLAPI,
1224
+ model="qwen-vl-plus-0809",
1225
+ min_pixels=1280 * 28 * 28,
1226
+ max_pixels=16384 * 28 * 28,
1227
+ ),
1228
+ "QVQ-72B-Preview": partial(
1229
+ Qwen2VLChat,
1230
+ model_path="Qwen/QVQ-72B-Preview",
1231
+ min_pixels=1280 * 28 * 28,
1232
+ max_pixels=16384 * 28 * 28,
1233
+ system_prompt="You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
1234
+ max_new_tokens=8192,
1235
+ post_process=False,
1236
+ ),
1237
+ "Qwen2-VL-72B-Instruct": partial(
1238
+ Qwen2VLChat,
1239
+ model_path="Qwen/Qwen2-VL-72B-Instruct",
1240
+ min_pixels=1280 * 28 * 28,
1241
+ max_pixels=16384 * 28 * 28,
1242
+ ),
1243
+ "Qwen2-VL-7B-Instruct": partial(
1244
+ Qwen2VLChat,
1245
+ model_path="Qwen/Qwen2-VL-7B-Instruct",
1246
+ min_pixels=1280 * 28 * 28,
1247
+ max_pixels=16384 * 28 * 28,
1248
+ ),
1249
+ "Qwen2-VL-7B-Instruct-AWQ": partial(
1250
+ Qwen2VLChat,
1251
+ model_path="Qwen/Qwen2-VL-7B-Instruct-AWQ",
1252
+ min_pixels=1280 * 28 * 28,
1253
+ max_pixels=16384 * 28 * 28,
1254
+ ),
1255
+ "Qwen2-VL-7B-Instruct-GPTQ-Int4": partial(
1256
+ Qwen2VLChat,
1257
+ model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int4",
1258
+ min_pixels=1280 * 28 * 28,
1259
+ max_pixels=16384 * 28 * 28,
1260
+ ),
1261
+ "Qwen2-VL-7B-Instruct-GPTQ-Int8": partial(
1262
+ Qwen2VLChat,
1263
+ model_path="Qwen/Qwen2-VL-7B-Instruct-GPTQ-Int8",
1264
+ min_pixels=1280 * 28 * 28,
1265
+ max_pixels=16384 * 28 * 28,
1266
+ ),
1267
+ "Qwen2-VL-2B-Instruct": partial(
1268
+ Qwen2VLChat,
1269
+ model_path="Qwen/Qwen2-VL-2B-Instruct",
1270
+ min_pixels=1280 * 28 * 28,
1271
+ max_pixels=16384 * 28 * 28,
1272
+ ),
1273
+ "Qwen2-VL-2B-Instruct-AWQ": partial(
1274
+ Qwen2VLChat,
1275
+ model_path="Qwen/Qwen2-VL-2B-Instruct-AWQ",
1276
+ min_pixels=1280 * 28 * 28,
1277
+ max_pixels=16384 * 28 * 28,
1278
+ ),
1279
+ "Qwen2-VL-2B-Instruct-GPTQ-Int4": partial(
1280
+ Qwen2VLChat,
1281
+ model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4",
1282
+ min_pixels=1280 * 28 * 28,
1283
+ max_pixels=16384 * 28 * 28,
1284
+ ),
1285
+ "Qwen2-VL-2B-Instruct-GPTQ-Int8": partial(
1286
+ Qwen2VLChat,
1287
+ model_path="Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8",
1288
+ min_pixels=1280 * 28 * 28,
1289
+ max_pixels=16384 * 28 * 28,
1290
+ ),
1291
+ "XinYuan-VL-2B-Instruct": partial(
1292
+ Qwen2VLChat,
1293
+ model_path="Cylingo/Xinyuan-VL-2B",
1294
+ min_pixels=1280 * 28 * 28,
1295
+ max_pixels=16384 * 28 * 28,
1296
+ ),
1297
+ "Qwen2.5-VL-3B-Instruct": partial(
1298
+ Qwen2VLChat,
1299
+ model_path="Qwen/Qwen2.5-VL-3B-Instruct",
1300
+ min_pixels=1280 * 28 * 28,
1301
+ max_pixels=16384 * 28 * 28,
1302
+ use_custom_prompt=False,
1303
+ ),
1304
+ "Qwen2.5-VL-3B-Instruct-AWQ": partial(
1305
+ Qwen2VLChat,
1306
+ model_path="Qwen/Qwen2.5-VL-3B-Instruct-AWQ",
1307
+ min_pixels=1280 * 28 * 28,
1308
+ max_pixels=16384 * 28 * 28,
1309
+ use_custom_prompt=False,
1310
+ ),
1311
+ "Qwen2.5-VL-7B-Instruct": partial(
1312
+ Qwen2VLChat,
1313
+ model_path="Qwen/Qwen2.5-VL-7B-Instruct",
1314
+ min_pixels=1280 * 28 * 28,
1315
+ max_pixels=16384 * 28 * 28,
1316
+ use_custom_prompt=False,
1317
+ ),
1318
+ "Qwen2.5-VL-7B-Instruct-ForVideo": partial(
1319
+ Qwen2VLChat,
1320
+ model_path="Qwen/Qwen2.5-VL-7B-Instruct",
1321
+ min_pixels=128 * 28 * 28,
1322
+ max_pixels=768 * 28 * 28,
1323
+ total_pixels=24576 * 28 * 28,
1324
+ use_custom_prompt=False,
1325
+ ),
1326
+ "Qwen2.5-VL-7B-Instruct-AWQ": partial(
1327
+ Qwen2VLChat,
1328
+ model_path="Qwen/Qwen2.5-VL-7B-Instruct-AWQ",
1329
+ min_pixels=1280 * 28 * 28,
1330
+ max_pixels=16384 * 28 * 28,
1331
+ use_custom_prompt=False,
1332
+ ),
1333
+ "Qwen2.5-VL-32B-Instruct": partial(
1334
+ Qwen2VLChat,
1335
+ model_path="Qwen/Qwen2.5-VL-32B-Instruct",
1336
+ min_pixels=1280 * 28 * 28,
1337
+ max_pixels=16384 * 28 * 28,
1338
+ use_custom_prompt=False,
1339
+ ),
1340
+ "Qwen2.5-VL-72B-Instruct": partial(
1341
+ Qwen2VLChat,
1342
+ model_path="Qwen/Qwen2.5-VL-72B-Instruct",
1343
+ min_pixels=1280 * 28 * 28,
1344
+ max_pixels=16384 * 28 * 28,
1345
+ use_custom_prompt=False,
1346
+ ),
1347
+ "MiMo-VL-7B-SFT": partial(
1348
+ Qwen2VLChat,
1349
+ model_path="XiaomiMiMo/MiMo-VL-7B-SFT",
1350
+ min_pixels=1280 * 28 * 28,
1351
+ max_pixels=16384 * 28 * 28,
1352
+ use_custom_prompt=False,
1353
+ use_lmdeploy=True
1354
+ ),
1355
+ "MiMo-VL-7B-RL": partial(
1356
+ Qwen2VLChat,
1357
+ model_path="XiaomiMiMo/MiMo-VL-7B-RL",
1358
+ min_pixels=1280 * 28 * 28,
1359
+ max_pixels=16384 * 28 * 28,
1360
+ use_custom_prompt=False,
1361
+ use_lmdeploy=True
1362
+ ),
1363
+ "Qwen2.5-VL-72B-Instruct-ForVideo": partial(
1364
+ Qwen2VLChat,
1365
+ model_path="Qwen/Qwen2.5-VL-72B-Instruct",
1366
+ min_pixels=128 * 28 * 28,
1367
+ max_pixels=768 * 28 * 28,
1368
+ total_pixels=24576 * 28 * 28,
1369
+ use_custom_prompt=False,
1370
+ ),
1371
+ "Qwen2.5-VL-72B-Instruct-AWQ": partial(
1372
+ Qwen2VLChat,
1373
+ model_path="Qwen/Qwen2.5-VL-72B-Instruct-AWQ",
1374
+ min_pixels=1280 * 28 * 28,
1375
+ max_pixels=16384 * 28 * 28,
1376
+ use_custom_prompt=False,
1377
+ ),
1378
+ "Qwen2.5-Omni-7B-ForVideo": partial(
1379
+ Qwen2VLChat,
1380
+ model_path="Qwen/Qwen2.5-Omni-7B",
1381
+ min_pixels=128 * 28 * 28,
1382
+ max_pixels=768 * 28 * 28,
1383
+ total_pixels=24576 * 28 * 28,
1384
+ use_custom_prompt=False,
1385
+ use_audio_in_video=True,  # enable audio in video inputs
1386
+ ),
1387
+ "Qwen2.5-Omni-7B": partial(
1388
+ Qwen2VLChat,
1389
+ model_path="Qwen/Qwen2.5-Omni-7B",
1390
+ min_pixels=1280 * 28 * 28,
1391
+ max_pixels=16384 * 28 * 28,
1392
+ use_custom_prompt=False,
1393
+ ),
1394
+ 'VLM-R1': partial(
1395
+ VLMR1Chat,
1396
+ model_path='omlab/VLM-R1-Qwen2.5VL-3B-Math-0305',
1397
+ min_pixels=1280*28*28,
1398
+ max_pixels=16384*28*28,
1399
+ use_custom_prompt=False),
1400
+ 'VLAA-Thinker-Qwen2.5VL-3B': partial(
1401
+ VLAAThinkerChat,
1402
+ model_path='UCSC-VLAA/VLAA-Thinker-Qwen2.5VL-3B',
1403
+ min_pixels=1280*28*28,
1404
+ max_pixels=16384*28*28,
1405
+ use_custom_prompt=False,
1406
+ post_process=True, # post processing for evaluation
1407
+ system_prompt=(''
1408
+ "You are VL-Thinking🤔, a helpful assistant with excellent reasoning ability."
1409
+ " A user asks you a question, and you should try to solve it."
1410
+ " You should first think about the reasoning process in the mind and then provides the user with the answer."
1411
+ " The reasoning process and answer are enclosed within <think> </think> and"
1412
+ "<answer> </answer> tags, respectively, i.e., <think> reasoning process here </think>"
1413
+ "<answer> answer here </answer>"
1414
+ ),
1415
+ ),
1416
+ 'VLAA-Thinker-Qwen2.5VL-7B': partial(
1417
+ VLAAThinkerChat,
1418
+ model_path='UCSC-VLAA/VLAA-Thinker-Qwen2.5VL-7B',
1419
+ min_pixels=1280*28*28,
1420
+ max_pixels=16384*28*28,
1421
+ use_custom_prompt=False,
1422
+ post_process=True, # post processing for evaluation
1423
+ system_prompt=(''
1424
+ "You are VL-Thinking🤔, a helpful assistant with excellent reasoning ability."
1425
+ " A user asks you a question, and you should try to solve it."
1426
+ " You should first think about the reasoning process in the mind and then provides the user with the answer."
1427
+ " The reasoning process and answer are enclosed within <think> </think> and"
1428
+ "<answer> </answer> tags, respectively, i.e., <think> reasoning process here </think>"
1429
+ "<answer> answer here </answer>"
1430
+ ),
1431
+ ),
1432
+ 'WeThink-Qwen2.5VL-7B': partial(
1433
+ WeThinkVL,
1434
+ model_path='yangjie-cv/WeThink-Qwen2.5VL-7B',
1435
+ min_pixels=1280*28*28,
1436
+ max_pixels=16384*28*28,
1437
+ use_custom_prompt=False,
1438
+ system_prompt=("You FIRST think about the reasoning process as an internal monologue and then provide the final answer.\nThe reasoning process MUST BE enclosed within <think> </think> tags. The final answer MUST BE enclosed within <answer> </answer> tags."
1439
+ ),
1440
+ ),
1441
+ }
1442
+
1443
+ slime_series = {
1444
+ "Slime-7B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-7B"),
1445
+ "Slime-8B": partial(SliME, model_path="yifanzhang114/SliME-Llama3-8B"),
1446
+ "Slime-13B": partial(SliME, model_path="yifanzhang114/SliME-vicuna-13B"),
1447
+ }
1448
+
1449
+ eagle_series = {
1450
+ "Eagle-X4-8B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-8B-Plus"),
1451
+ "Eagle-X4-13B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X4-13B-Plus"),
1452
+ "Eagle-X5-7B": partial(Eagle, model_path="NVEagle/Eagle-X5-7B"),
1453
+ "Eagle-X5-13B": partial(Eagle, model_path="NVEagle/Eagle-X5-13B"),
1454
+ "Eagle-X5-13B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-13B-Chat"),
1455
+ "Eagle-X5-34B-Chat": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Chat"),
1456
+ "Eagle-X5-34B-Plus": partial(Eagle, model_path="NVEagle/Eagle-X5-34B-Plus"),
1457
+ }
1458
+
1459
+ moondream_series = {
1460
+ "Moondream1": partial(Moondream1, model_path="vikhyatk/moondream1"),
1461
+ "Moondream2": partial(Moondream2, model_path="vikhyatk/moondream2"),
1462
+ }
1463
+
1464
+ llama_series = {
1465
+ "Llama-3.2-11B-Vision-Instruct": partial(
1466
+ llama_vision, model_path="meta-llama/Llama-3.2-11B-Vision-Instruct"
1467
+ ),
1468
+ "LLaVA-CoT": partial(llama_vision, model_path="Xkev/Llama-3.2V-11B-cot"),
1469
+ "Llama-3.2-90B-Vision-Instruct": partial(
1470
+ llama_vision, model_path="meta-llama/Llama-3.2-90B-Vision-Instruct"
1471
+ ),
1472
+ "Llama-4-Scout-17B-16E-Instruct": partial(
1473
+ llama4, model_path="meta-llama/Llama-4-Scout-17B-16E-Instruct", use_vllm=True
1474
+ ),
1475
+ }
1476
+
1477
+ molmo_series = {
1478
+ "molmoE-1B-0924": partial(molmo, model_path="allenai/MolmoE-1B-0924"),
1479
+ "molmo-7B-D-0924": partial(molmo, model_path="allenai/Molmo-7B-D-0924"),
1480
+ "molmo-7B-O-0924": partial(molmo, model_path="allenai/Molmo-7B-O-0924"),
1481
+ "molmo-72B-0924": partial(molmo, model_path="allenai/Molmo-72B-0924"),
1482
+ }
1483
+
1484
+ kosmos_series = {
1485
+ "Kosmos2": partial(Kosmos2, model_path="microsoft/kosmos-2-patch14-224")
1486
+ }
1487
+
1488
+ points_series = {
1489
+ "POINTS-Yi-1.5-9B-Chat": partial(
1490
+ POINTS, model_path="WePOINTS/POINTS-Yi-1-5-9B-Chat"
1491
+ ),
1492
+ "POINTS-Qwen-2.5-7B-Chat": partial(
1493
+ POINTS, model_path="WePOINTS/POINTS-Qwen-2-5-7B-Chat"
1494
+ ),
1495
+ "POINTSV15-Qwen-2.5-7B-Chat": partial(
1496
+ POINTSV15, model_path="WePOINTS/POINTS-1-5-Qwen-2-5-7B-Chat"
1497
+ ),
1498
+ }
1499
+
1500
+ nvlm_series = {
1501
+ "NVLM": partial(NVLM, model_path="nvidia/NVLM-D-72B"),
1502
+ }
1503
+
1504
+ vintern_series = {
1505
+ "Vintern-3B-beta": partial(VinternChat, model_path="5CD-AI/Vintern-3B-beta"),
1506
+ "Vintern-1B-v2": partial(VinternChat, model_path="5CD-AI/Vintern-1B-v2"),
1507
+ }
1508
+
1509
+ aria_series = {"Aria": partial(Aria, model_path="rhymes-ai/Aria")}
1510
+
1511
+ h2ovl_series = {
1512
+ "h2ovl-mississippi-2b": partial(H2OVLChat, model_path="h2oai/h2ovl-mississippi-2b"),
1513
+ "h2ovl-mississippi-1b": partial(
1514
+ H2OVLChat, model_path="h2oai/h2ovl-mississippi-800m"
1515
+ ),
1516
+ }
1517
+
1518
+ valley_series = {
1519
+ "valley2": partial(
1520
+ Valley2Chat, model_path="bytedance-research/Valley-Eagle-7B"
1521
+ ),
1522
+ "valley2_dpo": partial(
1523
+ Valley2Chat, model_path="bytedance-research/Valley2-DPO"
1524
+ ),
1525
+ }
1526
+
1527
+ ola_series = {
1528
+ "ola": partial(Ola, model_path="THUdyh/Ola-7b"),
1529
+ }
1530
+
1531
+ xvl_series = {
1532
+ "X-VL-4B": partial(X_VL_HF, model_path="YannQi/X-VL-4B", temperature=0, retry=10),
1533
+ }
1534
+
1535
+ ross_series = {
1536
+ "ross-qwen2-7b": partial(Ross, model_path="HaochenWang/ross-qwen2-7b"),
1537
+ }
1538
+
1539
+ ursa_series = {
1540
+ "URSA-8B": partial(UrsaChat, model_path="URSA-MATH/URSA-8B"),
1541
+ "URSA-8B-PS-GRPO": partial(UrsaChat, model_path="URSA-MATH/URSA-8B-PS-GRPO")
1542
+ }
1543
+
1544
+ gemma_series = {
1545
+ "paligemma-3b-mix-448": partial(
1546
+ PaliGemma, model_path="google/paligemma-3b-mix-448"
1547
+ ),
1548
+ 'Gemma3-4B': partial(Gemma3, model_path='google/gemma-3-4b-it'),
1549
+ 'Gemma3-12B': partial(Gemma3, model_path='google/gemma-3-12b-it'),
1550
+ 'Gemma3-27B': partial(Gemma3, model_path='google/gemma-3-27b-it')
1551
+ }
1552
+
1553
+ aguvis_series = {
1554
+ "aguvis_7b": partial(
1555
+ Qwen2VLChatAguvis,
1556
+ model_path=os.getenv(
1557
+ "EVAL_MODEL",
1558
+ "xlangai/Aguvis-7B-720P",
1559
+ ),
1560
+ min_pixels=256 * 28 * 28,
1561
+ max_pixels=46 * 26 * 28 * 28,
1562
+ use_custom_prompt=False,
1563
+ mode='grounding',
1564
+ )
1565
+ }
1566
+
1567
+ kimi_series = {
1568
+ 'Kimi-VL-A3B-Thinking': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Thinking'),
1569
+ 'Kimi-VL-A3B-Instruct': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Instruct'),
1570
+ 'Kimi-VL-A3B-Thinking-2506': partial(KimiVL, model_path='moonshotai/Kimi-VL-A3B-Thinking-2506', temperature=0.8, max_tokens=32768, extract_summary=True)
1571
+ }
1572
+
1573
+ flash_vl = {
1574
+ 'Flash-VL-2B-Dynamic-ISS': partial(FlashVL, model_path='FlashVL/FlashVL-2B-Dynamic-ISS')
1575
+ }
1576
+
1577
+
1578
+ oryx_series = {
1579
+ 'oryx': partial(Oryx, model_path="THUdyh/Oryx-1.5-7B"),
1580
+ }
1581
+
1582
+ # recommended serving command: vllm serve moonshotai/Kimi-VL-A3B-Thinking-2506
1583
+ # --served-model-name api-kimi-vl-thinking-2506 --trust-remote-code
1584
+ # --tensor-parallel-size 2 --max-num-batched-tokens 131072
1585
+ # --max-model-len 131072 --limit-mm-per-prompt image=256
1586
+ kimi_vllm_series = {
1587
+ "api-kimi-vl-thinking-2506": partial(
1588
+ KimiVLAPI,
1589
+ model="api-kimi-vl-thinking-2506",
1590
+ ),
1591
+ "api-kimi-vl-thinking": partial(
1592
+ KimiVLAPI,
1593
+ model="api-kimi-vl-thinking",
1594
+ ),
1595
+ "api-kimi-vl": partial(
1596
+ KimiVLAPI,
1597
+ model="api-kimi-vl",
1598
+ max_new_tokens=2048,
1599
+ temperature=0,
1600
+ ),
1601
+ }
1602
+
1603
+
1604
+ treevgr_series = {
1605
+ 'TreeVGR-7B': partial(
1606
+ TreeVGR,
1607
+ model_path='HaochenWang/TreeVGR-7B',
1608
+ min_pixels=1280*28*28, max_pixels=16384*28*28,
1609
+ ),
1610
+ }
1611
+
1612
+ # QTuneVL series
1613
+ qtunevl_series = {
1614
+ "QTuneVL1_5-2B": partial(
1615
+ QTuneVLChat, model_path="hanchaow/QTuneVL1_5-2B", version="V1.5"
1616
+ ),
1617
+
1618
+ "QTuneVL1_5-3B": partial(
1619
+ QTuneVL,
1620
+ model_path="hanchaow/QTuneVL1_5-3B",
1621
+ min_pixels=1280 * 28 * 28,
1622
+ max_pixels=16384 * 28 * 28,
1623
+ use_custom_prompt=True,
1624
+ post_process=True
1625
+ ),
1626
+ }
1627
+
1628
+ logics_series = {
1629
+ "Logics-Thinking": partial(Logics_Thinking,model_path='Logics-MLLM/Logics-Thinking'),
1630
+ }
1631
+
1632
+
1633
+ internvl_groups = [
1634
+ internvl, internvl2, internvl2_5, mini_internvl, internvl2_5_mpo,
1635
+ internvl3, internvl3_5
1636
+ ]
1637
+ internvl_series = {}
1638
+ for group in internvl_groups:
1639
+ internvl_series.update(group)
1640
+
1641
+ supported_VLM = {}
1642
+
1643
+ model_groups = [
1644
+ ungrouped, o1_apis, api_models, xtuner_series, qwen_series, llava_series, granite_vision_series,
1645
+ internvl_series, yivl_series, xcomposer_series, minigpt4_series,
1646
+ idefics_series, instructblip_series, deepseekvl_series, deepseekvl2_series,
1647
+ janus_series, minicpm_series, cogvlm_series, wemm_series, cambrian_series,
1648
+ chameleon_series, video_models, ovis_series, vila_series, mantis_series,
1649
+ mmalaya_series, phi3_series, phi4_series, xgen_mm_series, qwen2vl_series,
1650
+ slime_series, eagle_series, moondream_series, llama_series, molmo_series,
1651
+ kosmos_series, points_series, nvlm_series, vintern_series, h2ovl_series,
1652
+ aria_series, smolvlm_series, sail_series, valley_series, vita_series,
1653
+ ross_series, emu_series, ola_series, ursa_series, gemma_series,
1654
+ long_vita_series, ristretto_series, kimi_series, aguvis_series, hawkvl_series,
1655
+ flash_vl, kimi_vllm_series, oryx_series, treevgr_series, varco_vision_series, qtunevl_series, xvl_series, thyme_series, logics_series
1656
+ ]
1657
+
1658
+ for grp in model_groups:
1659
+ supported_VLM.update(grp)
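As a quick usage sketch (editorial note, not part of this diff): each entry in supported_VLM stores a functools.partial, so a model is built by calling the stored partial and then queried through the shared generate interface. The model key and image path below are placeholders.

    from vlmeval.config import supported_VLM

    # Any key registered in the groups above works here; 'Qwen2-VL-7B-Instruct' is only an example.
    model = supported_VLM['Qwen2-VL-7B-Instruct']()
    # The message is an interleaved list of image paths and text prompts.
    response = model.generate(['demo.jpg', 'Describe this image.'])
    print(response)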
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/cgbench.cpython-310.pyc ADDED
Binary file (31 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dude.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/dynamath.cpython-310.pyc ADDED
Binary file (7.57 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_base.cpython-310.pyc ADDED
Binary file (6.94 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/image_ccocr.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmgenbench.cpython-310.pyc ADDED
Binary file (2.98 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmifeval.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mmlongbench.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/moat.cpython-310.pyc ADDED
Binary file (6.11 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/mvbench.cpython-310.pyc ADDED
Binary file (20.1 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/text_base.cpython-310.pyc ADDED
Binary file (3.4 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/vl_rewardbench.cpython-310.pyc ADDED
Binary file (6.56 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/wildvision.cpython-310.pyc ADDED
Binary file (7.68 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/__pycache__/worldsense.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
VLMEvalKit-sudoku/vlmeval/dataset/image_ccocr.py ADDED
@@ -0,0 +1,303 @@
1
+ # flake8: noqa
2
+
3
+ import os
4
+ import re
5
+ import tempfile
6
+ import json
7
+ from functools import partial
8
+ import pandas as pd
9
+
10
+ from .image_base import ImageBaseDataset
11
+ from ..smp import *
12
+ from ..smp.file import get_intermediate_file_path
13
+
14
+ # should be the same as FAIL_MSG defined in vlmeval/inference.py
15
+ FAIL_MSG = 'Failed to obtain answer via API.'
16
+
17
+
18
+ class CCOCRDataset(ImageBaseDataset):
19
+ TYPE = 'VQA'
20
+ DATASET_URL_MODELSCOPE = {
21
+ "CCOCR_DocParsing_DocPhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_chn_75.tsv",
22
+ "CCOCR_DocParsing_DocPhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_photo_eng_75.tsv",
23
+ "CCOCR_DocParsing_DocScanChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_chn_75.tsv",
24
+ "CCOCR_DocParsing_DocScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/doc/doc_scan_eng_75.tsv",
25
+ "CCOCR_DocParsing_TablePhotoChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_chn_75.tsv",
26
+ "CCOCR_DocParsing_TablePhotoEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_photo_eng_75.tsv",
27
+ "CCOCR_DocParsing_TableScanChn": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_chn_75.tsv",
28
+ "CCOCR_DocParsing_TableScanEng": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/table/table_scan_eng_75.tsv",
29
+ "CCOCR_DocParsing_MolecularHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/molecular/molecular_handwriting_100.tsv",
30
+ "CCOCR_DocParsing_FormulaHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/doc_parsing/formula/formula_handwriting_100.tsv",
31
+ "CCOCR_Kie_Sroie2019Word": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/sroie2019_word_347.tsv",
32
+ "CCOCR_Kie_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/CORD_100.tsv",
33
+ "CCOCR_Kie_EphoieScut": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/EPHOIE_SCUT_311.tsv",
34
+ "CCOCR_Kie_Poie": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/constrained_category/POIE_250.tsv",
35
+ "CCOCR_Kie_ColdSibr": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_SIBR_400.tsv",
36
+ "CCOCR_Kie_ColdCell": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/kie/open_category/COLD_CELL_600.tsv",
37
+ "CCOCR_MultiLanOcr_Arabic": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Arabic/Arabic_150.tsv",
38
+ "CCOCR_MultiLanOcr_French": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/French/French_150.tsv",
39
+ "CCOCR_MultiLanOcr_German": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/German/German_150.tsv",
40
+ "CCOCR_MultiLanOcr_Italian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Italian/Italian_150.tsv",
41
+ "CCOCR_MultiLanOcr_Japanese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Japanese/Japanese_150.tsv",
42
+ "CCOCR_MultiLanOcr_Korean": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Korean/Korean_150.tsv",
43
+ "CCOCR_MultiLanOcr_Portuguese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Portuguese/Portuguese_150.tsv",
44
+ "CCOCR_MultiLanOcr_Russian": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Russian/Russian_150.tsv",
45
+ "CCOCR_MultiLanOcr_Spanish": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Spanish/Spanish_150.tsv",
46
+ "CCOCR_MultiLanOcr_Vietnamese": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv",
47
+ "CCOCR_MultiSceneOcr_Cord": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/CORD_100.tsv",
48
+ "CCOCR_MultiSceneOcr_Funsd": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/FUNSD_50.tsv",
49
+ "CCOCR_MultiSceneOcr_Iam": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/IAM_50.tsv",
50
+ "CCOCR_MultiSceneOcr_ZhDoc": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_doc_100.tsv",
51
+ "CCOCR_MultiSceneOcr_ZhHandwriting": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/document_text/zh_handwriting_50.tsv",
52
+ "CCOCR_MultiSceneOcr_Hieragent": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/Hieragent_100.tsv",
53
+ "CCOCR_MultiSceneOcr_Ic15": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/IC15_500.tsv",
54
+ "CCOCR_MultiSceneOcr_Inversetext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/InverseText_500.tsv",
55
+ "CCOCR_MultiSceneOcr_Totaltext": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/TotalText_300.tsv",
56
+ "CCOCR_MultiSceneOcr_ZhScene": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/scene_text/zh_scene_450.tsv",
57
+ "CCOCR_MultiSceneOcr_UgcLaion": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/ugc_laion_400.tsv",
58
+ "CCOCR_MultiSceneOcr_ZhDense": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_dense_50.tsv",
59
+ "CCOCR_MultiSceneOcr_ZhVertical": "https://www.modelscope.cn/datasets/Qwen/CC-OCR/resolve/master/multi_scene_ocr/ugc_text/zh_vertical_100.tsv",
60
+ "CCOCR": "http://opencompass.openxlab.space/utils/VLMEval/CCOCR.tsv"
61
+ }
62
+
63
+ DATASET_URL_HUGGINGFACE = {
64
+ "CCOCR_DocParsing_DocPhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_chn_75.tsv",
65
+ "CCOCR_DocParsing_DocPhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_photo_eng_75.tsv",
66
+ "CCOCR_DocParsing_DocScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_chn_75.tsv",
67
+ "CCOCR_DocParsing_DocScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/doc/doc_scan_eng_75.tsv",
68
+ "CCOCR_DocParsing_TablePhotoChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_chn_75.tsv",
69
+ "CCOCR_DocParsing_TablePhotoEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_photo_eng_75.tsv",
70
+ "CCOCR_DocParsing_TableScanChn": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_chn_75.tsv",
71
+ "CCOCR_DocParsing_TableScanEng": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/table/table_scan_eng_75.tsv",
72
+ "CCOCR_DocParsing_MolecularHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/molecular/molecular_handwriting_100.tsv",
73
+ "CCOCR_DocParsing_FormulaHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/doc_parsing/formula/formula_handwriting_100.tsv",
74
+ "CCOCR_Kie_Sroie2019Word": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/sroie2019_word_347.tsv",
75
+ "CCOCR_Kie_Cord": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/CORD_100.tsv",
76
+ "CCOCR_Kie_EphoieScut": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/EPHOIE_SCUT_311.tsv",
77
+ "CCOCR_Kie_Poie": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/constrained_category/POIE_250.tsv",
78
+ "CCOCR_Kie_ColdSibr": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_SIBR_400.tsv",
79
+ "CCOCR_Kie_ColdCell": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/kie/open_category/COLD_CELL_600.tsv",
80
+ "CCOCR_MultiLanOcr_Arabic": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Arabic/Arabic_150.tsv",
81
+ "CCOCR_MultiLanOcr_French": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/French/French_150.tsv",
82
+ "CCOCR_MultiLanOcr_German": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/German/German_150.tsv",
83
+ "CCOCR_MultiLanOcr_Italian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Italian/Italian_150.tsv",
84
+ "CCOCR_MultiLanOcr_Japanese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Japanese/Japanese_150.tsv",
85
+ "CCOCR_MultiLanOcr_Korean": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Korean/Korean_150.tsv",
86
+ "CCOCR_MultiLanOcr_Portuguese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Portuguese/Portuguese_150.tsv",
87
+ "CCOCR_MultiLanOcr_Russian": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Russian/Russian_150.tsv",
88
+ "CCOCR_MultiLanOcr_Spanish": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Spanish/Spanish_150.tsv",
89
+ "CCOCR_MultiLanOcr_Vietnamese": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_lan_ocr/Vietnamese/Vietnamese_150.tsv",
90
+ "CCOCR_MultiSceneOcr_Cord": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/CORD_100.tsv",
91
+ "CCOCR_MultiSceneOcr_Funsd": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/FUNSD_50.tsv",
92
+ "CCOCR_MultiSceneOcr_Iam": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/IAM_50.tsv",
93
+ "CCOCR_MultiSceneOcr_ZhDoc": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_doc_100.tsv",
94
+ "CCOCR_MultiSceneOcr_ZhHandwriting": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/document_text/zh_handwriting_50.tsv",
95
+ "CCOCR_MultiSceneOcr_Hieragent": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/Hieragent_100.tsv",
96
+ "CCOCR_MultiSceneOcr_Ic15": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/IC15_500.tsv",
97
+ "CCOCR_MultiSceneOcr_Inversetext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/InverseText_500.tsv",
98
+ "CCOCR_MultiSceneOcr_Totaltext": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/TotalText_300.tsv",
99
+ "CCOCR_MultiSceneOcr_ZhScene": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/scene_text/zh_scene_450.tsv",
100
+ "CCOCR_MultiSceneOcr_UgcLaion": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/ugc_laion_400.tsv",
101
+ "CCOCR_MultiSceneOcr_ZhDense": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_dense_50.tsv",
102
+ "CCOCR_MultiSceneOcr_ZhVertical": "https://huggingface.co/datasets/wulipc/CC-OCR/resolve/main/multi_scene_ocr/ugc_text/zh_vertical_100.tsv",
103
+ "CCOCR": "http://opencompass.openxlab.space/utils/VLMEval/CCOCR.tsv"
104
+ }
105
+
106
+ # data source: defaults to ModelScope; set DATASET_URL = DATASET_URL_HUGGINGFACE to download from Hugging Face instead
107
+ DATASET_URL = DATASET_URL_MODELSCOPE
108
+ DATASET_MD5 = {
109
+ "CCOCR_DocParsing_DocPhotoChn": "9039dcbb31830d413261a95cfa29d97f",
110
+ "CCOCR_DocParsing_DocPhotoEng": "2ca0824881e1d7317626f2a19d902989",
111
+ "CCOCR_DocParsing_DocScanChn": "9e265c8aa760ebdf5c3bf9e892d55492",
112
+ "CCOCR_DocParsing_DocScanEng": "77d04637be3def86dbc2ce37ba64a704",
113
+ "CCOCR_DocParsing_TablePhotoChn": "c4dc85252ddad2b43a03a67b1d1ae983",
114
+ "CCOCR_DocParsing_TablePhotoEng": "02ab75d6169da0cd2ece9ce0ae14a479",
115
+ "CCOCR_DocParsing_TableScanChn": "f1f79959fdd01127df7377c9d46722f2",
116
+ "CCOCR_DocParsing_TableScanEng": "794903c7acf52bfe956eefba2166d14b",
117
+ "CCOCR_DocParsing_MolecularHandwriting": "30b7f7679b713ce000a939eca7b4078f",
118
+ "CCOCR_DocParsing_FormulaHandwriting": "e03047776ce5e79a61ae1c057e2a348e",
119
+ "CCOCR_Kie_Sroie2019Word": "3287d99a8e86a99b74171fa5a70f9acb",
120
+ "CCOCR_Kie_Cord": "ab297cadcbc7158884a301c366f3330a",
121
+ "CCOCR_Kie_EphoieScut": "bb8fa3ba7ea91cbf17be0904956ad3f3",
122
+ "CCOCR_Kie_Poie": "882b64317989ecbfed6518051cdffb14",
123
+ "CCOCR_Kie_ColdSibr": "109d5dad8b7081fb6a2f088e963196d4",
124
+ "CCOCR_Kie_ColdCell": "7b44c45b4d7d768d1dbdc08872fe7d3a",
125
+ "CCOCR_MultiLanOcr_Arabic": "e9a3f2bb9298d0b882ebc7a98980c3f3",
126
+ "CCOCR_MultiLanOcr_French": "729407ed2036c22e602eff645eddd40c",
127
+ "CCOCR_MultiLanOcr_German": "96fc2edae747f0ec95b0a6f9bf723022",
128
+ "CCOCR_MultiLanOcr_Italian": "29a508fa5d5a5e767497dd69e2430ebb",
129
+ "CCOCR_MultiLanOcr_Japanese": "bbcca96ccf25fff63597c2ab4f3ebb1f",
130
+ "CCOCR_MultiLanOcr_Korean": "0f55dbd24eba5edc189c91e124411641",
131
+ "CCOCR_MultiLanOcr_Portuguese": "a6fcf8831775a61aa631c0cf1c422ae7",
132
+ "CCOCR_MultiLanOcr_Russian": "19d2f84062a1699d3e9333912bd6b303",
133
+ "CCOCR_MultiLanOcr_Spanish": "f5a0cfa9f2ae4115c91c7b362034e591",
134
+ "CCOCR_MultiLanOcr_Vietnamese": "bf1cd4e83d91767f4906f81550cec8b9",
135
+ "CCOCR_MultiSceneOcr_Cord": "92943f0ccb4c5a196c574222e76759a0",
136
+ "CCOCR_MultiSceneOcr_Funsd": "229cc38d193edd00f4383610e98ee873",
137
+ "CCOCR_MultiSceneOcr_Iam": "d897a6d6c3880c65e752ec11b211204c",
138
+ "CCOCR_MultiSceneOcr_ZhDoc": "303682cc16c8bb51b2b896f8ceb8bd38",
139
+ "CCOCR_MultiSceneOcr_ZhHandwriting": "faa298d366bc05e5cfb39e334afb8eff",
140
+ "CCOCR_MultiSceneOcr_Hieragent": "6f132cdd0473d7cc145c3e3a08957dd6",
141
+ "CCOCR_MultiSceneOcr_Ic15": "3d94869f312a41d53d0578a06a2fb1f2",
142
+ "CCOCR_MultiSceneOcr_Inversetext": "e141d424a0c4cf9579064428a270f13d",
143
+ "CCOCR_MultiSceneOcr_Totaltext": "ca1daf81d49eeb57ef844b72a23c2e62",
144
+ "CCOCR_MultiSceneOcr_ZhScene": "9295152a66e6f117db8bfbb20a9013e6",
145
+ "CCOCR_MultiSceneOcr_UgcLaion": "8e9ea1fbf9d56532157e807eabf39b21",
146
+ "CCOCR_MultiSceneOcr_ZhDense": "de8f48ee0c8a2cf8ed7f2b3a81e6322d",
147
+ "CCOCR_MultiSceneOcr_ZhVertical": "4892b4aec6e7fd11e39aaea23712709b",
148
+ "CCOCR": "f8927b76510ffe04e59d45e3f8e8b620"
149
+ }
150
+
151
+ def _evaluate_single_dataset(self, sub_df, data_name, **judge_kwargs):
152
+ """
153
+ Evaluate a single sub-dataset from the combined CCOCR tsv
154
+ """
155
+ dict_list = sub_df.to_dict(orient='records')
156
+
157
+ gt_info, ptd_info = {}, {}
158
+ for data_info in dict_list:
159
+ image_name = data_info['image_name']
160
+ gt_info[image_name] = data_info['answer']
161
+
162
+ # skip samples whose inference failed (prediction == FAIL_MSG); they are excluded from ptd_info
163
+ if data_info['prediction'] != FAIL_MSG:
164
+ ptd_info[image_name] = data_info['prediction']
165
+
166
+ # Extract metadata from the sub-dataset
167
+ group_name = str(sub_df['category'].iloc[0])
168
+ op_name = str(sub_df['l2-category'].iloc[0])
169
+
170
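+ # metadata forwarded to the evaluator as keyword arguments (op, group, dataset, num)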
+ data_info = {"op": op_name, "group": group_name, "dataset": data_name, "num": len(gt_info)}
171
+
172
+ try:
173
+ from .utils.ccocr_evaluator import evaluator_map_info as ccocr_evaluator_map
174
+ except ImportError as err:
175
+ import warnings
176
+ warnings.warn('The CCOCR evaluator dependencies are not properly installed')
177
+ warnings.warn(f'{type(err)}: {err}')
178
+ return None, None
179
+
180
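+ # dispatch to the evaluator registered for this task group (e.g. kie, doc_parsing, multi_scene_ocr)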
+ eval_func = ccocr_evaluator_map.get(group_name, None)
181
+ if eval_func is None:
182
+ print(f"Warning: evaluator not defined for: {group_name}")
183
+ return None, None
184
+
185
+ meta_info, eval_info = eval_func(ptd_info, gt_info, **data_info)
186
+
187
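+ # return the full evaluation payload together with the per-dataset summary used later for aggregation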
+ return {"meta": meta_info, "evaluation": eval_info, "config": data_info}, eval_info.get("summary")
188
+
189
+ # Returns a dict of aggregated category scores; a CSV score file is also dumped alongside eval_file
190
+ def evaluate(self, eval_file, **judge_kwargs):
191
+ """
192
+ Evaluate the combined CCOCR dataset containing all sub-datasets
193
+ """
194
+ df = load(eval_file)
195
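+ # normalise predictions to strings so the comparisons below behave consistently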
+ df['prediction'] = [str(x) for x in df['prediction']]
196
+ required_column_list = ['answer', 'prediction', "category", "image_name", "l2-category", "split"]
197
+ for required_column in required_column_list:
198
+ assert required_column in df, "required column: {} NOT found".format(required_column)
199
+
200
+ # Create unique sub-dataset identifiers using category, l2-category, and split
201
+ df['sub_dataset_id'] = df['category'].astype(str) + '_' + df['l2-category'].astype(str) + '_' + df['split'].astype(str)
202
+
203
+ # Get all unique sub-datasets from the combined identifier
204
+ unique_sub_datasets = df['sub_dataset_id'].unique()
205
+
206
+ all_results = {}
207
+ all_summaries = {}
208
+
209
+ # Process each sub-dataset separately
210
+ for sub_dataset_id in tqdm(unique_sub_datasets, desc="Processing sub-datasets"):
211
+ print(f"Processing sub-dataset: {sub_dataset_id}")
212
+
213
+ # Filter data for this specific sub-dataset
214
+ sub_df = df[df['sub_dataset_id'] == sub_dataset_id].copy()
215
+
216
+ if len(sub_df) == 0:
217
+ print(f"Warning: No data found for sub-dataset: {sub_dataset_id}")
218
+ continue
219
+
220
+ # Use the split value as the dataset name passed to the evaluator
221
+ split_name = sub_df['split'].iloc[0]
222
+
223
+ # Evaluate this sub-dataset
224
+ result_info, summary = self._evaluate_single_dataset(sub_df, split_name, **judge_kwargs)
225
+
226
+ if result_info is not None:
227
+ all_results[sub_dataset_id] = result_info
228
+ all_summaries[sub_dataset_id] = summary
229
+ print(f"Completed evaluation for {sub_dataset_id}: {summary}")
230
+ else:
231
+ print(f"Failed to evaluate {sub_dataset_id}")
232
+
233
+ # Save comprehensive results
234
+ result_file = get_intermediate_file_path(eval_file, '_comprehensive_eval', 'json')
235
+ comprehensive_result = {
236
+ "meta": {"total_datasets": len(all_results), "datasets": list(all_results.keys())},
237
+ "results": all_results,
238
+ "summaries": all_summaries
239
+ }
240
+ dump(comprehensive_result, result_file)
241
+ print(f"Comprehensive results saved to: {result_file}")
242
+
243
+ # Final Aggregation Logic
244
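+ # each task group reports a different summary metric: macro F1 for the OCR groups, accuracy for KIE, and a parsing score for doc parsing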
+ lan_ocr_scores = []
245
+ scene_ocr_scores = []
246
+ kie_scores = []
247
+ doc_parsing_scores = []
248
+
249
+ for key, summary in all_summaries.items():
250
+ if not isinstance(summary, dict):
251
+ continue
252
+
253
+ if 'lan_ocr' in key:
254
+ if 'macro_f1_score' in summary:
255
+ lan_ocr_scores.append(summary['macro_f1_score'])
256
+ elif 'scene_ocr' in key:
257
+ if 'macro_f1_score' in summary:
258
+ scene_ocr_scores.append(summary['macro_f1_score'])
259
+ elif 'kie' in key:
260
+ if 'acc' in summary:
261
+ kie_scores.append(summary['acc'])
262
+ elif 'doc_parsing' in key:
263
+ if 'score' in summary:
264
+ doc_parsing_scores.append(summary['score'])
265
+
266
+ res = {}
267
+ category_averages = []
268
+
269
+ if lan_ocr_scores:
270
+ avg = sum(lan_ocr_scores) / len(lan_ocr_scores)
271
+ res['lan_ocr'] = avg
272
+ category_averages.append(avg)
273
+
274
+ if scene_ocr_scores:
275
+ avg = sum(scene_ocr_scores) / len(scene_ocr_scores)
276
+ res['scene_ocr'] = avg
277
+ category_averages.append(avg)
278
+
279
+ if kie_scores:
280
+ avg = sum(kie_scores) / len(kie_scores)
281
+ res['kie'] = avg
282
+ category_averages.append(avg)
283
+
284
+ if doc_parsing_scores:
285
+ avg = sum(doc_parsing_scores) / len(doc_parsing_scores)
286
+ res['doc_parsing'] = avg
287
+ category_averages.append(avg)
288
+
289
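+ # overall score: unweighted mean of whichever per-category averages are available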
+ if category_averages:
290
+ res['total'] = sum(category_averages) / len(category_averages)
291
+ else:
292
+ res['total'] = 0
293
+
294
+ print("\n" + "="*80)
295
+ print("Final Aggregated Results:")
296
+ print("="*80)
297
+ for k, v in res.items():
298
+ print(f" {k.upper():<20}: {v:.4f}")
299
+ print("="*80)
300
+ score_df = d2df(res)
301
+ score_file = get_intermediate_file_path(eval_file, '_acc', 'csv')
302
+ dump(score_df, score_file)
303
+ return res
VLMEvalKit-sudoku/vlmeval/dataset/mmgenbench.py ADDED
@@ -0,0 +1,69 @@
1
+ import warnings
2
+ import pandas as pd
3
+ from abc import abstractmethod
4
+ from ..smp import *
5
+ from .image_base import ImageBaseDataset
6
+
7
+
8
+ class MMGenBench(ImageBaseDataset):
9
+
10
+ prompt_list = [
11
+ """
12
+ # Role
13
+ You are an expert in the field of image understanding, focusing on the \
14
+ understanding of images and generating the image caption-prompt.
15
+
16
+ # Definition Explanation
17
+ image caption-prompt: Refers to the caption or description of an image, \
18
+ used to provide to a Text-to-Image model to generate a new image.
19
+ Text-to-Image model: Can generate a new image based on the provided image \
20
+ caption-prompt, such as stable diffusion 3, flux, and other image generation models.
21
+
22
+ # Task Description
23
+ Generate an image caption-prompt based on the input image.
24
+
25
+ # Key Points and Requirements
26
+ 1. Accurately understand the input image and precisely generate an image caption-prompt.
27
+ 2. The generated image caption-prompt, when provided to the Text-to-Image model, requires the \
28
+ Text-to-Image model to generate a new image that is as consistent as possible with the input image.
29
+ 3. The generated image caption-prompt must conform to the preferences of the Text-to-Image model.
30
+ 4. The generated image caption-prompt should describe the input image in as much \
31
+ detail as possible, and it should be between 20 to 60 words.
32
+
33
+ # Output Format
34
+ A string, that is the image caption-prompt. No extra output needed.
35
+ """
36
+ ]
37
+ TYPE = 'GenerateImgPrompt'
38
+ DATASET_URL = {
39
+ 'MMGenBench-Test': 'https://huggingface.co/datasets/lerogo/MMGenBench/resolve/main/MMGenBench-Test.tsv',
40
+ 'MMGenBench-Domain': 'https://huggingface.co/datasets/lerogo/MMGenBench/resolve/main/MMGenBench-Domain.tsv',
41
+ }
42
+ PROMPT_MAP = {
43
+ 'MMGenBench-Test': prompt_list[0],
44
+ 'MMGenBench-Domain': prompt_list[0],
45
+ }
46
+ DATASET_MD5 = {
47
+ 'MMGenBench-Test': "94f8dac6bbf7c20be403f99adeaa73da",
48
+ 'MMGenBench-Domain': "5c10daf6e2c5f08bdfb0701aa6db86bb",
49
+ }
50
+
51
+ def __init__(self, dataset='MMGenBench', **kwargs):
52
+ super().__init__(dataset, **kwargs)
53
+ warnings.warn('This dataset is for inference only and does not support direct output of evaluation results.\n')
54
+ warnings.warn('Please refer to "https://github.com/lerogo/MMGenBench" for more evaluation information.\n')
55
+
56
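+ # attach the fixed caption-generation prompt to every sample when the TSV has no per-sample question column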
+ def load_data(self, dataset):
57
+ data = super().load_data(dataset)
58
+ if 'question' not in data:
59
+ data['question'] = [(
60
+ self.PROMPT_MAP[dataset]
61
+ )] * len(data)
62
+ return data
63
+
64
+ # Evaluation is not supported for this dataset; this override only emits a warning and returns None
65
+ @abstractmethod
66
+ def evaluate(self, eval_file, **judge_kwargs):
67
+ warnings.warn('This evaluation method is not supported.\n')
68
+ warnings.warn('Please refer to "https://github.com/lerogo/MMGenBench" for more evaluation information.\n')
69
+ return None