HarborYuan committed
Commit e625c8d · verified · 1 parent: 2731ad6

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. __pycache__/configuration_intern_vit.cpython-310.pyc +0 -0
  3. __pycache__/configuration_intern_vit.cpython-311.pyc +0 -0
  4. __pycache__/configuration_intern_vit.cpython-39.pyc +0 -0
  5. __pycache__/configuration_internlm2.cpython-310.pyc +0 -0
  6. __pycache__/configuration_internlm2.cpython-311.pyc +0 -0
  7. __pycache__/configuration_internlm2.cpython-39.pyc +0 -0
  8. __pycache__/configuration_phi3.cpython-310.pyc +0 -0
  9. __pycache__/configuration_phi3.cpython-311.pyc +0 -0
  10. __pycache__/configuration_phi3.cpython-39.pyc +0 -0
  11. __pycache__/configuration_sa2va_chat.cpython-310.pyc +0 -0
  12. __pycache__/configuration_sa2va_chat.cpython-311.pyc +0 -0
  13. __pycache__/configuration_sa2va_chat.cpython-39.pyc +0 -0
  14. __pycache__/flash_attention.cpython-310.pyc +0 -0
  15. __pycache__/flash_attention.cpython-311.pyc +0 -0
  16. __pycache__/flash_attention.cpython-39.pyc +0 -0
  17. __pycache__/modeling_intern_vit.cpython-310.pyc +0 -0
  18. __pycache__/modeling_intern_vit.cpython-311.pyc +0 -0
  19. __pycache__/modeling_intern_vit.cpython-39.pyc +0 -0
  20. __pycache__/modeling_internlm2.cpython-310.pyc +0 -0
  21. __pycache__/modeling_internlm2.cpython-311.pyc +0 -0
  22. __pycache__/modeling_internlm2.cpython-39.pyc +0 -0
  23. __pycache__/modeling_phi3.cpython-310.pyc +0 -0
  24. __pycache__/modeling_phi3.cpython-311.pyc +0 -0
  25. __pycache__/modeling_phi3.cpython-39.pyc +0 -0
  26. __pycache__/modeling_sa2va_chat.cpython-310.pyc +0 -0
  27. __pycache__/modeling_sa2va_chat.cpython-311.pyc +0 -0
  28. __pycache__/modeling_sa2va_chat.cpython-39.pyc +0 -0
  29. __pycache__/sam2.cpython-310.pyc +0 -0
  30. __pycache__/sam2.cpython-311.pyc +3 -0
  31. __pycache__/sam2.cpython-39.pyc +0 -0
  32. __pycache__/templates.cpython-310.pyc +0 -0
  33. __pycache__/templates.cpython-311.pyc +0 -0
  34. __pycache__/templates.cpython-39.pyc +0 -0
  35. added_tokens.json +38 -0
  36. config.json +194 -0
  37. configuration_intern_vit.py +120 -0
  38. configuration_internlm2.py +150 -0
  39. configuration_phi3.py +211 -0
  40. configuration_sa2va_chat.py +107 -0
  41. flash_attention.py +76 -0
  42. generation_config.json +4 -0
  43. merges.txt +0 -0
  44. model-00001-of-00004.safetensors +3 -0
  45. model-00002-of-00004.safetensors +3 -0
  46. model-00003-of-00004.safetensors +3 -0
  47. model-00004-of-00004.safetensors +3 -0
  48. model.safetensors.index.json +0 -0
  49. modeling_intern_vit.py +364 -0
  50. modeling_internlm2.py +1429 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ __pycache__/sam2.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
__pycache__/configuration_intern_vit.cpython-310.pyc ADDED
Binary file (5.03 kB).
__pycache__/configuration_intern_vit.cpython-311.pyc ADDED
Binary file (6.04 kB).
__pycache__/configuration_intern_vit.cpython-39.pyc ADDED
Binary file (4.98 kB).
__pycache__/configuration_internlm2.cpython-310.pyc ADDED
Binary file (5.54 kB).
__pycache__/configuration_internlm2.cpython-311.pyc ADDED
Binary file (6.73 kB).
__pycache__/configuration_internlm2.cpython-39.pyc ADDED
Binary file (5.49 kB).
__pycache__/configuration_phi3.cpython-310.pyc ADDED
Binary file (8.67 kB).
__pycache__/configuration_phi3.cpython-311.pyc ADDED
Binary file (10.9 kB).
__pycache__/configuration_phi3.cpython-39.pyc ADDED
Binary file (8.64 kB).
__pycache__/configuration_sa2va_chat.cpython-310.pyc ADDED
Binary file (3.16 kB).
__pycache__/configuration_sa2va_chat.cpython-311.pyc ADDED
Binary file (5.18 kB).
__pycache__/configuration_sa2va_chat.cpython-39.pyc ADDED
Binary file (3.12 kB).
__pycache__/flash_attention.cpython-310.pyc ADDED
Binary file (2.71 kB).
__pycache__/flash_attention.cpython-311.pyc ADDED
Binary file (4.04 kB).
__pycache__/flash_attention.cpython-39.pyc ADDED
Binary file (2.69 kB).
__pycache__/modeling_intern_vit.cpython-310.pyc ADDED
Binary file (12.9 kB).
__pycache__/modeling_intern_vit.cpython-311.pyc ADDED
Binary file (24.4 kB).
__pycache__/modeling_intern_vit.cpython-39.pyc ADDED
Binary file (12.8 kB).
__pycache__/modeling_internlm2.cpython-310.pyc ADDED
Binary file (42.9 kB).
__pycache__/modeling_internlm2.cpython-311.pyc ADDED
Binary file (71.6 kB).
__pycache__/modeling_internlm2.cpython-39.pyc ADDED
Binary file (42.6 kB).
__pycache__/modeling_phi3.cpython-310.pyc ADDED
Binary file (44.2 kB).
__pycache__/modeling_phi3.cpython-311.pyc ADDED
Binary file (77.3 kB).
__pycache__/modeling_phi3.cpython-39.pyc ADDED
Binary file (43.9 kB).
__pycache__/modeling_sa2va_chat.cpython-310.pyc ADDED
Binary file (22.6 kB).
__pycache__/modeling_sa2va_chat.cpython-311.pyc ADDED
Binary file (47 kB).
__pycache__/modeling_sa2va_chat.cpython-39.pyc ADDED
Binary file (22.6 kB).
__pycache__/sam2.cpython-310.pyc ADDED
Binary file (94.8 kB).
__pycache__/sam2.cpython-311.pyc ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3697bcaee1fc4072e4bbd1c74e09d5920b960e777b5223cb42bb6aea9337f264
+ size 172618
__pycache__/sam2.cpython-39.pyc ADDED
Binary file (94.1 kB).
__pycache__/templates.cpython-310.pyc ADDED
Binary file (3.85 kB).
__pycache__/templates.cpython-311.pyc ADDED
Binary file (5.2 kB).
__pycache__/templates.cpython-39.pyc ADDED
Binary file (3.67 kB).
added_tokens.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "</box>": 151673,
+   "</img>": 151666,
+   "</p>": 151676,
+   "</quad>": 151669,
+   "</ref>": 151671,
+   "</tool_call>": 151658,
+   "</vp>": 151678,
+   "<IMG_CONTEXT>": 151667,
+   "<box>": 151672,
+   "<img>": 151665,
+   "<p>": 151675,
+   "<quad>": 151668,
+   "<ref>": 151670,
+   "<tool_call>": 151657,
+   "<vp>": 151677,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652,
+   "[SEG]": 151674
+ }
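Note: the img/ref/box/vp grounding tokens and [SEG] above are the Sa2VA-specific additions layered on top of the stock Qwen2.5 special tokens. A minimal sketch for confirming the mapping with the shipped tokenizer; "path/to/this/repo" is a placeholder for this repository's id or a local clone:

import torch
from transformers import AutoTokenizer

# Placeholder path; substitute this repository's id or a local clone.
tokenizer = AutoTokenizer.from_pretrained('path/to/this/repo', trust_remote_code=True)

# Ids should match added_tokens.json above.
print(tokenizer.convert_tokens_to_ids('[SEG]'))          # 151674
print(tokenizer.convert_tokens_to_ids('<IMG_CONTEXT>'))  # 151667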
config.json ADDED
@@ -0,0 +1,194 @@
+ {
+   "_commit_hash": null,
+   "architectures": [
+     "Sa2VAChatModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_sa2va_chat.Sa2VAChatConfig",
+     "AutoModel": "modeling_sa2va_chat.Sa2VAChatModel",
+     "AutoModelForCausalLM": "modeling_sa2va_chat.Sa2VAChatModel"
+   },
+   "downsample_ratio": 0.5,
+   "dynamic_image_size": true,
+   "force_image_size": 448,
+   "hidden_size": 2048,
+   "llm_config": {
+     "_name_or_path": "Qwen/Qwen2.5-3B-Instruct",
+     "add_cross_attention": false,
+     "architectures": [
+       "Qwen2ForCausalLM"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": 151643,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 151645,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "silu",
+     "hidden_size": 2048,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 11008,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 32768,
+     "max_window_layers": 70,
+     "min_length": 0,
+     "model_type": "qwen2",
+     "no_repeat_ngram_size": 0,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_hidden_layers": 36,
+     "num_key_value_heads": 2,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "rms_norm_eps": 1e-06,
+     "rope_theta": 1000000.0,
+     "sep_token_id": null,
+     "sliding_window": 32768,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": false,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.42.3",
+     "typical_p": 1.0,
+     "use_bfloat16": true,
+     "use_cache": true,
+     "use_sliding_window": false,
+     "vocab_size": 151679
+   },
+   "max_dynamic_patch": 12,
+   "min_dynamic_patch": 1,
+   "model_type": "sa2va_chat",
+   "pad2square": false,
+   "ps_version": "v2",
+   "select_layer": -1,
+   "template": "qwen_chat",
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": null,
+   "use_backbone_lora": 0,
+   "use_llm_lora": 0,
+   "use_thumbnail": true,
+   "vision_config": {
+     "_name_or_path": "",
+     "add_cross_attention": false,
+     "architectures": [
+       "InternVisionModel"
+     ],
+     "attention_dropout": 0.0,
+     "bad_words_ids": null,
+     "begin_suppress_tokens": null,
+     "bos_token_id": null,
+     "chunk_size_feed_forward": 0,
+     "cross_attention_hidden_size": null,
+     "decoder_start_token_id": null,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "drop_path_rate": 0.0,
+     "dropout": 0.0,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": null,
+     "exponential_decay_length_penalty": null,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "hidden_act": "gelu",
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "image_size": 448,
+     "initializer_factor": 1.0,
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-06,
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "intern_vit_6b",
+     "no_repeat_ngram_size": 0,
+     "norm_type": "layer_norm",
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_channels": 3,
+     "num_hidden_layers": 24,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": null,
+     "patch_size": 14,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "qk_normalization": false,
+     "qkv_bias": true,
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "suppress_tokens": null,
+     "task_specific_params": null,
+     "temperature": 1.0,
+     "tf_legacy_loss": false,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "bfloat16",
+     "torchscript": false,
+     "transformers_version": "4.42.3",
+     "typical_p": 1.0,
+     "use_bfloat16": true,
+     "use_flash_attn": true
+   }
+ }
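The auto_map block routes the transformers Auto classes to the configuration_sa2va_chat.py and modeling_sa2va_chat.py files added in this commit, so loading requires trust_remote_code=True. A minimal loading sketch, with the repo path as a placeholder:

import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    'path/to/this/repo',         # placeholder for this repository's id
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,      # lets auto_map import the custom classes
).eval()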
configuration_intern_vit.py ADDED
@@ -0,0 +1,120 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import os
+ from typing import Union
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+     instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             Number of color channels in the input images (e.g., 3 for RGB).
+         patch_size (`int`, *optional*, defaults to 14):
+             The size (resolution) of each patch.
+         image_size (`int`, *optional*, defaults to 224):
+             The size (resolution) of each image.
+         qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add a bias to the queries and values in the self-attention layers.
+         hidden_size (`int`, *optional*, defaults to 3200):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_attention_heads (`int`, *optional*, defaults to 25):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 12800):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         qk_normalization (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the queries and keys in the self-attention layers.
+         num_hidden_layers (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer encoder.
+         use_flash_attn (`bool`, *optional*, defaults to `True`):
+             Whether to use the flash attention mechanism.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the layer normalization layers.
+         dropout (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             Dropout rate for stochastic depth.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         initializer_factor (`float`, *optional*, defaults to 0.1):
+             A factor for layer scale.
+     """
+
+     model_type = 'intern_vit_6b'
+
+     def __init__(
+         self,
+         num_channels=3,
+         patch_size=14,
+         image_size=224,
+         qkv_bias=False,
+         hidden_size=3200,
+         num_attention_heads=25,
+         intermediate_size=12800,
+         qk_normalization=True,
+         num_hidden_layers=48,
+         use_flash_attn=True,
+         hidden_act='gelu',
+         norm_type='rms_norm',
+         layer_norm_eps=1e-6,
+         dropout=0.0,
+         drop_path_rate=0.0,
+         attention_dropout=0.0,
+         initializer_range=0.02,
+         initializer_factor=0.1,
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.dropout = dropout
+         self.drop_path_rate = drop_path_rate
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.image_size = image_size
+         self.initializer_range = initializer_range
+         self.initializer_factor = initializer_factor
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.norm_type = norm_type
+         self.qkv_bias = qkv_bias
+         self.qk_normalization = qk_normalization
+         self.use_flash_attn = use_flash_attn
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         if 'vision_config' in config_dict:
+             config_dict = config_dict['vision_config']
+
+         if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
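The from_pretrained override above is what lets this vision config load from the composite config.json: when the fetched dict contains a vision_config key, only that sub-dict is used. A sketch of both entry points, assuming the file is importable (e.g., run from a local clone) and with the repo path as a placeholder:

from configuration_intern_vit import InternVisionConfig

# From this repo's composite config.json: the override extracts "vision_config".
vision_cfg = InternVisionConfig.from_pretrained('path/to/this/repo')
print(vision_cfg.image_size, vision_cfg.patch_size)  # 448 14, per config.json above

# Direct construction falls back to the documented defaults.
print(InternVisionConfig().hidden_size)  # 3200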
configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM2 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
+ class InternLM2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
+     an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of InternLM2-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented
+             by the `inputs_ids` passed when calling [`InternLM2Model`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something
+             large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+     """
+     model_type = 'internlm2'
+     _auto_class = 'AutoConfig'
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act='silu',
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation='eager',
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = 'eager'
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
+                 f'got {self.rope_scaling}'
+             )
+         rope_scaling_type = self.rope_scaling.get('type', None)
+         rope_scaling_factor = self.rope_scaling.get('factor', None)
+         if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
configuration_phi3.py ADDED
@@ -0,0 +1,211 @@
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """ Phi-3 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+     'microsoft/Phi-3-mini-4k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json',
+     'microsoft/Phi-3-mini-128k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json',
+ }
+
+
+ class Phi3Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with
+     the defaults will yield a similar configuration to that of the
+     [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32064):
+             Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`Phi3Model`].
+         hidden_size (`int`, *optional*, defaults to 3072):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 8192):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by meanpooling all the original heads within that group. For more details check out
+             [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         resid_pdrop (`float`, *optional*, defaults to 0.0):
+             Dropout probability for mlp outputs.
+         embd_pdrop (`int`, *optional*, defaults to 0.0):
+             The dropout ratio for the embeddings.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio after computing the attention scores.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model might ever be used with.
+         original_max_position_embeddings (`int`, *optional*, defaults to 4096):
+             The maximum sequence length that this model was trained with. This is used to determine the size of the
+             original RoPE embeddings when using long scaling.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon value used for the RMSNorm.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`dict`, *optional*):
+             The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
+             contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or
+             `yarn`, and the `short_factor` and `long_factor` must be lists of numbers with the same length as the
+             hidden size divided by the number of attention heads divided by 2.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 32000):
+             The id of the "end-of-sequence" token.
+         pad_token_id (`int`, *optional*, defaults to 32000):
+             The id of the padding token.
+         sliding_window (`int`, *optional*):
+             Sliding window attention window size. If `None`, no sliding window is applied.
+
+     Example:
+
+     ```python
+     >>> from transformers import Phi3Model, Phi3Config
+
+     >>> # Initializing a Phi-3 style configuration
+     >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
+
+     >>> # Initializing a model from the configuration
+     >>> model = Phi3Model(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = 'phi3'
+     keys_to_ignore_at_inference = ['past_key_values']
+
+     def __init__(
+         self,
+         vocab_size=32064,
+         hidden_size=3072,
+         intermediate_size=8192,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         resid_pdrop=0.0,
+         embd_pdrop=0.0,
+         attention_dropout=0.0,
+         hidden_act='silu',
+         max_position_embeddings=4096,
+         original_max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         bos_token_id=1,
+         eos_token_id=32000,
+         pad_token_id=32000,
+         sliding_window=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.resid_pdrop = resid_pdrop
+         self.embd_pdrop = embd_pdrop
+         self.attention_dropout = attention_dropout
+         self.hidden_act = hidden_act
+         self.max_position_embeddings = max_position_embeddings
+         self.original_max_position_embeddings = original_max_position_embeddings
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.sliding_window = sliding_window
+
+         super().__init__(
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             pad_token_id=pad_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
+             raise ValueError(
+                 '`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, '
+                 f'got {self.rope_scaling}'
+             )
+         rope_scaling_type = self.rope_scaling.get('type', None)
+         rope_scaling_short_factor = self.rope_scaling.get('short_factor', None)
+         rope_scaling_long_factor = self.rope_scaling.get('long_factor', None)
+         if rope_scaling_type is None or rope_scaling_type not in ['su', 'yarn']:
+             raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
+         if not (
+             isinstance(rope_scaling_short_factor, list)
+             and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
+         ):
+             raise ValueError(
+                 f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
+             )
+         if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
+             raise ValueError(
+                 f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
+             )
+         if not (
+             isinstance(rope_scaling_long_factor, list)
+             and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
+         ):
+             raise ValueError(
+                 f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
+             )
+         if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
+             raise ValueError(
+                 f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
+             )
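With the defaults above (hidden_size=3072, num_attention_heads=32), the validation requires short_factor and long_factor to each carry 3072 // 32 // 2 = 48 numbers, one per rotary frequency pair. A minimal valid construction, as a sketch (module importable from a local clone; the factor values are hypothetical):

from configuration_phi3 import Phi3Config

n = 3072 // 32 // 2  # 48 entries per factor list
cfg = Phi3Config(rope_scaling={
    'type': 'su',
    'short_factor': [1.0] * n,  # identity scaling within the original window
    'long_factor': [4.0] * n,   # hypothetical stretch beyond it
})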
configuration_sa2va_chat.py ADDED
@@ -0,0 +1,107 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import copy
+
+ from transformers import AutoConfig, LlamaConfig, Qwen2Config
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+ from .configuration_internlm2 import InternLM2Config
+ from .configuration_phi3 import Phi3Config
+
+ logger = logging.get_logger(__name__)
+
+
+ class Sa2VAChatConfig(PretrainedConfig):
+     model_type = 'sa2va_chat'
+     is_composition = True
+
+     def __init__(
+         self,
+         vision_config=None,
+         llm_config=None,
+         use_backbone_lora=0,
+         use_llm_lora=0,
+         pad2square=False,
+         select_layer=-1,
+         force_image_size=None,
+         downsample_ratio=0.5,
+         template=None,
+         dynamic_image_size=False,
+         use_thumbnail=False,
+         ps_version='v1',
+         min_dynamic_patch=1,
+         max_dynamic_patch=6,
+         **kwargs):
+         super().__init__(**kwargs)
+         if vision_config is None:
+             vision_config = {}
+             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
+
+         if llm_config is None:
+             llm_config = {}
+             logger.info('llm_config is None. Initializing the llm config with default values (`LlamaConfig`).')
+
+         self.vision_config = InternVisionConfig(**vision_config)
+
+         if llm_config['architectures'][0] == 'LlamaForCausalLM':
+             self.llm_config = LlamaConfig(**llm_config)
+         elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
+             self.llm_config = InternLM2Config(**llm_config)
+         elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
+             self.llm_config = Phi3Config(**llm_config)
+         elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
+             self.llm_config = Qwen2Config(**llm_config)
+         else:
+             raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
+         self.use_backbone_lora = use_backbone_lora
+         self.use_llm_lora = use_llm_lora
+         self.pad2square = pad2square
+         self.select_layer = select_layer
+         self.force_image_size = force_image_size
+         self.downsample_ratio = downsample_ratio
+         self.template = template
+         self.dynamic_image_size = dynamic_image_size
+         self.use_thumbnail = use_thumbnail
+         self.ps_version = ps_version  # pixel shuffle version
+         self.min_dynamic_patch = min_dynamic_patch
+         self.max_dynamic_patch = max_dynamic_patch
+
+         self.hidden_size = self.llm_config.hidden_size
+         self.tie_word_embeddings = False
+
+         logger.info(f'vision_select_layer: {self.select_layer}')
+         logger.info(f'ps_version: {self.ps_version}')
+         logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
+         logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
+         """
+         output = copy.deepcopy(self.__dict__)
+         output['vision_config'] = self.vision_config.to_dict()
+         output['llm_config'] = self.llm_config.to_dict()
+         output['model_type'] = self.__class__.model_type
+         output['use_backbone_lora'] = self.use_backbone_lora
+         output['use_llm_lora'] = self.use_llm_lora
+         output['pad2square'] = self.pad2square
+         output['select_layer'] = self.select_layer
+         output['force_image_size'] = self.force_image_size
+         output['downsample_ratio'] = self.downsample_ratio
+         output['template'] = self.template
+         output['dynamic_image_size'] = self.dynamic_image_size
+         output['use_thumbnail'] = self.use_thumbnail
+         output['ps_version'] = self.ps_version
+         output['min_dynamic_patch'] = self.min_dynamic_patch
+         output['max_dynamic_patch'] = self.max_dynamic_patch
+
+         return output
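Sa2VAChatConfig dispatches on llm_config['architectures'][0] to pick the LLM config class, which is how the config.json above ends up wrapping a Qwen2Config. The simplest way to exercise this path is through AutoConfig, with the repo path as a placeholder:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained('path/to/this/repo', trust_remote_code=True)
print(type(cfg).__name__)                          # Sa2VAChatConfig
print(cfg.llm_config.model_type, cfg.hidden_size)  # qwen2 2048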
flash_attention.py ADDED
@@ -0,0 +1,76 @@
+ # https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
+ import torch
+ import torch.nn as nn
+ from einops import rearrange
+
+ try:  # v1
+     from flash_attn.flash_attn_interface import \
+         flash_attn_unpadded_qkvpacked_func
+ except ImportError:  # v2 renamed the function
+     from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
+
+ from flash_attn.bert_padding import pad_input, unpad_input
+
+
+ class FlashAttention(nn.Module):
+     """Implement the scaled dot product attention with softmax.
+
+     Arguments
+     ---------
+         softmax_scale: The temperature to use for the softmax attention.
+             (default: 1/sqrt(d_keys) where d_keys is computed at runtime)
+         attention_dropout: The dropout rate to apply to the attention
+             (default: 0.0)
+     """
+
+     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
+         super().__init__()
+         self.softmax_scale = softmax_scale
+         self.dropout_p = attention_dropout
+
+     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
+                 max_s=None, need_weights=False):
+         """Implements the multihead softmax attention.
+
+         Arguments
+         ---------
+             qkv: The tensor containing the query, key, and value.
+                 (B, S, 3, H, D) if key_padding_mask is None; (nnz, 3, H, D) if unpadded.
+             key_padding_mask: a bool tensor of shape (B, S)
+         """
+         assert not need_weights
+         assert qkv.dtype in [torch.float16, torch.bfloat16]
+         assert qkv.is_cuda
+
+         if cu_seqlens is None:
+             batch_size = qkv.shape[0]
+             seqlen = qkv.shape[1]
+             if key_padding_mask is None:
+                 qkv = rearrange(qkv, 'b s ... -> (b s) ...')
+                 max_s = seqlen
+                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
+                                           device=qkv.device)
+                 output = flash_attn_unpadded_qkvpacked_func(
+                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
+             else:
+                 nheads = qkv.shape[-2]
+                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
+                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
+                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
+                 output_unpad = flash_attn_unpadded_qkvpacked_func(
+                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                     softmax_scale=self.softmax_scale, causal=causal
+                 )
+                 output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
+                                              indices, batch_size, seqlen),
+                                    'b s (h d) -> b s h d', h=nheads)
+         else:
+             assert max_s is not None
+             output = flash_attn_unpadded_qkvpacked_func(
+                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
+                 softmax_scale=self.softmax_scale, causal=causal
+             )
+
+         return output, None
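A usage sketch for the module above: it expects a packed qkv tensor in fp16/bf16 on a CUDA device (both asserted in forward) and returns the attention output plus a None placeholder for weights. Assumes flash-attn is installed and a GPU is available:

import torch
from flash_attention import FlashAttention

# B=2 sequences of S=196 tokens, H=16 heads with D=64 dims, packed as (B, S, 3, H, D).
qkv = torch.randn(2, 196, 3, 16, 64, dtype=torch.float16, device='cuda')

attn = FlashAttention(attention_dropout=0.0)
out, _ = attn(qkv, causal=False)  # second return value is always None
print(out.shape)                  # torch.Size([2, 196, 16, 64])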
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.42.3"
+ }
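This generation config carries no sampling overrides; it only records that it was derived from the model config. A quick check, with the repo path as a placeholder:

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained('path/to/this/repo')
print(gen_cfg.transformers_version)  # 4.42.3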
merges.txt ADDED
The diff for this file is too large to render.
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68e21c77d769db86bba24eec6d64eae5f436670631befde2e4db41fb5606973c
+ size 4971473960
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d628826f9328d75bbe793898d5101436c105cbe525055d4982d50071f92ec89d
+ size 4932952216
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4c46db7d4f31d8a444c9a97d6770b58ffa5c846ef039a7bf31b1ff463169190
+ size 4995688160
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:187f26a4266a7c08f080198574a24d8f069741b7108b96741b77b280582889c4
+ size 259328744
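Each shard above is stored as a Git LFS pointer whose oid is the SHA-256 of the actual file. A small sketch for verifying a downloaded shard against its pointer; the file name assumes a local clone with LFS objects fetched:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so multi-GB shards need not fit in memory.
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

digest = sha256_of('model-00001-of-00004.safetensors')
print(digest == '68e21c77d769db86bba24eec6d64eae5f436670631befde2e4db41fb5606973c')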
model.safetensors.index.json ADDED
The diff for this file is too large to render.
modeling_intern_vit.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ from typing import Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+ from einops import rearrange
13
+ from timm.models.layers import DropPath
14
+ from torch import nn
15
+ from transformers.activations import ACT2FN
16
+ from transformers.modeling_outputs import (BaseModelOutput,
17
+ BaseModelOutputWithPooling)
18
+ from transformers.modeling_utils import PreTrainedModel
19
+ from transformers.utils import logging
20
+
21
+ from .configuration_intern_vit import InternVisionConfig
22
+
23
+ try:
24
+ from .flash_attention import FlashAttention
25
+ has_flash_attn = True
26
+ except:
27
+ print('FlashAttention is not installed.')
28
+ has_flash_attn = False
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+
33
+ class InternRMSNorm(nn.Module):
34
+ def __init__(self, hidden_size, eps=1e-6):
35
+ super().__init__()
36
+ self.weight = nn.Parameter(torch.ones(hidden_size))
37
+ self.variance_epsilon = eps
38
+
39
+ def forward(self, hidden_states):
40
+ input_dtype = hidden_states.dtype
41
+ hidden_states = hidden_states.to(torch.float32)
42
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
43
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
44
+ return self.weight * hidden_states.to(input_dtype)
45
+
46
+
47
+ try:
48
+ from apex.normalization import FusedRMSNorm
49
+
50
+ InternRMSNorm = FusedRMSNorm # noqa
51
+
52
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
53
+ except ImportError:
54
+ # using the normal InternRMSNorm
55
+ pass
56
+ except Exception:
57
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
58
+ pass
59
+
60
+
61
+ NORM2FN = {
62
+ 'rms_norm': InternRMSNorm,
63
+ 'layer_norm': nn.LayerNorm,
64
+ }
65
+
66
+
67
+ class InternVisionEmbeddings(nn.Module):
68
+ def __init__(self, config: InternVisionConfig):
69
+ super().__init__()
70
+ self.config = config
71
+ self.embed_dim = config.hidden_size
72
+ self.image_size = config.image_size
73
+ self.patch_size = config.patch_size
74
+
75
+ self.class_embedding = nn.Parameter(
76
+ torch.randn(1, 1, self.embed_dim),
77
+ )
78
+
79
+ self.patch_embedding = nn.Conv2d(
80
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
81
+ )
82
+
83
+ self.num_patches = (self.image_size // self.patch_size) ** 2
84
+ self.num_positions = self.num_patches + 1
85
+
86
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
87
+
88
+ def _get_pos_embed(self, pos_embed, H, W):
89
+ target_dtype = pos_embed.dtype
90
+ pos_embed = pos_embed.float().reshape(
91
+ 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
92
+ pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
93
+ reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
94
+ return pos_embed
95
+
96
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
97
+ target_dtype = self.patch_embedding.weight.dtype
98
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
99
+ batch_size, _, height, width = patch_embeds.shape
100
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
101
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
102
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
103
+ position_embedding = torch.cat([
104
+ self.position_embedding[:, :1, :],
105
+ self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
106
+ ], dim=1)
107
+ embeddings = embeddings + position_embedding.to(target_dtype)
108
+ return embeddings
109
+
110
+
111
+ class InternAttention(nn.Module):
112
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
113
+
114
+ def __init__(self, config: InternVisionConfig):
115
+ super().__init__()
116
+ self.config = config
117
+ self.embed_dim = config.hidden_size
118
+ self.num_heads = config.num_attention_heads
119
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
120
+ if config.use_flash_attn and not has_flash_attn:
121
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
122
+ self.head_dim = self.embed_dim // self.num_heads
123
+ if self.head_dim * self.num_heads != self.embed_dim:
124
+ raise ValueError(
125
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
126
+ f' {self.num_heads}).'
127
+ )
128
+
129
+ self.scale = self.head_dim ** -0.5
130
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
131
+ self.attn_drop = nn.Dropout(config.attention_dropout)
132
+ self.proj_drop = nn.Dropout(config.dropout)
133
+
134
+ self.qk_normalization = config.qk_normalization
135
+
136
+ if self.qk_normalization:
137
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
138
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
139
+
140
+ if self.use_flash_attn:
141
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
142
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
143
+
144
+ def _naive_attn(self, x):
145
+ B, N, C = x.shape
146
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
147
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
148
+
149
+ if self.qk_normalization:
150
+ B_, H_, N_, D_ = q.shape
151
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
152
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
153
+
154
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
155
+ attn = attn.softmax(dim=-1)
156
+ attn = self.attn_drop(attn)
157
+
158
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
159
+ x = self.proj(x)
160
+ x = self.proj_drop(x)
161
+ return x
162
+
163
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
164
+ qkv = self.qkv(x)
165
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
166
+
167
+ if self.qk_normalization:
168
+ q, k, v = qkv.unbind(2)
169
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
170
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
171
+ qkv = torch.stack([q, k, v], dim=2)
172
+
173
+ context, _ = self.inner_attn(
174
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
175
+ )
176
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
177
+ outs = self.proj_drop(outs)
178
+ return outs
179
+
180
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
181
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
182
+ return x
183
+
184
+
185
+ class InternMLP(nn.Module):
186
+ def __init__(self, config: InternVisionConfig):
187
+ super().__init__()
188
+ self.config = config
189
+ self.act = ACT2FN[config.hidden_act]
190
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
191
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
192
+
193
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
194
+ hidden_states = self.fc1(hidden_states)
195
+ hidden_states = self.act(hidden_states)
196
+ hidden_states = self.fc2(hidden_states)
197
+ return hidden_states
198
+
199
+
200
+ class InternVisionEncoderLayer(nn.Module):
201
+ def __init__(self, config: InternVisionConfig, drop_path_rate: float):
202
+ super().__init__()
203
+ self.embed_dim = config.hidden_size
204
+ self.intermediate_size = config.intermediate_size
205
+ self.norm_type = config.norm_type
206
+
207
+ self.attn = InternAttention(config)
208
+ self.mlp = InternMLP(config)
209
+ self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
210
+ self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
211
+
212
+ self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
213
+ self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
214
+ self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
215
+ self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
216
+
217
+ def forward(
218
+ self,
219
+ hidden_states: torch.Tensor,
220
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
221
+ """
222
+ Args:
223
+ hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
224
+ """
225
+ hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)
226
+
227
+ hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)
228
+
229
+ return hidden_states
230
+
231
+
232
+ class InternVisionEncoder(nn.Module):
233
+ """
234
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
235
+ [`InternEncoderLayer`].
236
+
237
+ Args:
238
+ config (`InternConfig`):
239
+ The corresponding vision configuration for the `InternEncoder`.
240
+ """
241
+
242
+ def __init__(self, config: InternVisionConfig):
243
+ super().__init__()
244
+ self.config = config
245
+ # stochastic depth decay rule
246
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
247
+ self.layers = nn.ModuleList([
248
+ InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
249
+ self.gradient_checkpointing = True
250
+
251
+ def forward(
252
+ self,
253
+ inputs_embeds,
254
+ output_hidden_states: Optional[bool] = None,
255
+ return_dict: Optional[bool] = None,
256
+ ) -> Union[Tuple, BaseModelOutput]:
257
+ r"""
258
+ Args:
259
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
260
+ Embedded representation of the inputs. Should be float, not int tokens.
261
+ output_hidden_states (`bool`, *optional*):
262
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
263
+ for more detail.
264
+ return_dict (`bool`, *optional*):
265
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
266
+ """
267
+ output_hidden_states = (
268
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
269
+ )
270
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
271
+
272
+ encoder_states = () if output_hidden_states else None
273
+ hidden_states = inputs_embeds
274
+
275
+ for idx, encoder_layer in enumerate(self.layers):
276
+ if output_hidden_states:
277
+ encoder_states = encoder_states + (hidden_states,)
278
+ if self.gradient_checkpointing and self.training:
279
+ layer_outputs = torch.utils.checkpoint.checkpoint(
280
+ encoder_layer,
281
+ hidden_states)
282
+ else:
283
+ layer_outputs = encoder_layer(
284
+ hidden_states,
285
+ )
286
+ hidden_states = layer_outputs
287
+
288
+ if output_hidden_states:
289
+ encoder_states = encoder_states + (hidden_states,)
290
+
291
+ if not return_dict:
292
+ return tuple(v for v in [hidden_states, encoder_states] if v is not None)
293
+ return BaseModelOutput(
294
+ last_hidden_state=hidden_states, hidden_states=encoder_states
295
+ )
296
+
297
+
298
+ class InternVisionModel(PreTrainedModel):
299
+ main_input_name = 'pixel_values'
300
+ _supports_flash_attn_2 = True
301
+ config_class = InternVisionConfig
302
+ _no_split_modules = ['InternVisionEncoderLayer']
303
+
304
+ def __init__(self, config: InternVisionConfig):
305
+ super().__init__(config)
306
+ self.config = config
307
+
308
+ self.embeddings = InternVisionEmbeddings(config)
309
+ self.encoder = InternVisionEncoder(config)
310
+
311
+ def resize_pos_embeddings(self, old_size, new_size, patch_size):
312
+ pos_emb = self.embeddings.position_embedding
313
+ _, num_positions, embed_dim = pos_emb.shape
314
+ cls_emb = pos_emb[:, :1, :]
315
+ pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
316
+ pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
317
+ pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
318
+ pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
319
+ self.embeddings.position_embedding = nn.Parameter(pos_emb)
320
+ self.embeddings.image_size = new_size
321
+ logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
322
+
323
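A shape-level sketch of the interpolation performed by `resize_pos_embeddings` above (sizes are illustrative, not the checkpoint's):

import torch
import torch.nn.functional as F

old_size, new_size, patch, dim = 224, 448, 14, 32
pos = torch.randn(1, 1 + (old_size // patch) ** 2, dim)    # [CLS] + 16x16 grid

cls_tok, grid = pos[:, :1], pos[:, 1:]
grid = grid.reshape(1, old_size // patch, old_size // patch, dim).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=new_size // patch, mode='bicubic', align_corners=False)
grid = grid.reshape(1, dim, -1).permute(0, 2, 1)           # back to (1, 32*32, dim)
pos = torch.cat([cls_tok, grid], dim=1)                    # (1, 1 + 1024, dim)
print(pos.shape)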
+ def get_input_embeddings(self):
324
+ return self.embeddings
325
+
326
+ def forward(
327
+ self,
328
+ pixel_values: Optional[torch.FloatTensor] = None,
329
+ output_hidden_states: Optional[bool] = None,
330
+ return_dict: Optional[bool] = None,
331
+ pixel_embeds: Optional[torch.FloatTensor] = None,
332
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
333
+ output_hidden_states = (
334
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
335
+ )
336
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
337
+
338
+ if pixel_values is None and pixel_embeds is None:
339
+ raise ValueError('You have to specify either pixel_values or pixel_embeds')
340
+
341
+ if pixel_embeds is not None:
342
+ hidden_states = pixel_embeds
343
+ else:
344
+ if len(pixel_values.shape) == 4:
345
+ hidden_states = self.embeddings(pixel_values)
346
+ else:
347
+ raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
348
+ encoder_outputs = self.encoder(
349
+ inputs_embeds=hidden_states,
350
+ output_hidden_states=output_hidden_states,
351
+ return_dict=return_dict,
352
+ )
353
+ last_hidden_state = encoder_outputs[0] if not return_dict else encoder_outputs.last_hidden_state
354
+ pooled_output = last_hidden_state[:, 0, :]
355
+
356
+ if not return_dict:
357
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
358
+
359
+ return BaseModelOutputWithPooling(
360
+ last_hidden_state=last_hidden_state,
361
+ pooler_output=pooled_output,
362
+ hidden_states=encoder_outputs.hidden_states,
363
+ attentions=encoder_outputs.attentions,
364
+ )
modeling_internlm2.py ADDED
@@ -0,0 +1,1429 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ try:
39
+ from transformers.generation.streamers import BaseStreamer
40
+ except ImportError:  # BaseStreamer is not available in older transformers versions
41
+ BaseStreamer = None
42
+
43
+ from .configuration_internlm2 import InternLM2Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = 'InternLM2Config'
48
+
49
+ flash_attn_func, flash_attn_varlen_func = None, None
50
+ pad_input, index_first_axis, unpad_input = None, None, None
51
+ try:
52
+ from flash_attn import flash_attn_func as _flash_attn_func
53
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
55
+ from flash_attn.bert_padding import pad_input as _pad_input
56
+ from flash_attn.bert_padding import unpad_input as _unpad_input
57
+
58
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
59
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
60
+ has_flash_attn = True
61
+ except Exception:
62
+ has_flash_attn = False
63
+
64
+
65
+ def _import_flash_attn():
66
+ global flash_attn_func, flash_attn_varlen_func
67
+ global pad_input, index_first_axis, unpad_input
68
+ try:
69
+ from flash_attn import flash_attn_func as _flash_attn_func
70
+ from flash_attn import \
71
+ flash_attn_varlen_func as _flash_attn_varlen_func
72
+ from flash_attn.bert_padding import \
73
+ index_first_axis as _index_first_axis
74
+ from flash_attn.bert_padding import pad_input as _pad_input
75
+ from flash_attn.bert_padding import unpad_input as _unpad_input
76
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
77
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
78
+ except ImportError:
79
+ raise ImportError('flash_attn is not installed.')
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
83
+ def _get_unpad_data(attention_mask):
84
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
85
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
86
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
87
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
88
+ return (
89
+ indices,
90
+ cu_seqlens,
91
+ max_seqlen_in_batch,
92
+ )
93
+
94
+
95
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
96
+ def _make_causal_mask(
97
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
98
+ ):
99
+ """
100
+ Make causal mask used for bi-directional self-attention.
101
+ """
102
+ bsz, tgt_len = input_ids_shape
103
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
104
+ mask_cond = torch.arange(mask.size(-1), device=device)
105
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
106
+ mask = mask.to(dtype)
107
+
108
+ if past_key_values_length > 0:
109
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
110
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
111
+
112
+
113
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
114
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
115
+ """
116
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
117
+ """
118
+ bsz, src_len = mask.size()
119
+ tgt_len = tgt_len if tgt_len is not None else src_len
120
+
121
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
122
+
123
+ inverted_mask = 1.0 - expanded_mask
124
+
125
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
126
+
127
+
128
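A small demonstration of the additive masks built by the two helpers above: entries are 0 where attention is allowed and dtype-min where it is blocked.

import torch

causal = _make_causal_mask((1, 3), torch.float32, device=torch.device('cpu'))
print(causal[0, 0])
# tensor([[0., m, m],
#         [0., 0., m],
#         [0., 0., 0.]])  where m = torch.finfo(torch.float32).min

pad = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32)  # last token is padding
print(pad[0, 0])  # column 2 is dtype-min in every row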
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
129
+ class InternLM2RMSNorm(nn.Module):
130
+ def __init__(self, hidden_size, eps=1e-6):
131
+ """
132
+ InternLM2RMSNorm is equivalent to T5LayerNorm
133
+ """
134
+ super().__init__()
135
+ self.weight = nn.Parameter(torch.ones(hidden_size))
136
+ self.variance_epsilon = eps
137
+
138
+ def forward(self, hidden_states):
139
+ input_dtype = hidden_states.dtype
140
+ hidden_states = hidden_states.to(torch.float32)
141
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
142
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
143
+ return self.weight * hidden_states.to(input_dtype)
144
+
145
+
146
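RMSNorm divides by the root-mean-square of the features, with no mean subtraction and no bias; a quick numerical check against the formula:

import torch

norm = InternLM2RMSNorm(4, eps=1e-6)
x = torch.randn(2, 4)
expected = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), expected, atol=1e-6)  # weight is all-ones at init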
+ try:
147
+ from functools import partial
148
+
149
+ from apex.normalization import FusedRMSNorm
150
+ InternLM2RMSNorm = partial(FusedRMSNorm, eps=1e-6) # noqa
151
+ print('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternLM2RMSNorm')
152
+ except ImportError:
153
+ # using the normal LlamaRMSNorm
154
+ pass
155
+ except Exception:
156
+ print('Discovered apex but it failed to load, falling back to InternLM2RMSNorm')
157
+ pass
158
+
159
+
160
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
161
+ class InternLM2RotaryEmbedding(nn.Module):
162
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
163
+ super().__init__()
164
+
165
+ self.dim = dim
166
+ self.max_position_embeddings = max_position_embeddings
167
+ self.base = base
168
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
169
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
170
+
171
+ # Build here to make `torch.jit.trace` work.
172
+ self._set_cos_sin_cache(
173
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
174
+ )
175
+
176
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
177
+ self.max_seq_len_cached = seq_len
178
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
179
+
180
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
181
+ # Different from the paper's layout, but an equivalent permutation yields the same result
182
+ emb = torch.cat((freqs, freqs), dim=-1)
183
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
184
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
185
+
186
+ def forward(self, x, seq_len=None):
187
+ # x: [bs, num_attention_heads, seq_len, head_size]
188
+ if seq_len > self.max_seq_len_cached:
189
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
190
+
191
+ return (
192
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
193
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
194
+ )
195
+
196
+
197
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
198
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
199
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
200
+
201
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
202
+ self.scaling_factor = scaling_factor
203
+ super().__init__(dim, max_position_embeddings, base, device)
204
+
205
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
206
+ self.max_seq_len_cached = seq_len
207
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
208
+ t = t / self.scaling_factor
209
+
210
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
211
+ # Different from the paper's layout, but an equivalent permutation yields the same result
212
+ emb = torch.cat((freqs, freqs), dim=-1)
213
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
214
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
215
+
216
+
217
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
218
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
219
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
220
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
221
+ """
222
+
223
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
224
+ self.scaling_factor = scaling_factor
225
+ super().__init__(dim, max_position_embeddings, base, device)
226
+
227
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
228
+ self.max_seq_len_cached = seq_len
229
+
230
+ if seq_len > self.max_position_embeddings:
231
+ base = self.base * (
232
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
233
+ ) ** (self.dim / (self.dim - 2))
234
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
235
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
236
+
237
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
238
+
239
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
240
+ # Different from the paper's layout, but an equivalent permutation yields the same result
241
+ emb = torch.cat((freqs, freqs), dim=-1)
242
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
243
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
244
+
245
+
246
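The dynamic-NTK branch above enlarges the RoPE base once the sequence exceeds the training length, so low frequencies stretch smoothly instead of wrapping. The recomputed base, with illustrative numbers:

import torch

dim, base, max_pos, factor = 128, 10000.0, 2048, 2.0
seq_len = 4096  # longer than max_pos, so the base is enlarged

new_base = base * ((factor * seq_len / max_pos) - (factor - 1)) ** (dim / (dim - 2))
inv_freq = 1.0 / (new_base ** (torch.arange(0, dim, 2).float() / dim))
print(new_base)  # roughly 3x the original base for these numbers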
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
247
+ def rotate_half(x):
248
+ """Rotates half the hidden dims of the input."""
249
+ x1 = x[..., : x.shape[-1] // 2]
250
+ x2 = x[..., x.shape[-1] // 2:]
251
+ return torch.cat((-x2, x1), dim=-1)
252
+
253
+
254
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
255
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
256
+ """Applies Rotary Position Embedding to the query and key tensors."""
257
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
258
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
259
+ q_embed = (q * cos) + (rotate_half(q) * sin)
260
+ k_embed = (k * cos) + (rotate_half(k) * sin)
261
+ return q_embed, k_embed
262
+
263
+
264
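A compact sanity check for the two functions above: RoPE is a per-position rotation of (x1, x2) pairs, so it leaves vector norms unchanged.

import torch

head_dim, seq_len = 8, 4
rope = InternLM2RotaryEmbedding(head_dim, max_position_embeddings=seq_len)
q = torch.randn(1, 1, seq_len, head_dim)   # (bsz, heads, seq, head_dim)
k = torch.randn(1, 1, seq_len, head_dim)
cos, sin = rope(q, seq_len=seq_len)
position_ids = torch.arange(seq_len).unsqueeze(0)

q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert torch.allclose(q_rot.norm(dim=-1), q.norm(dim=-1), atol=1e-5)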
+ class InternLM2MLP(nn.Module):
265
+ def __init__(self, config):
266
+ super().__init__()
267
+ self.config = config
268
+ self.hidden_size = config.hidden_size
269
+ self.intermediate_size = config.intermediate_size
270
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
271
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
272
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
273
+ self.act_fn = ACT2FN[config.hidden_act]
274
+
275
+ def forward(self, x):
276
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
277
+
278
+ return down_proj
279
+
280
+
281
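The MLP above is the gated (SwiGLU-style) form w2(act(w1(x)) * w3(x)). Written out with explicit tensors (sizes are illustrative; the model's hidden_act comes from the config):

import torch
import torch.nn.functional as F

hidden, inter = 8, 16
x = torch.randn(2, hidden)
w1 = torch.randn(inter, hidden)   # gate projection
w3 = torch.randn(inter, hidden)   # up projection
w2 = torch.randn(hidden, inter)   # down projection

out = (F.silu(x @ w1.T) * (x @ w3.T)) @ w2.T   # InternLM2MLP with hidden_act='silu'
print(out.shape)  # (2, 8)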
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
282
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
283
+ """
284
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
285
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
286
+ """
287
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
288
+ if n_rep == 1:
289
+ return hidden_states
290
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
291
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
292
+
293
+
294
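A one-liner check of the equivalence stated in the docstring above:

import torch

kv = torch.randn(1, 2, 5, 4)        # (batch, num_kv_heads=2, seq, head_dim)
expanded = repeat_kv(kv, n_rep=3)   # -> (1, 6, 5, 4): 3 query heads per KV head
assert torch.equal(expanded, torch.repeat_interleave(kv, repeats=3, dim=1))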
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
295
+ class InternLM2Attention(nn.Module):
296
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
297
+
298
+ def __init__(self, config: InternLM2Config):
299
+ super().__init__()
300
+ self.config = config
301
+ self.hidden_size = config.hidden_size
302
+ self.num_heads = config.num_attention_heads
303
+ self.head_dim = self.hidden_size // self.num_heads
304
+ self.num_key_value_heads = config.num_key_value_heads
305
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
306
+ self.max_position_embeddings = config.max_position_embeddings
307
+ self.is_causal = True
308
+
309
+ if (self.head_dim * self.num_heads) != self.hidden_size:
310
+ raise ValueError(
311
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
312
+ f' and `num_heads`: {self.num_heads}).'
313
+ )
314
+
315
+ self.wqkv = nn.Linear(
316
+ self.hidden_size,
317
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
318
+ bias=config.bias,
319
+ )
320
+
321
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
322
+ self._init_rope()
323
+
324
+ def _init_rope(self):
325
+ if self.config.rope_scaling is None:
326
+ self.rotary_emb = InternLM2RotaryEmbedding(
327
+ self.head_dim,
328
+ max_position_embeddings=self.max_position_embeddings,
329
+ base=self.config.rope_theta,
330
+ )
331
+ else:
332
+ scaling_type = self.config.rope_scaling['type']
333
+ scaling_factor = self.config.rope_scaling['factor']
334
+ if scaling_type == 'dynamic':
335
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
336
+ self.head_dim,
337
+ max_position_embeddings=self.max_position_embeddings,
338
+ base=self.config.rope_theta,
339
+ scaling_factor=scaling_factor,
340
+ )
341
+ elif scaling_type == 'linear':
342
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
343
+ self.head_dim,
344
+ max_position_embeddings=self.max_position_embeddings,
345
+ base=self.config.rope_theta,
346
+ scaling_factor=scaling_factor,
347
+ )
348
+ else:
349
+ raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
350
+ return self.rotary_emb
351
+
352
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
353
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
354
+
355
+ def forward(
356
+ self,
357
+ hidden_states: torch.Tensor,
358
+ attention_mask: Optional[torch.Tensor] = None,
359
+ position_ids: Optional[torch.LongTensor] = None,
360
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
361
+ output_attentions: bool = False,
362
+ use_cache: bool = False,
363
+ **kwargs,
364
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
365
+ if 'padding_mask' in kwargs:
366
+ warnings.warn(
367
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
368
+ 'Please make sure to use `attention_mask` instead.'
369
+ )
370
+
371
+ bsz, q_len, _ = hidden_states.size()
372
+
373
+ qkv_states = self.wqkv(hidden_states)
374
+
375
+ qkv_states = rearrange(
376
+ qkv_states,
377
+ 'b q (h gs d) -> b q h gs d',
378
+ gs=2 + self.num_key_value_groups,
379
+ d=self.head_dim,
380
+ )
381
+
382
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
383
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
384
+ key_states = qkv_states[..., -2, :]
385
+ value_states = qkv_states[..., -1, :]
386
+
387
+ query_states = query_states.transpose(1, 2)
388
+ key_states = key_states.transpose(1, 2)
389
+ value_states = value_states.transpose(1, 2)
390
+
391
+ kv_seq_len = key_states.shape[-2]
392
+ if past_key_value is not None:
393
+ kv_seq_len += past_key_value[0].shape[-2]
394
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
395
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
396
+
397
+ if past_key_value is not None:
398
+ # reuse k, v, self_attention
399
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
400
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
401
+
402
+ past_key_value = (key_states, value_states) if use_cache else None
403
+
404
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
405
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
406
+
407
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
408
+
409
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
410
+ raise ValueError(
411
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
412
+ f' {attn_weights.size()}'
413
+ )
414
+
415
+ if attention_mask is not None:
416
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
417
+ raise ValueError(
418
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
419
+ )
420
+ attn_weights = attn_weights + attention_mask
421
+
422
+ # upcast attention to fp32
423
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
424
+ attn_output = torch.matmul(attn_weights, value_states)
425
+
426
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
427
+ raise ValueError(
428
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
429
+ f' {attn_output.size()}'
430
+ )
431
+
432
+ attn_output = attn_output.transpose(1, 2).contiguous()
433
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
434
+
435
+ attn_output = self.wo(attn_output)
436
+
437
+ if not output_attentions:
438
+ attn_weights = None
439
+
440
+ return attn_output, attn_weights, past_key_value
441
+
442
+
443
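The `wqkv` projection above packs queries, keys and values per KV group, and the einops pattern splits them back out. A shape-only walk-through (head counts are illustrative):

import torch
from einops import rearrange

bsz, q_len = 1, 5
num_heads, num_kv_heads, head_dim = 8, 2, 16
groups = num_heads // num_kv_heads                 # query heads per KV head

qkv = torch.randn(bsz, q_len, (num_heads + 2 * num_kv_heads) * head_dim)
qkv = rearrange(qkv, 'b q (h gs d) -> b q h gs d', gs=2 + groups, d=head_dim)
q = rearrange(qkv[..., :groups, :], 'b q h gs d -> b q (h gs) d')
k, v = qkv[..., -2, :], qkv[..., -1, :]
print(q.shape, k.shape, v.shape)  # (1, 5, 8, 16) (1, 5, 2, 16) (1, 5, 2, 16)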
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
444
+ class InternLM2FlashAttention2(InternLM2Attention):
445
+ """
446
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention`, as the weights of the module stay
447
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
448
+ flash attention and deal with padding tokens in case the input contains any of them.
449
+ """
450
+
451
+ def forward(
452
+ self,
453
+ hidden_states: torch.Tensor,
454
+ attention_mask: Optional[torch.LongTensor] = None,
455
+ position_ids: Optional[torch.LongTensor] = None,
456
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
457
+ output_attentions: bool = False,
458
+ use_cache: bool = False,
459
+ **kwargs,
460
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
461
+ # InternLM2FlashAttention2 attention does not support output_attentions
462
+ if 'padding_mask' in kwargs:
463
+ warnings.warn(
464
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
465
+ 'Please make sure to use `attention_mask` instead.'
466
+ )
467
+
468
+ # overwrite attention_mask with padding_mask
469
+ attention_mask = kwargs.pop('padding_mask')
470
+
471
+ output_attentions = False
472
+
473
+ bsz, q_len, _ = hidden_states.size()
474
+
475
+ qkv_states = self.wqkv(hidden_states)
476
+
477
+ qkv_states = rearrange(
478
+ qkv_states,
479
+ 'b q (h gs d) -> b q h gs d',
480
+ gs=2 + self.num_key_value_groups,
481
+ d=self.head_dim,
482
+ )
483
+
484
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
485
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
486
+ key_states = qkv_states[..., -2, :]
487
+ value_states = qkv_states[..., -1, :]
488
+
489
+ query_states = query_states.transpose(1, 2)
490
+ key_states = key_states.transpose(1, 2)
491
+ value_states = value_states.transpose(1, 2)
492
+
493
+ kv_seq_len = key_states.shape[-2]
494
+ if past_key_value is not None:
495
+ kv_seq_len += past_key_value[0].shape[-2]
496
+
497
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
498
+
499
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
500
+
501
+ if past_key_value is not None:
502
+ # reuse k, v, self_attention
503
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
504
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
505
+
506
+ past_key_value = (key_states, value_states) if use_cache else None
507
+
508
+ query_states = query_states.transpose(1, 2)
509
+ key_states = key_states.transpose(1, 2)
510
+ value_states = value_states.transpose(1, 2)
511
+
512
+ attn_output = self._flash_attention_forward(
513
+ query_states, key_states, value_states, attention_mask, q_len
514
+ )
515
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
516
+ attn_output = self.wo(attn_output)
517
+
518
+ if not output_attentions:
519
+ attn_weights = None
520
+
521
+ return attn_output, attn_weights, past_key_value
522
+
523
+ def _flash_attention_forward(
524
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
525
+ ):
526
+ """
527
+ Calls the forward method of Flash Attention. If the input hidden states contain at least one padding token,
529
+ the input is first unpadded, attention is computed on the unpadded sequences, and the output is padded back.
529
+
530
+ Args:
531
+ query_states (`torch.Tensor`):
532
+ Input query states to be passed to Flash Attention API
533
+ key_states (`torch.Tensor`):
534
+ Input key states to be passed to Flash Attention API
535
+ value_states (`torch.Tensor`):
536
+ Input value states to be passed to Flash Attention API
537
+ attention_mask (`torch.Tensor`):
538
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
539
+ position of padding tokens and 1 for the position of non-padding tokens.
540
+ dropout (`float`, *optional*):
541
+ Attention dropout
542
+ softmax_scale (`float`, *optional*):
543
+ The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
544
+ """
545
+ # Contains at least one padding token in the sequence
546
+ causal = self.is_causal and query_length != 1
547
+ if attention_mask is not None:
548
+ batch_size = query_states.shape[0]
549
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
550
+ query_states, key_states, value_states, attention_mask, query_length
551
+ )
552
+
553
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
554
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
555
+
556
+ attn_output_unpad = flash_attn_varlen_func(
557
+ query_states,
558
+ key_states,
559
+ value_states,
560
+ cu_seqlens_q=cu_seqlens_q,
561
+ cu_seqlens_k=cu_seqlens_k,
562
+ max_seqlen_q=max_seqlen_in_batch_q,
563
+ max_seqlen_k=max_seqlen_in_batch_k,
564
+ dropout_p=dropout,
565
+ softmax_scale=softmax_scale,
566
+ causal=causal,
567
+ )
568
+
569
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
570
+ else:
571
+ attn_output = flash_attn_func(
572
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
573
+ )
574
+
575
+ return attn_output
576
+
577
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
578
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
579
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
580
+
581
+ key_layer = index_first_axis(
582
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
583
+ )
584
+ value_layer = index_first_axis(
585
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
586
+ )
587
+
588
+ if query_length == kv_seq_len:
589
+ query_layer = index_first_axis(
590
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
591
+ )
592
+ cu_seqlens_q = cu_seqlens_k
593
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
594
+ indices_q = indices_k
595
+ elif query_length == 1:
596
+ max_seqlen_in_batch_q = 1
597
+ cu_seqlens_q = torch.arange(
598
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
599
+ ) # There is a memcpy here, that is very bad.
600
+ indices_q = cu_seqlens_q[:-1]
601
+ query_layer = query_layer.squeeze(1)
602
+ else:
603
+ # The -q_len: slice assumes left padding.
604
+ attention_mask = attention_mask[:, -query_length:]
605
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
606
+
607
+ return (
608
+ query_layer,
609
+ key_layer,
610
+ value_layer,
611
+ indices_q.to(torch.int64),
612
+ (cu_seqlens_q, cu_seqlens_k),
613
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
614
+ )
615
+
616
+
617
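For the varlen flash-attention path, `_get_unpad_data` turns a padded batch into flat token indices plus cumulative sequence lengths. A sketch of the bookkeeping (plain PyTorch, no flash_attn required):

import torch

attention_mask = torch.tensor([[1, 1, 1, 0],    # sequence 0 has 3 real tokens
                               [1, 1, 0, 0]])   # sequence 1 has 2 real tokens
indices, cu_seqlens, max_seqlen = _get_unpad_data(attention_mask)
print(indices)     # tensor([0, 1, 2, 4, 5]) - flat positions of non-pad tokens
print(cu_seqlens)  # tensor([0, 3, 5], dtype=torch.int32) - per-sequence offsets
print(max_seqlen)  # 3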
+ INTERNLM2_ATTENTION_CLASSES = {
618
+ 'eager': InternLM2Attention,
619
+ 'flash_attention_2': InternLM2FlashAttention2,
620
+ }
621
+
622
+
623
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
624
+ class InternLM2DecoderLayer(nn.Module):
625
+ def __init__(self, config: InternLM2Config):
626
+ super().__init__()
627
+ self.hidden_size = config.hidden_size
628
+
629
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
630
+
631
+ self.feed_forward = InternLM2MLP(config)
632
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
633
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
634
+
635
+ def forward(
636
+ self,
637
+ hidden_states: torch.Tensor,
638
+ attention_mask: Optional[torch.Tensor] = None,
639
+ position_ids: Optional[torch.LongTensor] = None,
640
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
641
+ output_attentions: Optional[bool] = False,
642
+ use_cache: Optional[bool] = False,
643
+ **kwargs,
644
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
645
+ """
646
+ Args:
647
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
648
+ attention_mask (`torch.FloatTensor`, *optional*):
649
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
650
+ query_sequence_length, key_sequence_length)` if default attention is used.
651
+ output_attentions (`bool`, *optional*):
652
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
653
+ returned tensors for more detail.
654
+ use_cache (`bool`, *optional*):
655
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
656
+ (see `past_key_values`).
657
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
658
+ """
659
+ if 'padding_mask' in kwargs:
660
+ warnings.warn(
661
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
662
+ 'Please make sure to use `attention_mask` instead.'
663
+ )
664
+
665
+ residual = hidden_states
666
+
667
+ hidden_states = self.attention_norm(hidden_states)
668
+
669
+ # Self Attention
670
+ hidden_states, self_attn_weights, present_key_value = self.attention(
671
+ hidden_states=hidden_states,
672
+ attention_mask=attention_mask,
673
+ position_ids=position_ids,
674
+ past_key_value=past_key_value,
675
+ output_attentions=output_attentions,
676
+ use_cache=use_cache,
677
+ **kwargs,
678
+ )
679
+ hidden_states = residual + hidden_states
680
+
681
+ # Fully Connected
682
+ residual = hidden_states
683
+ hidden_states = self.ffn_norm(hidden_states)
684
+ hidden_states = self.feed_forward(hidden_states)
685
+ hidden_states = residual + hidden_states
686
+
687
+ outputs = (hidden_states,)
688
+
689
+ if output_attentions:
690
+ outputs += (self_attn_weights,)
691
+
692
+ if use_cache:
693
+ outputs += (present_key_value,)
694
+
695
+ return outputs
696
+
697
+
698
+ InternLM2_START_DOCSTRING = r"""
699
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
700
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
701
+ etc.)
702
+
703
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
704
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
705
+ and behavior.
706
+
707
+ Parameters:
708
+ config ([`InternLM2Config`]):
709
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
710
+ load the weights associated with the model, only the configuration. Check out the
711
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
712
+ """
713
+
714
+
715
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
716
+ @add_start_docstrings(
717
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
718
+ InternLM2_START_DOCSTRING,
719
+ )
720
+ class InternLM2PreTrainedModel(PreTrainedModel):
721
+ config_class = InternLM2Config
722
+ base_model_prefix = 'model'
723
+ supports_gradient_checkpointing = True
724
+ _no_split_modules = ['InternLM2DecoderLayer']
725
+ _skip_keys_device_placement = 'past_key_values'
726
+ _supports_flash_attn_2 = True
727
+
728
+ def _init_weights(self, module):
729
+ std = self.config.initializer_range
730
+ if isinstance(module, nn.Linear):
731
+ module.weight.data.normal_(mean=0.0, std=std)
732
+ if module.bias is not None:
733
+ module.bias.data.zero_()
734
+ elif isinstance(module, nn.Embedding):
735
+ module.weight.data.normal_(mean=0.0, std=std)
736
+ if module.padding_idx is not None:
737
+ module.weight.data[module.padding_idx].zero_()
738
+
739
+
740
+ InternLM2_INPUTS_DOCSTRING = r"""
741
+ Args:
742
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
743
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
744
+ it.
745
+
746
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
747
+ [`PreTrainedTokenizer.__call__`] for details.
748
+
749
+ [What are input IDs?](../glossary#input-ids)
750
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
751
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
752
+
753
+ - 1 for tokens that are **not masked**,
754
+ - 0 for tokens that are **masked**.
755
+
756
+ [What are attention masks?](../glossary#attention-mask)
757
+
758
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
759
+ [`PreTrainedTokenizer.__call__`] for details.
760
+
761
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
762
+ `past_key_values`).
763
+
764
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
765
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
766
+ information on the default strategy.
767
+
768
770
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
771
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
772
+ config.n_positions - 1]`.
773
+
774
+ [What are position IDs?](../glossary#position-ids)
775
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
776
+ when `config.use_cache=True`):
777
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
778
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
780
+
781
+ Contains pre-computed key and value hidden-states of the self-attention blocks that can be used
782
+ (see `past_key_values` input) to speed up sequential decoding.
783
+
784
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
785
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
786
+ of shape `(batch_size, sequence_length)`.
787
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
788
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
789
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
790
+ model's internal embedding lookup matrix.
791
+ use_cache (`bool`, *optional*):
792
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
793
+ `past_key_values`).
794
+ output_attentions (`bool`, *optional*):
795
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
796
+ tensors for more detail.
797
+ output_hidden_states (`bool`, *optional*):
798
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
799
+ more detail.
800
+ return_dict (`bool`, *optional*):
801
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
802
+ """
803
+
804
+
805
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
806
+ @add_start_docstrings(
807
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
808
+ InternLM2_START_DOCSTRING,
809
+ )
810
+ class InternLM2Model(InternLM2PreTrainedModel):
811
+ """
812
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
813
+
814
+ Args:
815
+ config: InternLM2Config
816
+ """
817
+
818
+ _auto_class = 'AutoModel'
819
+
820
+ def __init__(self, config: InternLM2Config):
821
+ super().__init__(config)
822
+ self.padding_idx = config.pad_token_id
823
+ self.vocab_size = config.vocab_size
824
+ self.config = config
825
+ if not has_flash_attn:
826
+ self.config.attn_implementation = 'eager'
827
+ logger.warning('Flash attention is not available; falling back to eager attention.')
828
+
829
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
830
+
831
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
832
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
833
+
834
+ self.gradient_checkpointing = False
835
+ # Initialize weights and apply final processing
836
+ self.post_init()
837
+
838
+ def get_input_embeddings(self):
839
+ return self.tok_embeddings
840
+
841
+ def set_input_embeddings(self, value):
842
+ self.tok_embeddings = value
843
+
844
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
845
+ # create causal mask
846
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
847
+ combined_attention_mask = None
848
+ if input_shape[-1] > 1:
849
+ combined_attention_mask = _make_causal_mask(
850
+ input_shape,
851
+ inputs_embeds.dtype,
852
+ device=inputs_embeds.device,
853
+ past_key_values_length=past_key_values_length,
854
+ )
855
+
856
+ if attention_mask is not None:
857
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
858
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
859
+ inputs_embeds.device
860
+ )
861
+ combined_attention_mask = (
862
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
863
+ )
864
+
865
+ return combined_attention_mask
866
+
867
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
868
+ def forward(
869
+ self,
870
+ input_ids: torch.LongTensor = None,
871
+ attention_mask: Optional[torch.Tensor] = None,
872
+ position_ids: Optional[torch.LongTensor] = None,
873
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
874
+ inputs_embeds: Optional[torch.FloatTensor] = None,
875
+ use_cache: Optional[bool] = None,
876
+ output_attentions: Optional[bool] = None,
877
+ output_hidden_states: Optional[bool] = None,
878
+ return_dict: Optional[bool] = None,
879
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
880
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
881
+ output_hidden_states = (
882
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
883
+ )
884
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
885
+
886
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
887
+
888
+ if self.config.attn_implementation == 'flash_attention_2':
889
+ _import_flash_attn()
890
+
891
+ # retrieve input_ids and inputs_embeds
892
+ if input_ids is not None and inputs_embeds is not None:
893
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
894
+ elif input_ids is not None:
895
+ batch_size, seq_length = input_ids.shape[:2]
896
+ elif inputs_embeds is not None:
897
+ batch_size, seq_length = inputs_embeds.shape[:2]
898
+ else:
899
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
900
+
901
+ seq_length_with_past = seq_length
902
+ past_key_values_length = 0
903
+ if past_key_values is not None:
904
+ past_key_values_length = past_key_values[0][0].shape[2]
905
+ seq_length_with_past = seq_length_with_past + past_key_values_length
906
+
907
+ if position_ids is None:
908
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
909
+ position_ids = torch.arange(
910
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
911
+ )
912
+ position_ids = position_ids.unsqueeze(0)
913
+
914
+ if inputs_embeds is None:
915
+ inputs_embeds = self.tok_embeddings(input_ids)
916
+
917
+ if self.config.attn_implementation == 'flash_attention_2':
918
+ # 2d mask is passed through the layers
919
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
920
+ else:
921
+ if attention_mask is None:
922
+ attention_mask = torch.ones(
923
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
924
+ )
925
+ attention_mask = self._prepare_decoder_attention_mask(
926
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
927
+ )
928
+
929
+ # embed positions
930
+ hidden_states = inputs_embeds
931
+
932
+ if self.gradient_checkpointing and self.training:
933
+ if use_cache:
934
+ logger.warning_once(
935
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
936
+ )
937
+ use_cache = False
938
+
939
+ # decoder layers
940
+ all_hidden_states = () if output_hidden_states else None
941
+ all_self_attns = () if output_attentions else None
942
+ next_decoder_cache = () if use_cache else None
943
+
944
+ for idx, decoder_layer in enumerate(self.layers):
945
+ if output_hidden_states:
946
+ all_hidden_states += (hidden_states,)
947
+
948
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
949
+
950
+ if self.gradient_checkpointing and self.training:
951
+
952
+ def create_custom_forward(module):
953
+ def custom_forward(*inputs):
954
+ # None for past_key_value
955
+ return module(*inputs, output_attentions, None)
956
+
957
+ return custom_forward
958
+
959
+ layer_outputs = torch.utils.checkpoint.checkpoint(
960
+ create_custom_forward(decoder_layer),
961
+ hidden_states,
962
+ attention_mask,
963
+ position_ids,
964
+ None,
965
+ )
966
+ else:
967
+ layer_outputs = decoder_layer(
968
+ hidden_states,
969
+ attention_mask=attention_mask,
970
+ position_ids=position_ids,
971
+ past_key_value=past_key_value,
972
+ output_attentions=output_attentions,
973
+ use_cache=use_cache,
974
+ )
975
+
976
+ hidden_states = layer_outputs[0]
977
+
978
+ if use_cache:
979
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
980
+
981
+ if output_attentions:
982
+ all_self_attns += (layer_outputs[1],)
983
+
984
+ hidden_states = self.norm(hidden_states)
985
+
986
+ # add hidden states from the last decoder layer
987
+ if output_hidden_states:
988
+ all_hidden_states += (hidden_states,)
989
+
990
+ next_cache = next_decoder_cache if use_cache else None
991
+ if not return_dict:
992
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
993
+ return BaseModelOutputWithPast(
994
+ last_hidden_state=hidden_states,
995
+ past_key_values=next_cache,
996
+ hidden_states=all_hidden_states,
997
+ attentions=all_self_attns,
998
+ )
999
+
1000
+
1001
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
1002
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
1003
+ _auto_class = 'AutoModelForCausalLM'
1004
+
1005
+ _tied_weights_keys = ['output.weight']
1006
+
1007
+ def __init__(self, config):
1008
+ super().__init__(config)
1009
+ self.model = InternLM2Model(config)
1010
+ self.vocab_size = config.vocab_size
1011
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1012
+
1013
+ # Initialize weights and apply final processing
1014
+ self.post_init()
1015
+
1016
+ def get_input_embeddings(self):
1017
+ return self.model.tok_embeddings
1018
+
1019
+ def set_input_embeddings(self, value):
1020
+ self.model.tok_embeddings = value
1021
+
1022
+ def get_output_embeddings(self):
1023
+ return self.output
1024
+
1025
+ def set_output_embeddings(self, new_embeddings):
1026
+ self.output = new_embeddings
1027
+
1028
+ def set_decoder(self, decoder):
1029
+ self.model = decoder
1030
+
1031
+ def get_decoder(self):
1032
+ return self.model
1033
+
1034
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1035
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1036
+ def forward(
1037
+ self,
1038
+ input_ids: torch.LongTensor = None,
1039
+ attention_mask: Optional[torch.Tensor] = None,
1040
+ position_ids: Optional[torch.LongTensor] = None,
1041
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1042
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1043
+ labels: Optional[torch.LongTensor] = None,
1044
+ use_cache: Optional[bool] = None,
1045
+ output_attentions: Optional[bool] = None,
1046
+ output_hidden_states: Optional[bool] = None,
1047
+ return_dict: Optional[bool] = None,
1048
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1049
+ r"""
1050
+ Args:
1051
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1052
+ Labels for computing the language modeling loss. Indices should either be in `[0, ...,
1053
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1054
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1055
+
1056
+ Returns:
1057
+
1058
+ Example:
1059
+
1060
+ ```python
1061
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1062
+
1063
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1064
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1065
+
1066
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1067
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1068
+
1069
+ >>> # Generate
1070
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1071
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1072
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1073
+ ```"""
1074
+
1075
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1076
+ output_hidden_states = (
1077
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1078
+ )
1079
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1080
+
1081
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1082
+ outputs = self.model(
1083
+ input_ids=input_ids,
1084
+ attention_mask=attention_mask,
1085
+ position_ids=position_ids,
1086
+ past_key_values=past_key_values,
1087
+ inputs_embeds=inputs_embeds,
1088
+ use_cache=use_cache,
1089
+ output_attentions=output_attentions,
1090
+ output_hidden_states=output_hidden_states,
1091
+ return_dict=return_dict,
1092
+ )
1093
+
1094
+ hidden_states = outputs[0]
1095
+ logits = self.output(hidden_states)
1096
+ logits = logits.float()
1097
+
1098
+ loss = None
1099
+ if labels is not None:
1100
+ # Shift so that tokens < n predict n
1101
+ shift_logits = logits[..., :-1, :].contiguous()
1102
+ shift_labels = labels[..., 1:].contiguous()
1103
+ # Flatten the tokens
1104
+ loss_fct = CrossEntropyLoss()
1105
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1106
+ shift_labels = shift_labels.view(-1)
1107
+ # Enable model parallelism
1108
+ shift_labels = shift_labels.to(shift_logits.device)
1109
+ loss = loss_fct(shift_logits, shift_labels)
1110
+
1111
+ if not return_dict:
1112
+ output = (logits,) + outputs[1:]
1113
+ return (loss,) + output if loss is not None else output
1114
+
1115
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1116
+ output = CausalLMOutputWithPast(
1117
+ loss=loss,
1118
+ logits=logits,
1119
+ past_key_values=outputs.past_key_values,
1120
+ hidden_states=outputs.hidden_states,
1121
+ attentions=outputs.attentions,
1122
+ )
1123
+ output['logits'] = output['logits'].to(device)
1124
+ return output
1125
+
1126
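The label shift in the loss above scores the logits at position t against the token at position t + 1. In isolation (toy vocabulary, illustrative numbers):

import torch
from torch.nn import CrossEntropyLoss

vocab = 10
logits = torch.randn(1, 4, vocab)     # (batch, seq, vocab)
labels = torch.tensor([[3, 7, 2, 9]])

shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab)  # positions 0..2
shift_labels = labels[..., 1:].contiguous().view(-1)             # tokens 1..3
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())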
+ def prepare_inputs_for_generation(
1127
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1128
+ ):
1129
+ if past_key_values is not None:
1130
+ past_length = past_key_values[0][0].shape[2]
1131
+
1132
+ # Some generation methods already pass only the last input ID
1133
+ if input_ids.shape[1] > past_length:
1134
+ remove_prefix_length = past_length
1135
+ else:
1136
+ # Default to old behavior: keep only final ID
1137
+ remove_prefix_length = input_ids.shape[1] - 1
1138
+
1139
+ input_ids = input_ids[:, remove_prefix_length:]
1140
+
1141
+ position_ids = kwargs.get('position_ids', None)
1142
+ if attention_mask is not None and position_ids is None:
1143
+ # create position_ids on the fly for batch generation
1144
+ position_ids = attention_mask.long().cumsum(-1) - 1
1145
+ position_ids.masked_fill_(attention_mask == 0, 1)
1146
+ if past_key_values:
1147
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1148
+
1149
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1150
+ if inputs_embeds is not None and past_key_values is None:
1151
+ model_inputs = {'inputs_embeds': inputs_embeds}
1152
+ else:
1153
+ model_inputs = {'input_ids': input_ids}
1154
+
1155
+ model_inputs.update(
1156
+ {
1157
+ 'position_ids': position_ids,
1158
+ 'past_key_values': past_key_values,
1159
+ 'use_cache': kwargs.get('use_cache'),
1160
+ 'attention_mask': attention_mask,
1161
+ }
1162
+ )
1163
+ return model_inputs
1164
+
1165
+ @staticmethod
1166
+ def _reorder_cache(past_key_values, beam_idx):
1167
+ reordered_past = ()
1168
+ for layer_past in past_key_values:
1169
+ reordered_past += (
1170
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1171
+ )
1172
+ return reordered_past
1173
+
1174
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
1175
+ if tokenizer.add_bos_token:
1176
+ prompt = ''
1177
+ else:
1178
+ prompt = tokenizer.bos_token
1179
+ if meta_instruction:
1180
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1181
+ for record in history:
1182
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1183
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1184
+ return tokenizer([prompt], return_tensors='pt')
1185
+
1186
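`build_inputs` above assembles a ChatML-style prompt. For one prior turn plus a new query it produces a string of this shape (illustrative content; BOS handling depends on the tokenizer):

prompt = (
    '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n'
    '<|im_start|>user\nHi<|im_end|>\n'
    '<|im_start|>assistant\nHello!<|im_end|>\n'
    '<|im_start|>user\nWhat is RoPE?<|im_end|>\n'
    '<|im_start|>assistant\n'
)
print(prompt)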
+ @torch.no_grad()
1187
+ def chat(
1188
+ self,
1189
+ tokenizer,
1190
+ query: str,
1191
+ history: List[Tuple[str, str]] = [],
1192
+ streamer: Optional[BaseStreamer] = None,
1193
+ max_new_tokens: int = 1024,
1194
+ do_sample: bool = True,
1195
+ temperature: float = 0.8,
1196
+ top_p: float = 0.8,
1197
+ meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
1198
+ '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
1199
+ '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
1200
+ **kwargs,
1201
+ ):
1202
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1203
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1204
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1205
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
1206
+ outputs = self.generate(
1207
+ **inputs,
1208
+ streamer=streamer,
1209
+ max_new_tokens=max_new_tokens,
1210
+ do_sample=do_sample,
1211
+ temperature=temperature,
1212
+ top_p=top_p,
1213
+ eos_token_id=eos_token_id,
1214
+ **kwargs,
1215
+ )
1216
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
1217
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1218
+ response = response.split('<|im_end|>')[0]
1219
+ history = history + [(query, response)]
1220
+ return response, history
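+    # Hedged usage sketch (the checkpoint path is a placeholder; assumes a
+    # checkpoint that ships this remote code):
+    #   tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
+    #   model = AutoModelForCausalLM.from_pretrained(path, trust_remote_code=True)
+    #   response, history = model.chat(tokenizer, 'Hello', history=[])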
+
+    @torch.no_grad()
+    def stream_chat(
+        self,
+        tokenizer,
+        query: str,
+        history: List[Tuple[str, str]] = [],
+        max_new_tokens: int = 1024,
+        do_sample: bool = True,
+        temperature: float = 0.8,
+        top_p: float = 0.8,
+        **kwargs,
+    ):
+        """
+        Return a generator that yields (response, history) tuples, e.g.
+        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
+        ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
+        (roughly: ('Hello, how can I help you', [('Hello', 'Hello, how can I help you')]))
+        """
+        if BaseStreamer is None:
+            raise ModuleNotFoundError(
+                'The version of `transformers` is too low. Please make sure '
+                'that you have installed `transformers>=4.28.0`.'
+            )
+
+        response_queue = queue.Queue(maxsize=20)
+
+        class ChatStreamer(BaseStreamer):
+            def __init__(self, tokenizer) -> None:
+                super().__init__()
+                self.tokenizer = tokenizer
+                self.queue = response_queue
+                self.query = query
+                self.history = history
+                self.response = ''
+                self.cache = []
+                self.received_inputs = False
+                self.queue.put((self.response, history + [(self.query, self.response)]))
+
+            def put(self, value):
+                if len(value.shape) > 1 and value.shape[0] > 1:
+                    raise ValueError('ChatStreamer only supports batch size 1')
+                elif len(value.shape) > 1:
+                    value = value[0]
+
+                if not self.received_inputs:
+                    # The first received value is input_ids, ignore here
+                    self.received_inputs = True
+                    return
+
+                self.cache.extend(value.tolist())
+                token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
+                if token.strip() != '<|im_end|>':
+                    self.response = self.response + token
+                    history = self.history + [(self.query, self.response)]
+                    self.queue.put((self.response, history))
+                    self.cache = []
+                else:
+                    self.end()
+
+            def end(self):
+                self.queue.put(None)
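+                # `None` is the sentinel that tells the consumer generator
+                # below to stop iterating.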
1283
+
1284
+ def stream_producer():
1285
+ return self.chat(
1286
+ tokenizer=tokenizer,
1287
+ query=query,
1288
+ streamer=ChatStreamer(tokenizer=tokenizer),
1289
+ history=history,
1290
+ max_new_tokens=max_new_tokens,
1291
+ do_sample=do_sample,
1292
+ temperature=temperature,
1293
+ top_p=top_p,
1294
+ **kwargs,
1295
+ )
1296
+
1297
+ def consumer():
1298
+ producer = threading.Thread(target=stream_producer)
1299
+ producer.start()
1300
+ while True:
1301
+ res = response_queue.get()
1302
+ if res is None:
1303
+ return
1304
+ yield res
1305
+
1306
+ return consumer()
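+    # Hedged usage sketch: generation runs on a producer thread while the
+    # returned generator drains the queue, so partial responses can be printed
+    # as they arrive:
+    #   for response, history in model.stream_chat(tokenizer, 'Hello'):
+    #       print(response, end='\r')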
+
+
+# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
+@add_start_docstrings(
+    """
+    The InternLM2 Model transformer with a sequence classification head on top (linear layer).
+
+    [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
+    as other causal models (e.g. GPT-2) do.
+
+    Since it does classification on the last token, it needs to know the position of the last token. If a
+    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
+    each row of the batch).
+    """,
+    InternLM2_START_DOCSTRING,
+)
+class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
+    def __init__(self, config):
+        super().__init__(config)
+        self.num_labels = config.num_labels
+        self.model = InternLM2Model(config)
+        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.tok_embeddings
+
+    def set_input_embeddings(self, value):
+        self.model.tok_embeddings = value
+
+    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_values: Optional[List[torch.FloatTensor]] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+        labels: Optional[torch.LongTensor] = None,
+        use_cache: Optional[bool] = None,
+        output_attentions: Optional[bool] = None,
+        output_hidden_states: Optional[bool] = None,
+        return_dict: Optional[bool] = None,
+    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+        r"""
+        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+            config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+            `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
+        """
+        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        transformer_outputs = self.model(
+            input_ids,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_values=past_key_values,
+            inputs_embeds=inputs_embeds,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+            output_hidden_states=output_hidden_states,
+            return_dict=return_dict,
+        )
+        hidden_states = transformer_outputs[0]
+        logits = self.score(hidden_states)
+
+        if input_ids is not None:
+            batch_size = input_ids.shape[0]
+        else:
+            batch_size = inputs_embeds.shape[0]
+
+        if self.config.pad_token_id is None and batch_size != 1:
+            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
+        if self.config.pad_token_id is None:
+            sequence_lengths = -1
+        else:
+            if input_ids is not None:
+                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
+                    logits.device
+                )
+            else:
+                sequence_lengths = -1
+
+        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
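+        # Example (illustrative): with pad_token_id set and an input_ids row of
+        # [5, 6, 7, pad, pad], eq(...).argmax(-1) - 1 evaluates to 2, so the
+        # logits of the last non-padding token are pooled for that row.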
1395
+
1396
+ loss = None
1397
+ if labels is not None:
1398
+ labels = labels.to(logits.device)
1399
+ if self.config.problem_type is None:
1400
+ if self.num_labels == 1:
1401
+ self.config.problem_type = 'regression'
1402
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1403
+ self.config.problem_type = 'single_label_classification'
1404
+ else:
1405
+ self.config.problem_type = 'multi_label_classification'
1406
+
1407
+ if self.config.problem_type == 'regression':
1408
+ loss_fct = MSELoss()
1409
+ if self.num_labels == 1:
1410
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1411
+ else:
1412
+ loss = loss_fct(pooled_logits, labels)
1413
+ elif self.config.problem_type == 'single_label_classification':
1414
+ loss_fct = CrossEntropyLoss()
1415
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1416
+ elif self.config.problem_type == 'multi_label_classification':
1417
+ loss_fct = BCEWithLogitsLoss()
1418
+ loss = loss_fct(pooled_logits, labels)
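+            # Recap (illustrative): one label with float targets -> MSE
+            # regression; integer targets -> cross-entropy over num_labels
+            # classes; float multi-hot targets -> BCE-with-logits (multi-label).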
1419
+ if not return_dict:
1420
+ output = (pooled_logits,) + transformer_outputs[1:]
1421
+ return ((loss,) + output) if loss is not None else output
1422
+
1423
+ return SequenceClassifierOutputWithPast(
1424
+ loss=loss,
1425
+ logits=pooled_logits,
1426
+ past_key_values=transformer_outputs.past_key_values,
1427
+ hidden_states=transformer_outputs.hidden_states,
1428
+ attentions=transformer_outputs.attentions,
1429
+ )
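+    # Hedged usage sketch (the checkpoint path is a placeholder):
+    #   clf = InternLM2ForSequenceClassification.from_pretrained(path, num_labels=2)
+    #   out = clf(**tokenizer('some text', return_tensors='pt'))
+    #   predicted_class = out.logits.argmax(-1)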