DLight1551 committed on
Commit df2c233 · 1 Parent(s): 21fc22e
build_mlp.py ADDED
@@ -0,0 +1,205 @@
+ import torch
+ import torch.nn as nn
+ import re
+ import math
+ from transformers import CLIPVisionModel, CLIPImageProcessor, CLIPVisionConfig
+
+
+ def build_vision_tower():
+     vision_tower = 'openai/clip-vit-large-patch14-336'
+     return CLIPVisionTower(vision_tower)
+
+
+ def build_vision_projector():
+     projector_type = 'mlp2x_gelu'
+     mm_hidden_size = 1024
+     hidden_size = 4096
+
+     mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
+     if mlp_gelu_match:
+         mlp_depth = int(mlp_gelu_match.group(1))
+         modules = [nn.Linear(mm_hidden_size, hidden_size)]
+         for _ in range(1, mlp_depth):
+             modules.append(nn.GELU())
+             modules.append(nn.Linear(hidden_size, hidden_size))
+         return nn.Sequential(*modules)
+
+     if projector_type == 'identity':
+         return IdentityMap()
+
+     raise ValueError(f'Unknown projector type: {projector_type}')
+
+ class IdentityMap(nn.Module):
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x, *args, **kwargs):
+         return x
+
+     @property
+     def config(self):
+         return {"mm_projector_type": 'identity'}
+
+
+ class CLIPVisionTower(nn.Module):
+     def __init__(self, vision_tower):
+         super().__init__()
+
+         self.is_loaded = False
+         self.is_resize_pos = False
+
+         self.vision_tower_name = vision_tower
+         self.select_layer = -1
+         self.select_feature = 'patch'
+         self.load_model()
+         self.resize_pos()
+
+     def load_model(self):
+         self.vision_tower = CLIPVisionModel.from_pretrained(self.vision_tower_name)
+         self.vision_tower.requires_grad_(False)
+
+         self.is_loaded = True
+     def resize_pos(self):
+         pos_embed_checkpoint = self.vision_tower.vision_model.embeddings.position_embedding.weight
+         pos_embed_checkpoint = pos_embed_checkpoint.unsqueeze(0)
+         orig_size = 24
+         new_size = 16
+
+         if pos_embed_checkpoint.shape[1] == new_size ** 2 + 1:
+             self.is_resize_pos = True
+         else:
+             embedding_size = pos_embed_checkpoint.shape[-1]
+             num_extra_tokens = 1
+             new_num = new_size ** 2 + num_extra_tokens
+             print("Position interpolate from %dx%d to %dx%d" %
+                   (orig_size, orig_size, new_size, new_size))
+             extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+             # only the position tokens are interpolated
+             pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+             pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
+                                             embedding_size).permute(
+                                                 0, 3, 1, 2)
+             pos_tokens = torch.nn.functional.interpolate(pos_tokens,
+                                                          size=(new_size,
+                                                                new_size),
+                                                          mode='bicubic',
+                                                          align_corners=False)
+             pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+             new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+
+             new_pos_embed = new_pos_embed.squeeze(0)
+
+             self.vision_tower.vision_model.embeddings.position_embedding = torch.nn.Embedding(new_num, 1024)
+             self.vision_tower.vision_model.embeddings.position_embedding.weight = torch.nn.Parameter(new_pos_embed.to(pos_embed_checkpoint.dtype))
+             self.vision_tower.vision_model.embeddings.position_ids = torch.arange(new_num).expand((1, -1))
+
+             #self.vision_tower.vision_model.embeddings.position_embedding.weight = torch.nn.Parameter(new_pos_embed.to(pos_embed_checkpoint.device).to(pos_embed_checkpoint.dtype))
+             #self.vision_tower.vision_model.embeddings.position_ids = torch.arange(new_num).expand((1, -1)).to(pos_embed_checkpoint.device)
+             self.is_resize_pos = True
+
+     def feature_select(self, image_forward_outs):
+         image_features = image_forward_outs.hidden_states[self.select_layer]
+         if self.select_feature == 'patch':
+             image_features = image_features[:, 1:]
+         elif self.select_feature == 'cls_patch':
+             image_features = image_features
+         else:
+             raise ValueError(f'Unexpected select feature: {self.select_feature}')
+         return image_features
+
+     def forward(self, images):
+         if not self.is_loaded:
+             self.load_model()
+         if type(images) is list:
+             image_features = []
+             for image in images:
+                 image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
+                 image_feature = self.feature_select(image_forward_out).to(image.dtype)
+                 image_features.append(image_feature)
+         else:
+             image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
+             image_features = self.feature_select(image_forward_outs).to(images.dtype)
+
+         return image_features
+
+     @property
+     def dummy_feature(self):
+         return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)
+
+     @property
+     def dtype(self):
+         return self.vision_tower.dtype
+
+     @property
+     def device(self):
+         return self.vision_tower.device
+
+     @property
+     def config(self):
+         if self.is_loaded:
+             return self.vision_tower.config
+         else:
+             return self.cfg_only
+
+     @property
+     def hidden_size(self):
+         return self.config.hidden_size
+
+     @property
+     def num_patches(self):
+         return (self.config.image_size // self.config.patch_size) ** 2
+
+ class PLoRA(nn.Linear):
+     def __init__(self,
+                  in_features: int,
+                  out_features: int,
+                  bias: bool = True,
+                  device=None,
+                  dtype=None,
+                  lora_r=8,
+                  lora_alpha=16,
+                  lora_dropout=0.05,
+                  lora_len=0,
+                  **kwargs) -> None:
+         super().__init__(in_features, out_features, bias, device, dtype)
+         self.lora_r = lora_r
+         self.lora_alpha = lora_alpha
+         self.lora_len = lora_len
+         if lora_dropout > 0.:
+             self.lora_dropout = nn.Dropout(p=lora_dropout)
+         else:
+             self.lora_dropout = lambda x: x
+         self.lora_scaling = self.lora_alpha / self.lora_r
+
+         self.Plora_A = nn.Linear(in_features,
+                                  self.lora_r,
+                                  bias=False,
+                                  device=device,
+                                  dtype=dtype)
+         self.Plora_B = nn.Linear(self.lora_r,
+                                  out_features,
+                                  bias=False,
+                                  device=device,
+                                  dtype=dtype)
+
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         if hasattr(self, 'lora_A'):
+             # initialize A the same way as the default for nn.Linear and B to zero
+             nn.init.kaiming_uniform_(self.lora_A.weight, a=math.sqrt(5))
+             nn.init.zeros_(self.lora_B.weight)
+             #print ("lora weight init {} {}".format(torch.mean(self.lora_A.weight), torch.mean(self.lora_B.weight)))
+
+     def forward(self, x, im_mask=None):
+         res = super().forward(x)
+         if im_mask is not None:
+             if torch.sum(im_mask) > 0:
+                 part_x = x[im_mask]
+                 res[im_mask] += self.Plora_B(self.Plora_A(
+                     self.lora_dropout(part_x))) * self.lora_scaling
+             else:
+                 part_x = x[:, :1]
+                 res[:, :1] += self.Plora_B(self.Plora_A(
+                     self.lora_dropout(part_x))) * 0
+         return res
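
Note on the two pieces added here: build_vision_projector() hard-codes the 'mlp2x_gelu' projector (Linear 1024→4096, GELU, Linear 4096→4096), and PLoRA is a plain nn.Linear with an extra low-rank branch that is applied only at positions selected by im_mask. Below is a minimal sketch of both behaviours using random weights and hypothetical shapes (no checkpoint is loaded; the token counts and dimensions in the PLoRA part are illustrative only, not the model's real sizes):

```python
import torch
import torch.nn as nn

# The 'mlp2x_gelu' projector that build_vision_projector() constructs.
projector = nn.Sequential(
    nn.Linear(1024, 4096),   # mm_hidden_size -> hidden_size
    nn.GELU(),
    nn.Linear(4096, 4096),
)
vis_feats = torch.randn(2, 5, 1024)      # hypothetical: batch of 2, 5 visual tokens each
print(projector(vis_feats).shape)        # torch.Size([2, 5, 4096])

# PLoRA-style partial LoRA: the low-rank update touches only image-token positions.
base = nn.Linear(64, 64, bias=False)
plora_a = nn.Linear(64, 8, bias=False)   # lora_r = 8
plora_b = nn.Linear(8, 64, bias=False)
scaling = 16 / 8                         # lora_alpha / lora_r

x = torch.randn(2, 5, 64)
im_mask = torch.zeros(2, 5, dtype=torch.bool)
im_mask[:, :3] = True                    # pretend the first 3 tokens per sample are image tokens

out = base(x)
out[im_mask] += plora_b(plora_a(x[im_mask])) * scaling   # text positions keep the plain Linear output
```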
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "_name_or_path": "/mnt/hwfile/mllm/zangyuhang/share_models/INT2_RL_P256_S2PT224_0113_B2_Decay1e5_ftV_MIX_FIX/",
+   "architectures": [
+     "InternLM2ForCausalLM"
+   ],
+   "attn_implementation": "eager",
+   "auto_map": {
+     "AutoConfig": "configuration_internlm.InternLMConfig",
+     "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+     "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM"
+   },
+   "bias": false,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_length": 8192,
+   "max_position_embeddings": 32768,
+   "model_type": "internlm",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pad_token_id": 2,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 1.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 1000000,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.33.1",
+   "use_cache": false,
+   "vocab_size": 92544
+ }
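
The auto_map block above routes AutoConfig / AutoModelForCausalLM to the configuration_internlm.py and modeling_internlm2.py files added in this commit, so the checkpoint has to be loaded with trust_remote_code=True. A hedged loading sketch (the repo id is a placeholder, and the dtype choice is an assumption taken from the "torch_dtype" field, not part of this commit):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "local/path/or/hub-id-of-this-checkpoint"   # placeholder

tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,   # matches "torch_dtype": "bfloat16" above
    trust_remote_code=True,       # lets auto_map import the custom modeling files
).eval()
```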
configuration_internlm.py ADDED
@@ -0,0 +1,164 @@
+ # coding=utf-8
+ # Copyright (c) InternLM. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ class InternLMConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
+     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`InternLMModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details checkout [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+     Example:
+
+     ```python
+     >>> from transformers import InternLMModel, InternLMConfig
+
+     >>> # Initializing a InternLM internlm-7b style configuration
+     >>> configuration = InternLMConfig()
+
+     >>> # Initializing a model from the internlm-7b style configuration
+     >>> model = InternLMModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "internlm"
+     _auto_class = "AutoConfig"
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation="eager",
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = "eager"
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
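
_rope_scaling_validation() only accepts a two-key dict whose type is 'linear' or 'dynamic' and whose factor is a float >= 1.0; the {"factor": 1.0, "type": "dynamic"} block in config.json satisfies this. A small illustrative sketch (it assumes configuration_internlm.py is importable from the working directory):

```python
from configuration_internlm import InternLMConfig

# Mirrors the rope_scaling block in config.json; validation passes.
cfg = InternLMConfig(rope_theta=1000000, rope_scaling={"type": "dynamic", "factor": 1.0})

# An integer factor (or an unknown type) is rejected by _rope_scaling_validation.
try:
    InternLMConfig(rope_scaling={"type": "dynamic", "factor": 2})
except ValueError as err:
    print(err)
```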
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 640,
+   "pad_token_id": 2,
+   "transformers_version": "4.33.1",
+   "use_cache": false
+ }
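
These values become the default decoding settings whenever model.generate() is called without explicit overrides (eos/pad id 2, max_length 640). Continuing the hedged loading sketch above, and assuming the text-only path of the custom InternLM2ForCausalLM behaves like a stock causal LM:

```python
prompt = "Briefly introduce yourself."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# eos_token_id, pad_token_id and max_length fall back to generation_config.json.
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```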
modeling_internlm2.py ADDED
@@ -0,0 +1,1546 @@
1
+ # # Copyright (c) InternLM. All rights reserved.
2
+ #
3
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
4
+ # and OPT implementations in this library. It has been modified from its
5
+ # original forms to accommodate minor architectural differences compared
6
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+ """PyTorch InternLM2 model."""
20
+ import copy
21
+ import math
22
+ import queue
23
+ import threading
24
+ import warnings
25
+ from typing import List, Optional, Tuple, Union
26
+
27
+ import torch
28
+ import torch.utils.checkpoint
29
+ from einops import rearrange
30
+ from PIL import Image
31
+ from torch import nn
32
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
33
+ from torchvision import transforms
34
+ from torchvision.transforms.functional import InterpolationMode
35
+ from transformers.activations import ACT2FN
36
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
37
+ CausalLMOutputWithPast,
38
+ SequenceClassifierOutputWithPast)
39
+ from transformers.modeling_utils import PreTrainedModel
40
+ from transformers.utils import (add_start_docstrings,
41
+ add_start_docstrings_to_model_forward, logging,
42
+ replace_return_docstrings)
43
+
44
+ try:
45
+ from transformers.generation.streamers import BaseStreamer
46
+ except: # noqa # pylint: disable=bare-except
47
+ BaseStreamer = None
48
+
49
+ from .build_mlp import PLoRA, build_vision_projector, build_vision_tower
50
+ from .configuration_internlm import InternLMConfig as InternLM2Config
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+ _CONFIG_FOR_DOC = 'InternLM2Config'
55
+
56
+
57
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
58
+ def _make_causal_mask(input_ids_shape: torch.Size,
59
+ dtype: torch.dtype,
60
+ device: torch.device,
61
+ past_key_values_length: int = 0):
62
+ """Make causal mask used for bi-directional self-attention."""
63
+ bsz, tgt_len = input_ids_shape
64
+ mask = torch.full((tgt_len, tgt_len),
65
+ torch.tensor(torch.finfo(dtype).min, device=device),
66
+ device=device)
67
+ mask_cond = torch.arange(mask.size(-1), device=device)
68
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
69
+ mask = mask.to(dtype)
70
+
71
+ if past_key_values_length > 0:
72
+ mask = torch.cat([
73
+ torch.zeros(
74
+ tgt_len, past_key_values_length, dtype=dtype, device=device),
75
+ mask
76
+ ],
77
+ dim=-1)
78
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len,
79
+ tgt_len + past_key_values_length)
80
+
81
+
82
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
83
+ def _expand_mask(mask: torch.Tensor,
84
+ dtype: torch.dtype,
85
+ tgt_len: Optional[int] = None):
86
+ """Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len,
87
+ src_seq_len]`."""
88
+ bsz, src_len = mask.size()
89
+ tgt_len = tgt_len if tgt_len is not None else src_len
90
+
91
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len,
92
+ src_len).to(dtype)
93
+
94
+ inverted_mask = 1.0 - expanded_mask
95
+
96
+ return inverted_mask.masked_fill(
97
+ inverted_mask.to(torch.bool),
98
+ torch.finfo(dtype).min)
99
+
100
+
101
+ class InternLM2RMSNorm(nn.Module):
102
+
103
+ def __init__(self, hidden_size, eps=1e-6):
104
+ """InternLM2RMSNorm is equivalent to T5LayerNorm."""
105
+ super().__init__()
106
+ self.weight = nn.Parameter(torch.ones(hidden_size))
107
+ self.variance_epsilon = eps
108
+
109
+ def forward(self, hidden_states):
110
+ input_dtype = hidden_states.dtype
111
+ hidden_states = hidden_states.to(torch.float32)
112
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
113
+ hidden_states = hidden_states * torch.rsqrt(variance +
114
+ self.variance_epsilon)
115
+ return self.weight * hidden_states.to(input_dtype)
116
+
117
+
118
+ class InternLM2RotaryEmbedding(nn.Module):
119
+
120
+ def __init__(self,
121
+ dim,
122
+ max_position_embeddings=2048,
123
+ base=10000,
124
+ device=None):
125
+ super().__init__()
126
+
127
+ self.dim = dim
128
+ self.max_position_embeddings = max_position_embeddings
129
+ self.base = base
130
+ inv_freq = 1.0 / (
131
+ self.base
132
+ **(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
133
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
134
+
135
+ # Build here to make `torch.jit.trace` work.
136
+ self._set_cos_sin_cache(
137
+ seq_len=max_position_embeddings,
138
+ device=self.inv_freq.device,
139
+ dtype=torch.get_default_dtype())
140
+
141
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
142
+ self.max_seq_len_cached = seq_len
143
+ t = torch.arange(
144
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
145
+
146
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
147
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
148
+ emb = torch.cat((freqs, freqs), dim=-1)
149
+ self.register_buffer(
150
+ 'cos_cached', emb.cos().to(dtype), persistent=False)
151
+ self.register_buffer(
152
+ 'sin_cached', emb.sin().to(dtype), persistent=False)
153
+
154
+ def forward(self, x, seq_len=None):
155
+ # x: [bs, num_attention_heads, seq_len, head_size]
156
+ if seq_len > self.max_seq_len_cached:
157
+ self._set_cos_sin_cache(
158
+ seq_len=seq_len, device=x.device, dtype=x.dtype)
159
+
160
+ return (
161
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
162
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
163
+ )
164
+
165
+
166
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
167
+ """InternLM2RotaryEmbedding extended with linear scaling.
168
+
169
+ Credits to the Reddit user /u/kaiokendev
170
+ """
171
+
172
+ def __init__(self,
173
+ dim,
174
+ max_position_embeddings=2048,
175
+ base=10000,
176
+ device=None,
177
+ scaling_factor=1.0):
178
+ self.scaling_factor = scaling_factor
179
+ super().__init__(dim, max_position_embeddings, base, device)
180
+
181
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
182
+ self.max_seq_len_cached = seq_len
183
+ t = torch.arange(
184
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
185
+ t = t / self.scaling_factor
186
+
187
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
188
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
189
+ emb = torch.cat((freqs, freqs), dim=-1)
190
+ self.register_buffer(
191
+ 'cos_cached', emb.cos().to(dtype), persistent=False)
192
+ self.register_buffer(
193
+ 'sin_cached', emb.sin().to(dtype), persistent=False)
194
+
195
+
196
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
197
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
198
+
199
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
200
+ """
201
+
202
+ def __init__(self,
203
+ dim,
204
+ max_position_embeddings=2048,
205
+ base=10000,
206
+ device=None,
207
+ scaling_factor=1.0):
208
+ self.scaling_factor = scaling_factor
209
+ super().__init__(dim, max_position_embeddings, base, device)
210
+
211
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
212
+ self.max_seq_len_cached = seq_len
213
+
214
+ if seq_len > self.max_position_embeddings:
215
+ base = self.base * ((self.scaling_factor * seq_len /
216
+ self.max_position_embeddings) -
217
+ (self.scaling_factor - 1))**(
218
+ self.dim / (self.dim - 2))
219
+ inv_freq = 1.0 / (
220
+ base
221
+ **(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
222
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
223
+
224
+ t = torch.arange(
225
+ self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
226
+
227
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
228
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
229
+ emb = torch.cat((freqs, freqs), dim=-1)
230
+ self.register_buffer(
231
+ 'cos_cached', emb.cos().to(dtype), persistent=False)
232
+ self.register_buffer(
233
+ 'sin_cached', emb.sin().to(dtype), persistent=False)
234
+
235
+
236
+ def rotate_half(x):
237
+ """Rotates half the hidden dims of the input."""
238
+ x1 = x[..., :x.shape[-1] // 2]
239
+ x2 = x[..., x.shape[-1] // 2:]
240
+ return torch.cat((-x2, x1), dim=-1)
241
+
242
+
243
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
244
+ # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
245
+ cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
246
+ sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
247
+ cos = cos.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
248
+ sin = sin.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
249
+ if q.size(2) == 1:
250
+ q_embed = (q * cos[:, :, -1:, :]) + (
251
+ rotate_half(q) * sin[:, :, -1:, :])
252
+ else:
253
+ q_embed = (q * cos) + (rotate_half(q) * sin)
254
+
255
+ if k.size(2) == 1:
256
+ k_embed = (k * cos[:, :, -1:, :]) + (
257
+ rotate_half(k) * sin[:, :, -1:, :])
258
+ else:
259
+ k_embed = (k * cos) + (rotate_half(k) * sin)
260
+
261
+ return q_embed, k_embed
262
+
263
+
264
+ class InternLM2MLP(nn.Module):
265
+
266
+ def __init__(self, config):
267
+ super().__init__()
268
+ self.config = config
269
+ self.hidden_size = config.hidden_size
270
+ self.intermediate_size = config.intermediate_size
271
+ #self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
272
+ #self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
273
+ #self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
274
+
275
+ self.w1 = PLoRA(
276
+ self.hidden_size,
277
+ self.intermediate_size,
278
+ bias=False,
279
+ lora_r=256,
280
+ lora_alpha=256,
281
+ lora_len=576)
282
+ self.w3 = PLoRA(
283
+ self.hidden_size,
284
+ self.intermediate_size,
285
+ bias=False,
286
+ lora_r=256,
287
+ lora_alpha=256,
288
+ lora_len=576)
289
+ self.w2 = PLoRA(
290
+ self.intermediate_size,
291
+ self.hidden_size,
292
+ bias=False,
293
+ lora_r=256,
294
+ lora_alpha=256,
295
+ lora_len=576)
296
+
297
+ self.act_fn = ACT2FN[config.hidden_act]
298
+
299
+ def forward(self, x, im_mask):
300
+ down_proj = self.w2(
301
+ self.act_fn(self.w1(x, im_mask)) * self.w3(x, im_mask), im_mask)
302
+
303
+ return down_proj
304
+
305
+
306
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
307
+ """This is the equivalent of torch.repeat_interleave(x, dim=1,
308
+ repeats=n_rep).
309
+
310
+ The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to
311
+ (batch, num_attention_heads, seqlen, head_dim)
312
+ """
313
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
314
+ if n_rep == 1:
315
+ return hidden_states
316
+ hidden_states = hidden_states[:, :,
317
+ None, :, :].expand(batch,
318
+ num_key_value_heads,
319
+ n_rep, slen, head_dim)
320
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen,
321
+ head_dim)
322
+
323
+
324
+ class InternLM2Attention(nn.Module):
325
+ """Multi-headed attention from 'Attention Is All You Need' paper."""
326
+
327
+ def __init__(self, config: InternLM2Config):
328
+ super().__init__()
329
+ self.config = config
330
+ self.hidden_size = config.hidden_size
331
+ self.num_heads = config.num_attention_heads
332
+ self.head_dim = self.hidden_size // self.num_heads
333
+ self.num_key_value_heads = config.num_key_value_heads
334
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
335
+ self.max_position_embeddings = config.max_position_embeddings
336
+ self.is_causal = True
337
+
338
+ if (self.head_dim * self.num_heads) != self.hidden_size:
339
+ raise ValueError(
340
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
341
+ f' and `num_heads`: {self.num_heads}).')
342
+
343
+ #self.wqkv = nn.Linear(
344
+ self.wqkv = PLoRA(
345
+ self.hidden_size,
346
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
347
+ bias=config.bias,
348
+ lora_r=256,
349
+ lora_alpha=256,
350
+ lora_len=576)
351
+
352
+ #self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
353
+ self.wo = PLoRA(
354
+ self.num_heads * self.head_dim,
355
+ self.hidden_size,
356
+ bias=config.bias,
357
+ lora_r=256,
358
+ lora_alpha=256,
359
+ lora_len=576)
360
+ self._init_rope()
361
+
362
+ def _init_rope(self):
363
+ if self.config.rope_scaling is None:
364
+ self.rotary_emb = InternLM2RotaryEmbedding(
365
+ self.head_dim,
366
+ max_position_embeddings=self.max_position_embeddings,
367
+ base=self.config.rope_theta,
368
+ )
369
+ else:
370
+ scaling_type = self.config.rope_scaling['type']
371
+ scaling_factor = self.config.rope_scaling['factor']
372
+ if scaling_type == 'dynamic':
373
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
374
+ self.head_dim,
375
+ max_position_embeddings=self.max_position_embeddings,
376
+ base=self.config.rope_theta,
377
+ scaling_factor=scaling_factor)
378
+ else:
379
+ raise ValueError(
380
+ "Currently we only support rotary embedding's type being 'dynamic'."
381
+ )
382
+ return self.rotary_emb
383
+
384
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
385
+ return tensor.view(bsz, seq_len, self.num_heads,
386
+ self.head_dim).transpose(1, 2).contiguous()
387
+
388
+ def forward(
389
+ self,
390
+ hidden_states: torch.Tensor,
391
+ attention_mask: Optional[torch.Tensor] = None,
392
+ position_ids: Optional[torch.LongTensor] = None,
393
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
394
+ output_attentions: bool = False,
395
+ use_cache: bool = False,
396
+ im_mask: Optional[Tuple[torch.Tensor]] = None,
397
+ **kwargs,
398
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
399
+ Optional[Tuple[torch.Tensor]]]:
400
+ if 'padding_mask' in kwargs:
401
+ warnings.warn(
402
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
403
+ 'Please make sure use `attention_mask` instead.`')
404
+
405
+ bsz, q_len, _ = hidden_states.size()
406
+
407
+ qkv_states = self.wqkv(hidden_states, im_mask)
408
+
409
+ qkv_states = rearrange(
410
+ qkv_states,
411
+ 'b q (h gs d) -> b q h gs d',
412
+ gs=2 + self.num_key_value_groups,
413
+ d=self.head_dim,
414
+ )
415
+
416
+ query_states = qkv_states[..., :self.num_key_value_groups, :]
417
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
418
+ key_states = qkv_states[..., -2, :]
419
+ value_states = qkv_states[..., -1, :]
420
+
421
+ query_states = query_states.transpose(1, 2)
422
+ key_states = key_states.transpose(1, 2)
423
+ value_states = value_states.transpose(1, 2)
424
+
425
+ kv_seq_len = key_states.shape[-2]
426
+ if past_key_value is not None:
427
+ kv_seq_len += past_key_value[0].shape[-2]
428
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
429
+ query_states, key_states = apply_rotary_pos_emb(
430
+ query_states, key_states, cos, sin, position_ids)
431
+
432
+ if past_key_value is not None:
433
+ # reuse k, v, self_attention
434
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
435
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
436
+
437
+ past_key_value = (key_states, value_states) if use_cache else None
438
+
439
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
440
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
441
+
442
+ attn_weights = torch.matmul(query_states, key_states.transpose(
443
+ 2, 3)) / math.sqrt(self.head_dim)
444
+
445
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
446
+ raise ValueError(
447
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
448
+ f' {attn_weights.size()}')
449
+
450
+ if attention_mask is not None:
451
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
452
+ raise ValueError(
453
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
454
+ )
455
+ attn_weights = attn_weights + attention_mask
456
+
457
+ # upcast attention to fp32
458
+ attn_weights = nn.functional.softmax(
459
+ attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
460
+ attn_output = torch.matmul(attn_weights, value_states)
461
+
462
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
463
+ raise ValueError(
464
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
465
+ f' {attn_output.size()}')
466
+
467
+ attn_output = attn_output.transpose(1, 2).contiguous()
468
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
469
+
470
+ attn_output = self.wo(attn_output, im_mask)
471
+
472
+ if not output_attentions:
473
+ attn_weights = None
474
+
475
+ return attn_output, attn_weights, past_key_value
476
+
477
+
478
+ class InternLM2FlashAttention2(InternLM2Attention):
479
+ """InternLM2 flash attention module.
480
+
481
+ This module inherits from `InternLM2Attention` as the weights of the module
482
+ stays untouched. The only required change would be on the forward pass
483
+ where it needs to correctly call the public API of flash attention and deal
484
+ with padding tokens in case the input contains any of them.
485
+ """
486
+
487
+ def forward(
488
+ self,
489
+ hidden_states: torch.Tensor,
490
+ attention_mask: Optional[torch.LongTensor] = None,
491
+ position_ids: Optional[torch.LongTensor] = None,
492
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
493
+ output_attentions: bool = False,
494
+ use_cache: bool = False,
495
+ im_mask: Optional[Tuple[torch.Tensor]] = None,
496
+ **kwargs,
497
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
498
+ Optional[Tuple[torch.Tensor]]]:
499
+ # InternLM2FlashAttention2 attention does not support output_attentions
500
+ if 'padding_mask' in kwargs:
501
+ warnings.warn(
502
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
503
+ 'Please make sure use `attention_mask` instead.`')
504
+
505
+ # overwrite attention_mask with padding_mask
506
+ attention_mask = kwargs.pop('padding_mask')
507
+
508
+ output_attentions = False
509
+
510
+ bsz, q_len, _ = hidden_states.size()
511
+
512
+ qkv_states = self.wqkv(hidden_states, im_mask)
513
+
514
+ qkv_states = rearrange(
515
+ qkv_states,
516
+ 'b q (h gs d) -> b q h gs d',
517
+ gs=self.num_heads + 2 * self.num_key_value_heads,
518
+ d=self.head_dim,
519
+ q=q_len,
520
+ )
521
+
522
+ query_states = qkv_states[..., :self.num_key_value_groups, :]
523
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
524
+ key_states = qkv_states[..., -2, :]
525
+ value_states = qkv_states[..., -1, :]
526
+
527
+ kv_seq_len = key_states.shape[-2]
528
+ if past_key_value is not None:
529
+ kv_seq_len += past_key_value[0].shape[-2]
530
+
531
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
532
+
533
+ query_states, key_states = apply_rotary_pos_emb(
534
+ query_states, key_states, cos, sin, position_ids)
535
+
536
+ if past_key_value is not None:
537
+ # reuse k, v, self_attention
538
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
539
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
540
+
541
+ past_key_value = (key_states, value_states) if use_cache else None
542
+
543
+ query_states = query_states.transpose(1, 2)
544
+ key_states = key_states.transpose(1, 2)
545
+ value_states = value_states.transpose(1, 2)
546
+
547
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
548
+
549
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
550
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
551
+ # cast them back in the correct dtype just to be sure everything works as expected.
552
+ # This might slowdown training & inference so it is recommended to not cast the LayerNorms
553
+ # in fp32. (InternLM2RMSNorm handles it correctly)
554
+
555
+ input_dtype = query_states.dtype
556
+ if input_dtype == torch.float32:
557
+ # Handle the case where the model is quantized
558
+ if hasattr(self.config, '_pre_quantization_dtype'):
559
+ target_dtype = self.config._pre_quantization_dtype
560
+ else:
561
+ target_dtype = self.q_proj.weight.dtype
562
+
563
+ logger.warning_once(
564
+ f'The input hidden states seems to be silently casted in float32, this might be related to'
565
+ f' the fact you have upcasted embedding or layer norm layers in float32. We will cast back '
566
+ f'the input in {target_dtype}.')
567
+
568
+ query_states = query_states.to(target_dtype)
569
+ key_states = key_states.to(target_dtype)
570
+ value_states = value_states.to(target_dtype)
571
+
572
+ attn_output = self._flash_attention_forward(
573
+ query_states,
574
+ key_states,
575
+ value_states,
576
+ attention_mask,
577
+ q_len,
578
+ dropout=dropout_rate)
579
+
580
+ attn_output = attn_output.reshape(bsz, q_len,
581
+ self.hidden_size).contiguous()
582
+ attn_output = self.wo(attn_output, im_mask)
583
+
584
+ if not output_attentions:
585
+ attn_weights = None
586
+
587
+ return attn_output, attn_weights, past_key_value
588
+
589
+
590
+ class InternLM2DecoderLayer(nn.Module):
591
+
592
+ def __init__(self, config: InternLM2Config):
593
+ super().__init__()
594
+ self.hidden_size = config.hidden_size
595
+ self.attention = (
596
+ InternLM2Attention(config=config)
597
+ if not getattr(config, '_flash_attn_2_enabled', False) else
598
+ InternLM2FlashAttention2(config=config))
599
+ self.feed_forward = InternLM2MLP(config)
600
+ self.attention_norm = InternLM2RMSNorm(
601
+ config.hidden_size, eps=config.rms_norm_eps)
602
+ self.ffn_norm = InternLM2RMSNorm(
603
+ config.hidden_size, eps=config.rms_norm_eps)
604
+
605
+ def forward(
606
+ self,
607
+ hidden_states: torch.Tensor,
608
+ attention_mask: Optional[torch.Tensor] = None,
609
+ position_ids: Optional[torch.LongTensor] = None,
610
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
611
+ output_attentions: Optional[bool] = False,
612
+ use_cache: Optional[bool] = False,
613
+ im_mask: Optional[Tuple[torch.Tensor]] = None,
614
+ **kwargs,
615
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor,
616
+ torch.FloatTensor]]]:
617
+ """
618
+ Args:
619
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
620
+ attention_mask (`torch.FloatTensor`, *optional*):
621
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
622
+ query_sequence_length, key_sequence_length)` if default attention is used.
623
+ output_attentions (`bool`, *optional*):
624
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
625
+ returned tensors for more detail.
626
+ use_cache (`bool`, *optional*):
627
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
628
+ (see `past_key_values`).
629
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
630
+ """
631
+ if 'padding_mask' in kwargs:
632
+ warnings.warn(
633
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
634
+ 'Please make sure use `attention_mask` instead.`')
635
+
636
+ residual = hidden_states
637
+
638
+ hidden_states = self.attention_norm(hidden_states)
639
+
640
+ # Self Attention
641
+ hidden_states, self_attn_weights, present_key_value = self.attention(
642
+ hidden_states=hidden_states,
643
+ attention_mask=attention_mask,
644
+ position_ids=position_ids,
645
+ past_key_value=past_key_value,
646
+ output_attentions=output_attentions,
647
+ use_cache=use_cache,
648
+ im_mask=im_mask,
649
+ **kwargs,
650
+ )
651
+ hidden_states = residual + hidden_states
652
+
653
+ # Fully Connected
654
+ residual = hidden_states
655
+ hidden_states = self.ffn_norm(hidden_states)
656
+ hidden_states = self.feed_forward(hidden_states, im_mask)
657
+ hidden_states = residual + hidden_states
658
+
659
+ outputs = (hidden_states, )
660
+
661
+ if output_attentions:
662
+ outputs += (self_attn_weights, )
663
+
664
+ if use_cache:
665
+ outputs += (present_key_value, )
666
+
667
+ return outputs
668
+
669
+
670
+ InternLM2_START_DOCSTRING = r"""
671
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
672
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
673
+ etc.)
674
+
675
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
676
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
677
+ and behavior.
678
+
679
+ Parameters:
680
+ config ([`InternLM2Config`]):
681
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
682
+ load the weights associated with the model, only the configuration. Check out the
683
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
684
+ """
685
+
686
+
687
+ @add_start_docstrings(
688
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
689
+ InternLM2_START_DOCSTRING,
690
+ )
691
+ class InternLM2PreTrainedModel(PreTrainedModel):
692
+ config_class = InternLM2Config
693
+ base_model_prefix = 'model'
694
+ supports_gradient_checkpointing = True
695
+ _no_split_modules = ['InternLM2DecoderLayer']
696
+ _skip_keys_device_placement = 'past_key_values'
697
+ _supports_flash_attn_2 = True
698
+
699
+ def _init_weights(self, module):
700
+ std = self.config.initializer_range
701
+ if isinstance(module, nn.Linear):
702
+ module.weight.data.normal_(mean=0.0, std=std)
703
+ if module.bias is not None:
704
+ module.bias.data.zero_()
705
+ elif isinstance(module, nn.Embedding):
706
+ module.weight.data.normal_(mean=0.0, std=std)
707
+ if module.padding_idx is not None:
708
+ module.weight.data[module.padding_idx].zero_()
709
+
710
+
711
+ InternLM2_INPUTS_DOCSTRING = r"""
712
+ Args:
713
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
714
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
715
+ it.
716
+
717
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
718
+ [`PreTrainedTokenizer.__call__`] for details.
719
+
720
+ [What are input IDs?](../glossary#input-ids)
721
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
722
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
723
+
724
+ - 1 for tokens that are **not masked**,
725
+ - 0 for tokens that are **masked**.
726
+
727
+ [What are attention masks?](../glossary#attention-mask)
728
+
729
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
730
+ [`PreTrainedTokenizer.__call__`] for details.
731
+
732
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
733
+ `past_key_values`).
734
+
735
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
736
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
737
+ information on the default strategy.
738
+
739
+ - 1 indicates the head is **not masked**,
740
+ - 0 indicates the head is **masked**.
741
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
742
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
743
+ config.n_positions - 1]`.
744
+
745
+ [What are position IDs?](../glossary#position-ids)
746
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
747
+ when `config.use_cache=True`):
748
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
749
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
750
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
751
+
752
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
753
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
754
+
755
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
756
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
757
+ of shape `(batch_size, sequence_length)`.
758
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
759
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
760
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
761
+ model's internal embedding lookup matrix.
762
+ use_cache (`bool`, *optional*):
763
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
764
+ `past_key_values`).
765
+ output_attentions (`bool`, *optional*):
766
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
767
+ tensors for more detail.
768
+ output_hidden_states (`bool`, *optional*):
769
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
770
+ more detail.
771
+ return_dict (`bool`, *optional*):
772
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
773
+ """
774
+
775
+
776
+ @add_start_docstrings(
777
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
778
+ InternLM2_START_DOCSTRING,
779
+ )
780
+ class InternLM2Model(InternLM2PreTrainedModel):
781
+ """Transformer decoder consisting of *config.num_hidden_layers* layers.
782
+ Each layer is a [`InternLM2DecoderLayer`]
783
+
784
+ Args:
785
+ config: InternLM2Config
786
+ """
787
+
788
+ _auto_class = 'AutoModel'
789
+
790
+ def __init__(self, config: InternLM2Config):
791
+ super().__init__(config)
792
+ self.padding_idx = config.pad_token_id
793
+ self.vocab_size = config.vocab_size
794
+
795
+ self.tok_embeddings = nn.Embedding(config.vocab_size,
796
+ config.hidden_size,
797
+ self.padding_idx)
798
+ self.layers = nn.ModuleList([
799
+ InternLM2DecoderLayer(config)
800
+ for _ in range(config.num_hidden_layers)
801
+ ])
802
+ self.norm = InternLM2RMSNorm(
803
+ config.hidden_size, eps=config.rms_norm_eps)
804
+
805
+ self.gradient_checkpointing = False
806
+ # Initialize weights and apply final processing
807
+ self.post_init()
808
+
809
+ def get_input_embeddings(self):
810
+ return self.tok_embeddings
811
+
812
+ def set_input_embeddings(self, value):
813
+ self.tok_embeddings = value
814
+
815
+ # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
816
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape,
817
+ inputs_embeds, past_key_values_length):
818
+ # create causal mask
819
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
820
+ combined_attention_mask = None
821
+ if input_shape[-1] > 1:
822
+ combined_attention_mask = _make_causal_mask(
823
+ input_shape,
824
+ inputs_embeds.dtype,
825
+ device=inputs_embeds.device,
826
+ past_key_values_length=past_key_values_length,
827
+ )
828
+
829
+ if attention_mask is not None:
830
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
831
+ expanded_attn_mask = _expand_mask(
832
+ attention_mask, inputs_embeds.dtype,
833
+ tgt_len=input_shape[-1]).to(inputs_embeds.device)
834
+ combined_attention_mask = (
835
+ expanded_attn_mask if combined_attention_mask is None else
836
+ expanded_attn_mask + combined_attention_mask)
837
+
838
+ return combined_attention_mask
839
+
840
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
841
+ def forward(self,
842
+ input_ids: torch.LongTensor = None,
843
+ attention_mask: Optional[torch.Tensor] = None,
844
+ position_ids: Optional[torch.LongTensor] = None,
845
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
846
+ inputs_embeds: Optional[torch.FloatTensor] = None,
847
+ use_cache: Optional[bool] = None,
848
+ output_attentions: Optional[bool] = None,
849
+ output_hidden_states: Optional[bool] = None,
850
+ return_dict: Optional[bool] = None,
851
+ **kwargs) -> Union[Tuple, BaseModelOutputWithPast]:
852
+
853
+ im_mask = kwargs.get('im_mask', None)
854
+
855
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
856
+ output_hidden_states = (
857
+ output_hidden_states if output_hidden_states is not None else
858
+ self.config.output_hidden_states)
859
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
860
+
861
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
862
+
863
+ # retrieve input_ids and inputs_embeds
864
+ if input_ids is not None and inputs_embeds is not None:
865
+ raise ValueError(
866
+ 'You cannot specify both input_ids and inputs_embeds at the same time'
867
+ )
868
+ elif input_ids is not None:
869
+ batch_size, seq_length = input_ids.shape[:2]
870
+ elif inputs_embeds is not None:
871
+ batch_size, seq_length = inputs_embeds.shape[:2]
872
+ else:
873
+ raise ValueError(
874
+ 'You have to specify either input_ids or inputs_embeds')
875
+
876
+ seq_length_with_past = seq_length
877
+ past_key_values_length = 0
878
+ if past_key_values is not None:
879
+ past_key_values_length = past_key_values[0][0].shape[2]
880
+ seq_length_with_past = seq_length_with_past + past_key_values_length
881
+
882
+ if position_ids is None:
883
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
884
+ position_ids = torch.arange(
885
+ past_key_values_length,
886
+ seq_length + past_key_values_length,
887
+ dtype=torch.long,
888
+ device=device)
889
+ position_ids = position_ids.unsqueeze(0)
890
+
891
+ if inputs_embeds is None:
892
+ inputs_embeds = self.tok_embeddings(input_ids)
893
+ im_mask = torch.zeros(inputs_embeds.shape[:2]).to(
894
+ inputs_embeds.device).bool()
895
+ # embed positions
896
+ if attention_mask is None:
897
+ attention_mask = torch.ones((batch_size, seq_length_with_past),
898
+ dtype=torch.bool,
899
+ device=inputs_embeds.device)
900
+ attention_mask = self._prepare_decoder_attention_mask(
901
+ attention_mask, (batch_size, seq_length), inputs_embeds,
902
+ past_key_values_length)
903
+
904
+ # embed positions
905
+ hidden_states = inputs_embeds
906
+
907
+ if self.gradient_checkpointing and self.training:
908
+ if use_cache:
909
+ logger.warning_once(
910
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
911
+ )
912
+ use_cache = False
913
+
914
+ # decoder layers
915
+ all_hidden_states = () if output_hidden_states else None
916
+ all_self_attns = () if output_attentions else None
917
+ next_decoder_cache = () if use_cache else None
918
+
919
+ for idx, decoder_layer in enumerate(self.layers):
920
+ if output_hidden_states:
921
+ all_hidden_states += (hidden_states, )
922
+
923
+ past_key_value = past_key_values[
924
+ idx] if past_key_values is not None else None
925
+
926
+ if self.gradient_checkpointing and self.training:
927
+
928
+ def create_custom_forward(module):
929
+
930
+ def custom_forward(*inputs):
931
+ # None for past_key_value
932
+ return module(*inputs, output_attentions, None,
933
+ im_mask)
934
+
935
+ return custom_forward
936
+
937
+ layer_outputs = torch.utils.checkpoint.checkpoint(
938
+ create_custom_forward(decoder_layer),
939
+ hidden_states,
940
+ attention_mask,
941
+ position_ids,
942
+ None,
943
+ )
944
+ else:
945
+ layer_outputs = decoder_layer(
946
+ hidden_states,
947
+ attention_mask=attention_mask,
948
+ position_ids=position_ids,
949
+ past_key_value=past_key_value,
950
+ output_attentions=output_attentions,
951
+ use_cache=use_cache,
952
+ im_mask=im_mask,
953
+ )
954
+
955
+ hidden_states = layer_outputs[0]
956
+
957
+ if use_cache:
958
+ next_decoder_cache += (
959
+ layer_outputs[2 if output_attentions else 1], )
960
+
961
+ if output_attentions:
962
+ all_self_attns += (layer_outputs[1], )
963
+
964
+ hidden_states = self.norm(hidden_states)
965
+
966
+ # add hidden states from the last decoder layer
967
+ if output_hidden_states:
968
+ all_hidden_states += (hidden_states, )
969
+
970
+ next_cache = next_decoder_cache if use_cache else None
971
+ if not return_dict:
972
+ return tuple(
973
+ v for v in
974
+ [hidden_states, next_cache, all_hidden_states, all_self_attns]
975
+ if v is not None)
976
+ return BaseModelOutputWithPast(
977
+ last_hidden_state=hidden_states,
978
+ past_key_values=next_cache,
979
+ hidden_states=all_hidden_states,
980
+ attentions=all_self_attns,
981
+ )
982
+
983
+
984
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
985
+ _auto_class = 'AutoModelForCausalLM'
986
+
987
+ _tied_weights_keys = ['output.weight']
988
+
989
+ def __init__(self, config):
990
+ super().__init__(config)
991
+ self.model = InternLM2Model(config)
992
+ self.vocab_size = config.vocab_size
993
+ self.output = nn.Linear(
994
+ config.hidden_size, config.vocab_size, bias=False)
995
+ self.debug_flag = 1
996
+ self.tokenizer = None
997
+
998
+ self.max_length = config.max_length
999
+ print(f'Set max length to {self.max_length}')
1000
+ self.debug_flag = 1
1001
+ # Initialize weights and apply final processing
1002
+ self.post_init()
1003
+
1004
+ self.vit = build_vision_tower()
1005
+ self.vision_proj = build_vision_projector()
1006
+
1007
+ self.vis_processor = transforms.Compose([
1008
+ transforms.Resize((336, 336),
1009
+ interpolation=InterpolationMode.BICUBIC),
1010
+ transforms.ToTensor(),
1011
+ transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
1012
+ (0.26862954, 0.26130258, 0.27577711)),
1013
+ ])
1014
+
1015
+ def _set_gradient_checkpointing(self, module, value=False):
1016
+ if isinstance(module, InternLM2Model):
1017
+ module.gradient_checkpointing = value
1018
+ if value:
1019
+ self.vit.vision_tower.vision_model.encoder.gradient_checkpointing = value
1020
+
1021
+ def get_input_embeddings(self):
1022
+ return self.model.tok_embeddings
1023
+
1024
+ def set_input_embeddings(self, value):
1025
+ self.model.tok_embeddings = value
1026
+
1027
+ def get_output_embeddings(self):
1028
+ return self.output
1029
+
1030
+ def set_output_embeddings(self, new_embeddings):
1031
+ self.output = new_embeddings
1032
+
1033
+ def set_decoder(self, decoder):
1034
+ self.model = decoder
1035
+
1036
+ def get_decoder(self):
1037
+ return self.model
1038
+
1039
+ def encode_text(self, t, add_special_tokens=False):
1040
+ t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
1041
+ t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
1042
+ t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
1043
+ t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
1044
+ t = t.replace('[UNUSED_TOKEN_0]', '[UNUSED_TOKEN_145]')
1045
+ t = t.replace('[UNUSED_TOKEN_1]', '[UNUSED_TOKEN_145]')
1046
+
1047
+ text = t
1048
+ token = self.tokenizer(
1049
+ text, return_tensors='pt',
1050
+ add_special_tokens=add_special_tokens).input_ids.to(self.device)
1051
+ embs = self.model.tok_embeddings(token)
1052
+ return embs
1053
+
1054
+ def encode_img(self, image):
1055
+ if image is None:
1056
+ return None
1057
+ if isinstance(image, str):
1058
+ image = Image.open(image).convert('RGB')
1059
+ image = self.vis_processor(image).unsqueeze(0).to(self.device)
1060
+ else:
1061
+ assert isinstance(image, torch.Tensor)
1062
+
1063
+ img_embeds, atts_img, img_target = self.img2emb(image)
1064
+ return img_embeds
1065
+
1066
+ def img2emb(self, image):
1067
+ img_embeds = self.vision_proj(self.vit(image.to(self.device)))
1068
+ atts_img = torch.ones(
1069
+ img_embeds.size()[:-1], dtype=torch.long).to(img_embeds.device)
1070
+
1071
+ img_target = torch.ones(
1072
+ img_embeds.size()[:2], dtype=torch.long).to(
1073
+ img_embeds.device) * -100
1074
+
1075
+ return img_embeds, atts_img, img_target
1076
+
1077
+ def prompt_wrap(self, img_embeds, prompt):
1078
+ batch_size = img_embeds.shape[0]
1079
+ p_before, p_after = prompt.split('<ImageHere>')
1080
+ p_before_tokens = self.tokenizer(
1081
+ p_before, return_tensors='pt',
1082
+ add_special_tokens=True).to(img_embeds.device)
1083
+
1084
+ p_before_embeds = self.model.tok_embeddings(
1085
+ p_before_tokens.input_ids).expand(batch_size, -1, -1)
1086
+ wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds], dim=1)
1087
+
1088
+ wrapped_atts_img = torch.ones(
1089
+ wrapped_img_embeds.size()[:-1],
1090
+ dtype=torch.long).to(img_embeds.device)
1091
+
1092
+ wrapped_target = torch.ones(
1093
+ batch_size, wrapped_img_embeds.shape[1], dtype=torch.long).to(
1094
+ img_embeds.device) * -100
1095
+
1096
+ return wrapped_img_embeds, wrapped_atts_img, wrapped_target
1097
+
1098
+ def text2emb(self, text, add_special=False):
1099
+ if type(text) == str:
1100
+ new_text = []
1101
+ for t in text:
1102
+ t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
1103
+ t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
1104
+ t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
1105
+ t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
1106
+ new_text.append(t)
1107
+ text = new_text
1108
+ elif type(text) == list:
1109
+ new_text = []
1110
+ text_list = text
1111
+ for text in text_list:
1112
+ for t in text:
1113
+ t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
1114
+ t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
1115
+ t = t.replace('<TOKENS_UNUSED_0>', '[UNUSED_TOKEN_145]')
1116
+ t = t.replace('<TOKENS_UNUSED_1>', '[UNUSED_TOKEN_145]')
1117
+ new_text.append(t)
1118
+ text = new_text
1119
+ to_regress_tokens = self.tokenizer(
1120
+ text,
1121
+ return_tensors='pt',
1122
+ padding='longest',
1123
+ truncation=True,
1124
+ max_length=self.max_length,
1125
+ add_special_tokens=add_special).to(self.device)
1126
+
1127
+ targets = self.mask_human_targets(to_regress_tokens.input_ids)
1128
+ targets = targets.to(self.device)
1129
+
1130
+ return to_regress_tokens, targets
1131
+
1132
+ def interleav_wrap(self, img_list, text_list):
1133
+ wrap_embeds_list, wrap_atts_list = [], []
1134
+ wrap_target_list, wrap_im_mask_list = [], []
1135
+
1136
+ for image, text in zip(img_list, text_list):
1137
+ img_embeds, atts_img, img_target = self.img2emb(image)
1138
+ text = text[0]
1139
+ parts = text.split('<ImageHere>')
1140
+ wrap_tokens, wrap_embeds, wrap_atts, wrap_im_mask = [], [], [], []
1141
+ temp_len = 0
1142
+ image_nums, im_len = img_embeds.shape[:2]
1143
+ need_bos = True
1144
+ for idx, part in enumerate(parts):
1145
+ if len(part) > 0:
1146
+ part_tokens = self.tokenizer(
1147
+ part,
1148
+ return_tensors='pt',
1149
+ padding='longest',
1150
+ add_special_tokens=need_bos).to(self.device)
1151
+ if need_bos:
1152
+ need_bos = False
1153
+ wrap_tokens.append(part_tokens.input_ids)
1154
+ part_embeds = self.model.tok_embeddings(
1155
+ part_tokens.input_ids)
1156
+ wrap_embeds.append(part_embeds)
1157
+ wrap_atts.append(part_tokens.attention_mask)
1158
+ wrap_im_mask.append(
1159
+ torch.zeros(part_embeds.shape[:2]).to(self.device))
1160
+
1161
+ temp_len += part_embeds.shape[1]
1162
+ if idx < image_nums:
1163
+ wrap_tokens.append(img_target[idx].unsqueeze(0))
1164
+ wrap_embeds.append(img_embeds[idx].unsqueeze(0))
1165
+ wrap_atts.append(atts_img[idx].unsqueeze(0))
1166
+ wrap_im_mask.append(
1167
+ torch.ones_like(atts_img[idx].unsqueeze(0)))
1168
+
1169
+ temp_len += im_len
1170
+ if temp_len > self.max_length:
1171
+ break
1172
+
1173
+ wrap_tokens = torch.cat(wrap_tokens, dim=1)
1174
+ wrap_embeds = torch.cat(wrap_embeds, dim=1)
1175
+ wrap_atts = torch.cat(wrap_atts, dim=1)
1176
+ wrap_im_mask = torch.cat(wrap_im_mask, dim=1)
1177
+
1178
+ wrap_target = self.mask_human_targets(wrap_tokens).to(self.device)
1179
+
1180
+ wrap_embeds = wrap_embeds[:, :self.max_length].to(self.device)
1181
+ wrap_atts = wrap_atts[:, :self.max_length].to(self.device)
1182
+ wrap_target = wrap_target[:, :self.max_length].to(self.device)
1183
+ wrap_im_mask = wrap_im_mask[:, :self.max_length].to(self.device)
1184
+
1185
+ wrap_embeds_list.append(wrap_embeds)
1186
+ wrap_atts_list.append(wrap_atts)
1187
+ wrap_target_list.append(wrap_target)
1188
+ wrap_im_mask_list.append(wrap_im_mask)
1189
+
1190
+ wrap_embeds = torch.cat(wrap_embeds_list)
1191
+ wrap_atts = torch.cat(wrap_atts_list)
1192
+ wrap_target = torch.cat(wrap_target_list)
1193
+ wrap_im_mask = torch.cat(wrap_im_mask_list)
1194
+ return wrap_embeds, wrap_atts, wrap_target, wrap_im_mask
1195
+
1196
+ def mask_human_targets(self, input_ids, pure=False):
1197
+ target_batch = []
1198
+ for bs in range(input_ids.shape[0]):
1199
+ cur_idx = 0
1200
+ ids = input_ids[bs]
1201
+ targets = copy.deepcopy(ids)
1202
+ end_count = 0
1203
+ last_eoa = 0
1204
+ for i, temp_id in enumerate(ids):
1205
+ if temp_id == 92542:
1206
+ if end_count % 2 == 0:
1207
+ targets[last_eoa:i + 6] = -100
1208
+ else:
1209
+ last_eoa = i + 1
1210
+ end_count += 1
1211
+ elif temp_id == 2: ### eos and following pad
1212
+ targets[i + 1:] = -100 #### loss on eos, but not on pad
1213
+ break
1214
+ if temp_id != 2 and end_count % 2 == 0: ### truncation, end at last question
1215
+ targets[last_eoa +
1216
+ 1:] = -100 #### mask all after the last answer
1217
+
1218
+ target_batch.append(targets.unsqueeze(0))
1219
+ if self.debug_flag:
1220
+ print('#### Warning! System meta is not supported now')
1221
+ targets_vis = targets.clone()
1222
+ targets_vis[targets_vis == -100] = 92399
1223
+ targets_vis_tokens = ''.join(
1224
+ self.tokenizer.convert_ids_to_tokens(targets_vis)).replace(
1225
+ '[UNUSED_TOKEN_2]', ' ')
1226
+ # print(''.join(self.tokenizer.convert_ids_to_tokens(ids)))
1227
+ print('-----------')
1228
+ print([targets_vis_tokens])
1229
+ print('-----------------------------')
1230
+
1231
+ target_batch = torch.cat(target_batch, dim=0)
1232
+ return target_batch
1233
+
1234
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1235
+ @replace_return_docstrings(
1236
+ output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1237
+ def forward(self,
1238
+ input_ids: torch.LongTensor = None,
1239
+ attention_mask: Optional[torch.Tensor] = None,
1240
+ position_ids: Optional[torch.LongTensor] = None,
1241
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1242
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1243
+ labels: Optional[torch.LongTensor] = None,
1244
+ use_cache: Optional[bool] = None,
1245
+ output_attentions: Optional[bool] = None,
1246
+ output_hidden_states: Optional[bool] = None,
1247
+ return_dict: Optional[bool] = None,
1248
+ **kwargs) -> Union[Tuple, CausalLMOutputWithPast]:
1249
+ r"""
1250
+ Args:
1251
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1252
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1253
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1254
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1255
+
1256
+ Returns:
1257
+
1258
+ Example:
1259
+
1260
+ ```python
1261
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1262
+
1263
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1264
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1265
+
1266
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1267
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1268
+
1269
+ >>> # Generate
1270
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1271
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1272
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1273
+ ```"""
1274
+ samples = kwargs.get('samples', None)
1275
+ if samples:
1276
+ if self.debug_flag:
1277
+ self.debug_flag += 1
1278
+ if self.debug_flag > 5:
1279
+ self.debug_flag = 0
1280
+
1281
+ if samples['data_type'][0] == 'text':
1282
+ has_img = False
1283
+ elif samples['data_type'][0] == 'multi':
1284
+ has_img = True
1285
+ else:
1286
+ raise NotImplementedError
1287
+
1288
+ ### encode text
1289
+ text = samples['text_input']
1290
+ if has_img:
1291
+ image = samples['image']
1292
+ to_regress_embeds, attention_mask, targets, im_mask = self.interleav_wrap(
1293
+ image, text)
1294
+ else:
1295
+ to_regress_tokens, targets = self.text2emb(
1296
+ text, add_special=True)
1297
+ to_regress_embeds = self.model.tok_embeddings(
1298
+ to_regress_tokens.input_ids)
1299
+ attention_mask = to_regress_tokens.attention_mask
1300
+ im_mask = torch.zeros(to_regress_embeds.shape[:2]).cuda()
1301
+
1302
+ inputs_embeds = to_regress_embeds[:, :self.max_length]
1303
+ attention_mask = attention_mask[:, :self.max_length]
1304
+ targets = targets[:, :self.max_length]
1305
+ im_mask = im_mask[:, :self.max_length].bool()
1306
+ labels = targets
1307
+ if self.debug_flag:
1308
+ print(targets.shape, inputs_embeds.shape, attention_mask.shape)
1309
+ le = len(samples['text_input'])
1310
+ data_type = samples['data_type'][0]
1311
+ print(
1312
+ f'DataType: {data_type}. Has Image: {has_img}. Current max length: {self.max_length}, BatchSize is {le}'
1313
+ )
1314
+ # if has_img:
1315
+ # print(img_embeds.shape)
1316
+
1317
+ else:
1318
+ self.debug_flag = 0
1319
+ im_mask = kwargs.get('im_mask', None)
1320
+
1321
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1322
+ output_hidden_states = (
1323
+ output_hidden_states if output_hidden_states is not None else
1324
+ self.config.output_hidden_states)
1325
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1326
+
1327
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1328
+ outputs = self.model(
1329
+ input_ids=input_ids,
1330
+ attention_mask=attention_mask,
1331
+ position_ids=position_ids,
1332
+ past_key_values=past_key_values,
1333
+ inputs_embeds=inputs_embeds,
1334
+ use_cache=use_cache,
1335
+ output_attentions=output_attentions,
1336
+ output_hidden_states=output_hidden_states,
1337
+ return_dict=return_dict,
1338
+ im_mask=im_mask,
1339
+ )
1340
+
1341
+ hidden_states = outputs[0]
1342
+ logits = self.output(hidden_states)
1343
+ logits = logits.float()
1344
+
1345
+ loss = None
1346
+ if labels is not None:
1347
+ # Shift so that tokens < n predict n
1348
+ shift_logits = logits[..., :-1, :].contiguous()
1349
+ shift_labels = labels[..., 1:].contiguous()
1350
+ # Flatten the tokens
1351
+ loss_fct = CrossEntropyLoss()
1352
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1353
+ shift_labels = shift_labels.view(-1)
1354
+ # Enable model parallelism
1355
+ shift_labels = shift_labels.to(shift_logits.device)
1356
+ loss = loss_fct(shift_logits, shift_labels)
1357
+
1358
+ if not return_dict:
1359
+ output = (logits, ) + outputs[1:]
1360
+ return (loss, ) + output if loss is not None else output
1361
+
1362
+ return CausalLMOutputWithPast(
1363
+ loss=loss,
1364
+ logits=logits,
1365
+ past_key_values=outputs.past_key_values,
1366
+ hidden_states=outputs.hidden_states,
1367
+ attentions=outputs.attentions,
1368
+ )
1369
+
1370
+ def prepare_inputs_for_generation(self,
1371
+ input_ids,
1372
+ past_key_values=None,
1373
+ attention_mask=None,
1374
+ inputs_embeds=None,
1375
+ im_mask=None,
1376
+ **kwargs):
1377
+ if past_key_values is not None:
1378
+ past_length = past_key_values[0][0].shape[2]
1379
+
1380
+ # Some generation methods already pass only the last input ID
1381
+ if input_ids.shape[1] > past_length:
1382
+ remove_prefix_length = past_length
1383
+ else:
1384
+ # Default to old behavior: keep only final ID
1385
+ remove_prefix_length = input_ids.shape[1] - 1
1386
+
1387
+ input_ids = input_ids[:, remove_prefix_length:]
1388
+
1389
+ position_ids = kwargs.get('position_ids', None)
1390
+ if attention_mask is not None and position_ids is None:
1391
+ # create position_ids on the fly for batch generation
1392
+ position_ids = attention_mask.long().cumsum(-1) - 1
1393
+ position_ids.masked_fill_(attention_mask == 0, 1)
1394
+ if past_key_values:
1395
+ position_ids = position_ids[:, -input_ids.shape[1]:]
1396
+
1397
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1398
+ if inputs_embeds is not None and past_key_values is None:
1399
+ model_inputs = {'inputs_embeds': inputs_embeds}
1400
+ else:
1401
+ model_inputs = {'input_ids': input_ids}
1402
+
1403
+ im_mask = im_mask
1404
+
1405
+ model_inputs.update({
1406
+ 'position_ids': position_ids,
1407
+ 'past_key_values': past_key_values,
1408
+ 'use_cache': kwargs.get('use_cache'),
1409
+ 'attention_mask': attention_mask,
1410
+ 'im_mask': im_mask,
1411
+ })
1412
+ return model_inputs
1413
+
1414
+ @staticmethod
1415
+ def _reorder_cache(past_key_values, beam_idx):
1416
+ reordered_past = ()
1417
+ for layer_past in past_key_values:
1418
+ reordered_past += (tuple(
1419
+ past_state.index_select(0, beam_idx.to(past_state.device))
1420
+ for past_state in layer_past), )
1421
+ return reordered_past
1422
+
1423
+ def build_inputs(self,
1424
+ tokenizer,
1425
+ query: str,
1426
+ history: List[Tuple[str, str]] = []):
1427
+ prompt = ''
1428
+ for record in history:
1429
+ prompt += f"""<|User|>:{record[0]}\n<|Bot|>:{record[1]}[UNUSED_TOKEN_0]\n"""
1430
+ prompt += f"""<|User|>:{query}\n<|Bot|>:"""
1431
+ return tokenizer([prompt], return_tensors='pt')
1432
+
1433
+ @torch.no_grad()
1434
+ def chat(
1435
+ self,
1436
+ tokenizer,
1437
+ query: str,
1438
+ history: List[Tuple[str, str]] = [],
1439
+ streamer: Optional[BaseStreamer] = None,
1440
+ max_new_tokens: int = 1024,
1441
+ do_sample: bool = True,
1442
+ temperature: float = 0.8,
1443
+ top_p: float = 0.8,
1444
+ **kwargs,
1445
+ ):
1446
+ inputs = self.build_inputs(tokenizer, query, history)
1447
+ inputs = {
1448
+ k: v.to(self.device)
1449
+ for k, v in inputs.items() if torch.is_tensor(v)
1450
+ }
1451
+ outputs = self.generate(
1452
+ **inputs,
1453
+ streamer=streamer,
1454
+ max_new_tokens=max_new_tokens,
1455
+ do_sample=do_sample,
1456
+ temperature=temperature,
1457
+ top_p=top_p,
1458
+ **kwargs,
1459
+ )
1460
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]):]
1461
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1462
+ response = response.split('[UNUSED_TOKEN_0]')[0]
1463
+ history = history + [(query, response)]
1464
+ return response, history
1465
+
1466
+ @torch.no_grad()
1467
+ def stream_chat(
1468
+ self,
1469
+ tokenizer,
1470
+ query: str,
1471
+ history: List[Tuple[str, str]] = [],
1472
+ max_new_tokens: int = 1024,
1473
+ do_sample: bool = True,
1474
+ temperature: float = 0.8,
1475
+ top_p: float = 0.8,
1476
+ **kwargs,
1477
+ ):
1478
+ """Return a generator in the format (response, history). E.g.
1479
+
1480
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')]) ('你好,有什么可以帮助您的吗?', [('你好',
1481
+ '你好,有什么可以帮助您的吗?')])
1482
+ """
1483
+ if BaseStreamer is None:
1484
+ raise ModuleNotFoundError(
1485
+ 'The version of `transformers` is too low. Please make sure '
1486
+ 'that you have installed `transformers>=4.28.0`.')
1487
+
1488
+ response_queue = queue.Queue(maxsize=20)
1489
+
1490
+ class ChatStreamer(BaseStreamer):
1491
+
1492
+ def __init__(self, tokenizer) -> None:
1493
+ super().__init__()
1494
+ self.tokenizer = tokenizer
1495
+ self.queue = response_queue
1496
+ self.query = query
1497
+ self.history = history
1498
+ self.response = ''
1499
+ self.received_inputs = False
1500
+ self.queue.put(
1501
+ (self.response, history + [(self.query, self.response)]))
1502
+
1503
+ def put(self, value):
1504
+ if len(value.shape) > 1 and value.shape[0] > 1:
1505
+ raise ValueError('ChatStreamer only supports batch size 1')
1506
+ elif len(value.shape) > 1:
1507
+ value = value[0]
1508
+
1509
+ if not self.received_inputs:
1510
+ # The first received value is input_ids, ignore here
1511
+ self.received_inputs = True
1512
+ return
1513
+
1514
+ token = self.tokenizer.decode([value[-1]],
1515
+ skip_special_tokens=True)
1516
+ if token.strip() != '[UNUSED_TOKEN_0]':
1517
+ self.response = self.response + token
1518
+ history = self.history + [(self.query, self.response)]
1519
+ self.queue.put((self.response, history))
1520
+
1521
+ def end(self):
1522
+ self.queue.put(None)
1523
+
1524
+ def stream_producer():
1525
+ return self.chat(
1526
+ tokenizer=tokenizer,
1527
+ query=query,
1528
+ streamer=ChatStreamer(tokenizer=tokenizer),
1529
+ history=history,
1530
+ max_new_tokens=max_new_tokens,
1531
+ do_sample=do_sample,
1532
+ temperature=temperature,
1533
+ top_p=top_p,
1534
+ **kwargs,
1535
+ )
1536
+
1537
+ def consumer():
1538
+ producer = threading.Thread(target=stream_producer)
1539
+ producer.start()
1540
+ while True:
1541
+ res = response_queue.get()
1542
+ if res is None:
1543
+ return
1544
+ yield res
1545
+
1546
+ return consumer()
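
For orientation, here is a minimal usage sketch of the `chat()` API defined above. The local path, dtype, and device are assumptions rather than part of this commit, and loading relies on `trust_remote_code=True` so that the custom `InternLM2ForCausalLM` class is picked up; note that `__init__` also builds the CLIP vision tower via `build_vision_tower()`, so those weights must be cached or downloadable as well.

```python
# Hypothetical local checkout of this repository; the path is a placeholder.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = './'  # adjust to wherever this repo lives
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.float16, trust_remote_code=True).cuda().eval()

# chat() wraps the query in the <|User|>/<|Bot|> template from build_inputs()
# and cuts the decoded reply at the first [UNUSED_TOKEN_0].
response, history = model.chat(tokenizer, 'Hello, who are you?', history=[])
print(response)
```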
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f122ef6869471e17df560bbc7706b633195d851eca72d5d8616811cd53675d16
3
+ size 17331921065
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "bos_token": "<s>",
3
+ "eos_token": "</s>",
4
+ "pad_token": "</s>",
5
+ "unk_token": "<unk>"
6
+ }
tokenization_internlm.py ADDED
@@ -0,0 +1,240 @@
1
+ # coding=utf-8
2
+ # Copyright (c) InternLM. All rights reserved.
3
+ #
4
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
+ # and OPT implementations in this library. It has been modified from its
6
+ # original forms to accommodate minor architectural differences compared
7
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
+ #
9
+ # Licensed under the Apache License, Version 2.0 (the "License");
10
+ # you may not use this file except in compliance with the License.
11
+ # You may obtain a copy of the License at
12
+ #
13
+ # http://www.apache.org/licenses/LICENSE-2.0
14
+ #
15
+ # Unless required by applicable law or agreed to in writing, software
16
+ # distributed under the License is distributed on an "AS IS" BASIS,
17
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
+ # See the License for the specific language governing permissions and
19
+ # limitations under the License.
20
+
21
+ """Tokenization classes for InternLM."""
22
+ import os
23
+ from shutil import copyfile
24
+ from typing import Any, Dict, List, Optional, Tuple
25
+
26
+ import sentencepiece as spm
27
+ from transformers.tokenization_utils import PreTrainedTokenizer
28
+ from transformers.utils import logging
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ VOCAB_FILES_NAMES = {"vocab_file": "./tokenizer.model"}
33
+
34
+ PRETRAINED_VOCAB_FILES_MAP = {}
35
+
36
+
37
+ class InternLMTokenizer(PreTrainedTokenizer):
38
+ """
39
+ Construct an InternLM tokenizer, based on a SentencePiece model.
40
+
41
+ Args:
42
+ vocab_file (`str`):
43
+ Path to the vocabulary file.
44
+ """
45
+
46
+ vocab_files_names = VOCAB_FILES_NAMES
47
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
48
+ model_input_names = ["input_ids", "attention_mask"]
49
+ _auto_class = "AutoTokenizer"
50
+
51
+ def __init__(
52
+ self,
53
+ vocab_file,
54
+ unk_token="<unk>",
55
+ bos_token="<s>",
56
+ eos_token="</s>",
57
+ pad_token="</s>",
58
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
59
+ add_bos_token=True,
60
+ add_eos_token=False,
61
+ decode_with_prefix_space=False,
62
+ clean_up_tokenization_spaces=False,
63
+ **kwargs,
64
+ ):
65
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
66
+ self.vocab_file = vocab_file
67
+ self.add_bos_token = add_bos_token
68
+ self.add_eos_token = add_eos_token
69
+ self.decode_with_prefix_space = decode_with_prefix_space
70
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
71
+ self.sp_model.Load(vocab_file)
72
+ self._no_prefix_space_tokens = None
73
+ super().__init__(
74
+ bos_token=bos_token,
75
+ eos_token=eos_token,
76
+ unk_token=unk_token,
77
+ pad_token=pad_token,
78
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
79
+ **kwargs,
80
+ )
81
+
82
+ """ Initialization"""
83
+
84
+ @property
85
+ def no_prefix_space_tokens(self):
86
+ if self._no_prefix_space_tokens is None:
87
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
88
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith("▁")}
89
+ return self._no_prefix_space_tokens
90
+
91
+ @property
92
+ def vocab_size(self):
93
+ """Returns vocab size"""
94
+ return self.sp_model.get_piece_size()
95
+
96
+ @property
97
+ def bos_token_id(self) -> Optional[int]:
98
+ return self.sp_model.bos_id()
99
+
100
+ @property
101
+ def eos_token_id(self) -> Optional[int]:
102
+ return self.sp_model.eos_id()
103
+
104
+ def get_vocab(self):
105
+ """Returns vocab as a dict"""
106
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
107
+ vocab.update(self.added_tokens_encoder)
108
+ return vocab
109
+
110
+ def _tokenize(self, text):
111
+ """Returns a tokenized string."""
112
+ return self.sp_model.encode(text, out_type=str)
113
+
114
+ def _convert_token_to_id(self, token):
115
+ """Converts a token (str) to an id using the vocab."""
116
+ return self.sp_model.piece_to_id(token)
117
+
118
+ def _convert_id_to_token(self, index):
119
+ """Converts an index (integer) to a token (str) using the vocab."""
120
+ token = self.sp_model.IdToPiece(index)
121
+ return token
122
+
123
+ def _maybe_add_prefix_space(self, tokens, decoded):
124
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
125
+ return " " + decoded
126
+ else:
127
+ return decoded
128
+
129
+ def convert_tokens_to_string(self, tokens):
130
+ """Converts a sequence of tokens (strings) into a single string."""
131
+ current_sub_tokens = []
132
+ out_string = ""
133
+ prev_is_special = False
134
+ for token in tokens:
135
+ # make sure that special tokens are not decoded using sentencepiece model
136
+ if token in self.all_special_tokens:
137
+ if not prev_is_special:
138
+ out_string += " "
139
+ out_string += self.sp_model.decode(current_sub_tokens) + token
140
+ prev_is_special = True
141
+ current_sub_tokens = []
142
+ else:
143
+ current_sub_tokens.append(token)
144
+ prev_is_special = False
145
+ out_string += self.sp_model.decode(current_sub_tokens)
146
+ out_string = self.clean_up_tokenization(out_string)
147
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
148
+ return out_string[1:]
149
+
150
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
151
+ """
152
+ Save the vocabulary and special tokens file to a directory.
153
+
154
+ Args:
155
+ save_directory (`str`):
156
+ The directory in which to save the vocabulary.
157
+
158
+ Returns:
159
+ `Tuple(str)`: Paths to the files saved.
160
+ """
161
+ if not os.path.isdir(save_directory):
162
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
163
+ return
164
+ out_vocab_file = os.path.join(
165
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
166
+ )
167
+
168
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
169
+ copyfile(self.vocab_file, out_vocab_file)
170
+ elif not os.path.isfile(self.vocab_file):
171
+ with open(out_vocab_file, "wb") as fi:
172
+ content_spiece_model = self.sp_model.serialized_model_proto()
173
+ fi.write(content_spiece_model)
174
+
175
+ return (out_vocab_file,)
176
+
177
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
178
+ if self.add_bos_token:
179
+ bos_token_ids = [self.bos_token_id]
180
+ else:
181
+ bos_token_ids = []
182
+
183
+ output = bos_token_ids + token_ids_0
184
+
185
+ if token_ids_1 is not None:
186
+ output = output + token_ids_1
187
+
188
+ if self.add_eos_token:
189
+ output = output + [self.eos_token_id]
190
+
191
+ return output
192
+
193
+ def get_special_tokens_mask(
194
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
195
+ ) -> List[int]:
196
+ """
197
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
198
+ special tokens using the tokenizer `prepare_for_model` method.
199
+
200
+ Args:
201
+ token_ids_0 (`List[int]`):
202
+ List of IDs.
203
+ token_ids_1 (`List[int]`, *optional*):
204
+ Optional second list of IDs for sequence pairs.
205
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
206
+ Whether or not the token list is already formatted with special tokens for the model.
207
+
208
+ Returns:
209
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
210
+ """
211
+ if already_has_special_tokens:
212
+ return super().get_special_tokens_mask(
213
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
214
+ )
215
+
216
+ if token_ids_1 is None:
217
+ return [1] + ([0] * len(token_ids_0)) + [1]
218
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
219
+
220
+ def create_token_type_ids_from_sequences(
221
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
222
+ ) -> List[int]:
223
+ """
224
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM does not make
225
+ use of token type ids, therefore a list of zeros is returned.
226
+
227
+ Args:
228
+ token_ids_0 (`List[int]`):
229
+ List of IDs.
230
+ token_ids_1 (`List[int]`, *optional*):
231
+ Optional second list of IDs for sequence pairs.
232
+
233
+ Returns:
234
+ `List[int]`: List of zeros.
235
+ """
236
+ eos = [self.eos_token_id]
237
+
238
+ if token_ids_1 is None:
239
+ return len(token_ids_0 + eos) * [0]
240
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
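
As a quick sanity check of the class above, a small sketch that drives it directly from the SentencePiece model added below (the relative paths are assumptions about where this repo is checked out):

```python
# Assumes tokenization_internlm.py and tokenizer.model sit in the working directory.
from tokenization_internlm import InternLMTokenizer

tok = InternLMTokenizer(vocab_file='./tokenizer.model')
enc = tok('Hello world')  # BOS is prepended since add_bos_token defaults to True
print(enc.input_ids)
print(tok.decode(enc.input_ids, skip_special_tokens=True))
```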
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
3
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoTokenizer": [
4
+ "tokenization_internlm.InternLMTokenizer",
5
+ null
6
+ ]
7
+ },
8
+ "bos_token": "<s>",
9
+ "clean_up_tokenization_spaces": false,
10
+ "eos_token": "</s>",
11
+ "model_max_length": 1000000000000000019884624838656,
12
+ "pad_token": "</s>",
13
+ "padding_side": "right",
14
+ "tokenizer_class": "InternLMTokenizer",
15
+ "unk_token": "<unk>"
16
+ }
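
The `auto_map` entry is what lets `AutoTokenizer` resolve to the custom `InternLMTokenizer` above when remote code is trusted; a short sketch (the local path is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('./', trust_remote_code=True)  # placeholder path
print(type(tok).__name__)            # expected: InternLMTokenizer
print(tok.eos_token, tok.pad_token)  # both '</s>' per this config
```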
trainer_state.json ADDED
@@ -0,0 +1,653 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.9968,
5
+ "eval_steps": 500,
6
+ "global_step": 78,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.03,
13
+ "learning_rate": "0.0000e+00",
14
+ "loss": 2.2666,
15
+ "slid_loss": 2.2666,
16
+ "step": 1,
17
+ "time": 42.16
18
+ },
19
+ {
20
+ "epoch": 0.05,
21
+ "learning_rate": "5.0000e-06",
22
+ "loss": 2.2601,
23
+ "slid_loss": 2.2634,
24
+ "step": 2,
25
+ "time": 34.12
26
+ },
27
+ {
28
+ "epoch": 0.08,
29
+ "learning_rate": "5.0000e-06",
30
+ "loss": 2.3071,
31
+ "slid_loss": 2.2779,
32
+ "step": 3,
33
+ "time": 33.4
34
+ },
35
+ {
36
+ "epoch": 0.1,
37
+ "learning_rate": "5.0000e-06",
38
+ "loss": 2.1847,
39
+ "slid_loss": 2.2546,
40
+ "step": 4,
41
+ "time": 33.28
42
+ },
43
+ {
44
+ "epoch": 0.13,
45
+ "learning_rate": "5.0000e-06",
46
+ "loss": 2.2277,
47
+ "slid_loss": 2.2492,
48
+ "step": 5,
49
+ "time": 34.62
50
+ },
51
+ {
52
+ "epoch": 0.15,
53
+ "learning_rate": "5.0000e-06",
54
+ "loss": 2.1922,
55
+ "slid_loss": 2.2397,
56
+ "step": 6,
57
+ "time": 32.87
58
+ },
59
+ {
60
+ "epoch": 0.18,
61
+ "learning_rate": "5.0000e-06",
62
+ "loss": 2.168,
63
+ "slid_loss": 2.2295,
64
+ "step": 7,
65
+ "time": 33.59
66
+ },
67
+ {
68
+ "epoch": 0.2,
69
+ "learning_rate": "5.0000e-06",
70
+ "loss": 2.2024,
71
+ "slid_loss": 2.2261,
72
+ "step": 8,
73
+ "time": 33.64
74
+ },
75
+ {
76
+ "epoch": 0.23,
77
+ "learning_rate": "5.0000e-06",
78
+ "loss": 2.1198,
79
+ "slid_loss": 2.2143,
80
+ "step": 9,
81
+ "time": 35.32
82
+ },
83
+ {
84
+ "epoch": 0.26,
85
+ "learning_rate": "5.0000e-06",
86
+ "loss": 2.139,
87
+ "slid_loss": 2.2068,
88
+ "step": 10,
89
+ "time": 33.38
90
+ },
91
+ {
92
+ "epoch": 0.28,
93
+ "learning_rate": "5.0000e-06",
94
+ "loss": 2.1052,
95
+ "slid_loss": 2.1975,
96
+ "step": 11,
97
+ "time": 33.38
98
+ },
99
+ {
100
+ "epoch": 0.31,
101
+ "learning_rate": "5.0000e-06",
102
+ "loss": 2.1561,
103
+ "slid_loss": 2.1941,
104
+ "step": 12,
105
+ "time": 33.0
106
+ },
107
+ {
108
+ "epoch": 0.33,
109
+ "learning_rate": "5.0000e-06",
110
+ "loss": 2.085,
111
+ "slid_loss": 2.1857,
112
+ "step": 13,
113
+ "time": 32.73
114
+ },
115
+ {
116
+ "epoch": 0.36,
117
+ "learning_rate": "5.0000e-06",
118
+ "loss": 2.1404,
119
+ "slid_loss": 2.1824,
120
+ "step": 14,
121
+ "time": 33.91
122
+ },
123
+ {
124
+ "epoch": 0.38,
125
+ "learning_rate": "5.0000e-06",
126
+ "loss": 2.0282,
127
+ "slid_loss": 2.1722,
128
+ "step": 15,
129
+ "time": 32.97
130
+ },
131
+ {
132
+ "epoch": 0.41,
133
+ "learning_rate": "5.0000e-06",
134
+ "loss": 2.0576,
135
+ "slid_loss": 2.165,
136
+ "step": 16,
137
+ "time": 32.89
138
+ },
139
+ {
140
+ "epoch": 0.44,
141
+ "learning_rate": "5.0000e-06",
142
+ "loss": 2.0584,
143
+ "slid_loss": 2.1587,
144
+ "step": 17,
145
+ "time": 33.64
146
+ },
147
+ {
148
+ "epoch": 0.46,
149
+ "learning_rate": "5.0000e-06",
150
+ "loss": 2.086,
151
+ "slid_loss": 2.1547,
152
+ "step": 18,
153
+ "time": 35.21
154
+ },
155
+ {
156
+ "epoch": 0.49,
157
+ "learning_rate": "5.0000e-06",
158
+ "loss": 2.0918,
159
+ "slid_loss": 2.1514,
160
+ "step": 19,
161
+ "time": 33.29
162
+ },
163
+ {
164
+ "epoch": 0.51,
165
+ "learning_rate": "5.0000e-06",
166
+ "loss": 2.0255,
167
+ "slid_loss": 2.1451,
168
+ "step": 20,
169
+ "time": 33.69
170
+ },
171
+ {
172
+ "epoch": 0.54,
173
+ "learning_rate": "5.0000e-06",
174
+ "loss": 2.0119,
175
+ "slid_loss": 2.1387,
176
+ "step": 21,
177
+ "time": 33.5
178
+ },
179
+ {
180
+ "epoch": 0.56,
181
+ "learning_rate": "5.0000e-06",
182
+ "loss": 1.9633,
183
+ "slid_loss": 2.1308,
184
+ "step": 22,
185
+ "time": 35.21
186
+ },
187
+ {
188
+ "epoch": 0.59,
189
+ "learning_rate": "5.0000e-06",
190
+ "loss": 2.0063,
191
+ "slid_loss": 2.1254,
192
+ "step": 23,
193
+ "time": 32.96
194
+ },
195
+ {
196
+ "epoch": 0.61,
197
+ "learning_rate": "5.0000e-06",
198
+ "loss": 2.0122,
199
+ "slid_loss": 2.1206,
200
+ "step": 24,
201
+ "time": 33.34
202
+ },
203
+ {
204
+ "epoch": 0.64,
205
+ "learning_rate": "5.0000e-06",
206
+ "loss": 1.9364,
207
+ "slid_loss": 2.1133,
208
+ "step": 25,
209
+ "time": 33.35
210
+ },
211
+ {
212
+ "epoch": 0.67,
213
+ "learning_rate": "5.0000e-06",
214
+ "loss": 1.9493,
215
+ "slid_loss": 2.107,
216
+ "step": 26,
217
+ "time": 33.24
218
+ },
219
+ {
220
+ "epoch": 0.69,
221
+ "learning_rate": "5.0000e-06",
222
+ "loss": 1.9124,
223
+ "slid_loss": 2.0998,
224
+ "step": 27,
225
+ "time": 33.34
226
+ },
227
+ {
228
+ "epoch": 0.72,
229
+ "learning_rate": "5.0000e-06",
230
+ "loss": 1.9077,
231
+ "slid_loss": 2.0929,
232
+ "step": 28,
233
+ "time": 33.03
234
+ },
235
+ {
236
+ "epoch": 0.74,
237
+ "learning_rate": "5.0000e-06",
238
+ "loss": 1.9838,
239
+ "slid_loss": 2.0891,
240
+ "step": 29,
241
+ "time": 34.5
242
+ },
243
+ {
244
+ "epoch": 0.77,
245
+ "learning_rate": "5.0000e-06",
246
+ "loss": 1.988,
247
+ "slid_loss": 2.0858,
248
+ "step": 30,
249
+ "time": 33.39
250
+ },
251
+ {
252
+ "epoch": 0.79,
253
+ "learning_rate": "5.0000e-06",
254
+ "loss": 1.9561,
255
+ "slid_loss": 2.0816,
256
+ "step": 31,
257
+ "time": 33.25
258
+ },
259
+ {
260
+ "epoch": 0.82,
261
+ "learning_rate": "5.0000e-06",
262
+ "loss": 1.8664,
263
+ "slid_loss": 2.0749,
264
+ "step": 32,
265
+ "time": 32.75
266
+ },
267
+ {
268
+ "epoch": 0.84,
269
+ "learning_rate": "5.0000e-06",
270
+ "loss": 1.8385,
271
+ "slid_loss": 2.0677,
272
+ "step": 33,
273
+ "time": 33.61
274
+ },
275
+ {
276
+ "epoch": 0.87,
277
+ "learning_rate": "5.0000e-06",
278
+ "loss": 1.8827,
279
+ "slid_loss": 2.0623,
280
+ "step": 34,
281
+ "time": 33.48
282
+ },
283
+ {
284
+ "epoch": 0.9,
285
+ "learning_rate": "5.0000e-06",
286
+ "loss": 1.8249,
287
+ "slid_loss": 2.0555,
288
+ "step": 35,
289
+ "time": 33.62
290
+ },
291
+ {
292
+ "epoch": 0.92,
293
+ "learning_rate": "5.0000e-06",
294
+ "loss": 1.8204,
295
+ "slid_loss": 2.049,
296
+ "step": 36,
297
+ "time": 33.21
298
+ },
299
+ {
300
+ "epoch": 0.95,
301
+ "learning_rate": "5.0000e-06",
302
+ "loss": 1.8761,
303
+ "slid_loss": 2.0443,
304
+ "step": 37,
305
+ "time": 32.95
306
+ },
307
+ {
308
+ "epoch": 0.97,
309
+ "learning_rate": "5.0000e-06",
310
+ "loss": 1.8621,
311
+ "slid_loss": 2.0395,
312
+ "step": 38,
313
+ "time": 33.02
314
+ },
315
+ {
316
+ "epoch": 1.0,
317
+ "learning_rate": "5.0000e-06",
318
+ "loss": 1.7632,
319
+ "slid_loss": 2.0324,
320
+ "step": 39,
321
+ "time": 32.9
322
+ },
323
+ {
324
+ "epoch": 1.02,
325
+ "learning_rate": "5.0000e-06",
326
+ "loss": 1.8407,
327
+ "slid_loss": 2.0276,
328
+ "step": 40,
329
+ "time": 192.32
330
+ },
331
+ {
332
+ "epoch": 1.05,
333
+ "learning_rate": "5.0000e-06",
334
+ "loss": 1.7514,
335
+ "slid_loss": 2.0209,
336
+ "step": 41,
337
+ "time": 33.2
338
+ },
339
+ {
340
+ "epoch": 1.08,
341
+ "learning_rate": "5.0000e-06",
342
+ "loss": 1.7342,
343
+ "slid_loss": 2.014,
344
+ "step": 42,
345
+ "time": 33.32
346
+ },
347
+ {
348
+ "epoch": 1.1,
349
+ "learning_rate": "5.0000e-06",
350
+ "loss": 1.7591,
351
+ "slid_loss": 2.0081,
352
+ "step": 43,
353
+ "time": 32.94
354
+ },
355
+ {
356
+ "epoch": 1.13,
357
+ "learning_rate": "5.0000e-06",
358
+ "loss": 1.7156,
359
+ "slid_loss": 2.0015,
360
+ "step": 44,
361
+ "time": 32.85
362
+ },
363
+ {
364
+ "epoch": 1.15,
365
+ "learning_rate": "5.0000e-06",
366
+ "loss": 1.7146,
367
+ "slid_loss": 1.9951,
368
+ "step": 45,
369
+ "time": 32.84
370
+ },
371
+ {
372
+ "epoch": 1.18,
373
+ "learning_rate": "5.0000e-06",
374
+ "loss": 1.7197,
375
+ "slid_loss": 1.9891,
376
+ "step": 46,
377
+ "time": 32.83
378
+ },
379
+ {
380
+ "epoch": 1.2,
381
+ "learning_rate": "5.0000e-06",
382
+ "loss": 1.6992,
383
+ "slid_loss": 1.9829,
384
+ "step": 47,
385
+ "time": 33.24
386
+ },
387
+ {
388
+ "epoch": 1.23,
389
+ "learning_rate": "5.0000e-06",
390
+ "loss": 1.7154,
391
+ "slid_loss": 1.9774,
392
+ "step": 48,
393
+ "time": 34.15
394
+ },
395
+ {
396
+ "epoch": 1.25,
397
+ "learning_rate": "5.0000e-06",
398
+ "loss": 1.6725,
399
+ "slid_loss": 1.9711,
400
+ "step": 49,
401
+ "time": 35.49
402
+ },
403
+ {
404
+ "epoch": 1.28,
405
+ "learning_rate": "5.0000e-06",
406
+ "loss": 1.6221,
407
+ "slid_loss": 1.9642,
408
+ "step": 50,
409
+ "time": 33.02
410
+ },
411
+ {
412
+ "epoch": 1.31,
413
+ "learning_rate": "5.0000e-06",
414
+ "loss": 1.656,
415
+ "slid_loss": 1.9581,
416
+ "step": 51,
417
+ "time": 33.54
418
+ },
419
+ {
420
+ "epoch": 1.33,
421
+ "learning_rate": "5.0000e-06",
422
+ "loss": 1.6232,
423
+ "slid_loss": 1.9517,
424
+ "step": 52,
425
+ "time": 33.15
426
+ },
427
+ {
428
+ "epoch": 1.36,
429
+ "learning_rate": "5.0000e-06",
430
+ "loss": 1.6363,
431
+ "slid_loss": 1.9457,
432
+ "step": 53,
433
+ "time": 33.17
434
+ },
435
+ {
436
+ "epoch": 1.38,
437
+ "learning_rate": "5.0000e-06",
438
+ "loss": 1.6079,
439
+ "slid_loss": 1.9395,
440
+ "step": 54,
441
+ "time": 32.8
442
+ },
443
+ {
444
+ "epoch": 1.41,
445
+ "learning_rate": "5.0000e-06",
446
+ "loss": 1.5803,
447
+ "slid_loss": 1.9329,
448
+ "step": 55,
449
+ "time": 33.72
450
+ },
451
+ {
452
+ "epoch": 1.43,
453
+ "learning_rate": "5.0000e-06",
454
+ "loss": 1.5249,
455
+ "slid_loss": 1.9257,
456
+ "step": 56,
457
+ "time": 33.48
458
+ },
459
+ {
460
+ "epoch": 1.46,
461
+ "learning_rate": "5.0000e-06",
462
+ "loss": 1.624,
463
+ "slid_loss": 1.9204,
464
+ "step": 57,
465
+ "time": 33.19
466
+ },
467
+ {
468
+ "epoch": 1.48,
469
+ "learning_rate": "5.0000e-06",
470
+ "loss": 1.5509,
471
+ "slid_loss": 1.914,
472
+ "step": 58,
473
+ "time": 32.7
474
+ },
475
+ {
476
+ "epoch": 1.51,
477
+ "learning_rate": "5.0000e-06",
478
+ "loss": 1.5339,
479
+ "slid_loss": 1.9076,
480
+ "step": 59,
481
+ "time": 34.98
482
+ },
483
+ {
484
+ "epoch": 1.54,
485
+ "learning_rate": "5.0000e-06",
486
+ "loss": 1.559,
487
+ "slid_loss": 1.9017,
488
+ "step": 60,
489
+ "time": 33.29
490
+ },
491
+ {
492
+ "epoch": 1.56,
493
+ "learning_rate": "5.0000e-06",
494
+ "loss": 1.4958,
495
+ "slid_loss": 1.8951,
496
+ "step": 61,
497
+ "time": 32.61
498
+ },
499
+ {
500
+ "epoch": 1.59,
501
+ "learning_rate": "5.0000e-06",
502
+ "loss": 1.4871,
503
+ "slid_loss": 1.8885,
504
+ "step": 62,
505
+ "time": 33.46
506
+ },
507
+ {
508
+ "epoch": 1.61,
509
+ "learning_rate": "5.0000e-06",
510
+ "loss": 1.4523,
511
+ "slid_loss": 1.8816,
512
+ "step": 63,
513
+ "time": 32.93
514
+ },
515
+ {
516
+ "epoch": 1.64,
517
+ "learning_rate": "5.0000e-06",
518
+ "loss": 1.4786,
519
+ "slid_loss": 1.8753,
520
+ "step": 64,
521
+ "time": 33.78
522
+ },
523
+ {
524
+ "epoch": 1.66,
525
+ "learning_rate": "5.0000e-06",
526
+ "loss": 1.4455,
527
+ "slid_loss": 1.8687,
528
+ "step": 65,
529
+ "time": 32.82
530
+ },
531
+ {
532
+ "epoch": 1.69,
533
+ "learning_rate": "5.0000e-06",
534
+ "loss": 1.4159,
535
+ "slid_loss": 1.8618,
536
+ "step": 66,
537
+ "time": 34.87
538
+ },
539
+ {
540
+ "epoch": 1.72,
541
+ "learning_rate": "5.0000e-06",
542
+ "loss": 1.3869,
543
+ "slid_loss": 1.8547,
544
+ "step": 67,
545
+ "time": 33.06
546
+ },
547
+ {
548
+ "epoch": 1.74,
549
+ "learning_rate": "5.0000e-06",
550
+ "loss": 1.3814,
551
+ "slid_loss": 1.8478,
552
+ "step": 68,
553
+ "time": 34.85
554
+ },
555
+ {
556
+ "epoch": 1.77,
557
+ "learning_rate": "5.0000e-06",
558
+ "loss": 1.3668,
559
+ "slid_loss": 1.8408,
560
+ "step": 69,
561
+ "time": 33.18
562
+ },
563
+ {
564
+ "epoch": 1.79,
565
+ "learning_rate": "5.0000e-06",
566
+ "loss": 1.4419,
567
+ "slid_loss": 1.8351,
568
+ "step": 70,
569
+ "time": 34.61
570
+ },
571
+ {
572
+ "epoch": 1.82,
573
+ "learning_rate": "5.0000e-06",
574
+ "loss": 1.3532,
575
+ "slid_loss": 1.8283,
576
+ "step": 71,
577
+ "time": 33.92
578
+ },
579
+ {
580
+ "epoch": 1.84,
581
+ "learning_rate": "5.0000e-06",
582
+ "loss": 1.343,
583
+ "slid_loss": 1.8216,
584
+ "step": 72,
585
+ "time": 32.6
586
+ },
587
+ {
588
+ "epoch": 1.87,
589
+ "learning_rate": "5.0000e-06",
590
+ "loss": 1.3843,
591
+ "slid_loss": 1.8156,
592
+ "step": 73,
593
+ "time": 32.92
594
+ },
595
+ {
596
+ "epoch": 1.89,
597
+ "learning_rate": "5.0000e-06",
598
+ "loss": 1.3455,
599
+ "slid_loss": 1.8092,
600
+ "step": 74,
601
+ "time": 33.47
602
+ },
603
+ {
604
+ "epoch": 1.92,
605
+ "learning_rate": "5.0000e-06",
606
+ "loss": 1.3042,
607
+ "slid_loss": 1.8025,
608
+ "step": 75,
609
+ "time": 33.54
610
+ },
611
+ {
612
+ "epoch": 1.95,
613
+ "learning_rate": "5.0000e-06",
614
+ "loss": 1.347,
615
+ "slid_loss": 1.7965,
616
+ "step": 76,
617
+ "time": 33.22
618
+ },
619
+ {
620
+ "epoch": 1.97,
621
+ "learning_rate": "5.0000e-06",
622
+ "loss": 1.237,
623
+ "slid_loss": 1.7892,
624
+ "step": 77,
625
+ "time": 33.25
626
+ },
627
+ {
628
+ "epoch": 2.0,
629
+ "learning_rate": "5.0000e-06",
630
+ "loss": 1.1854,
631
+ "slid_loss": 1.7815,
632
+ "step": 78,
633
+ "time": 33.47
634
+ },
635
+ {
636
+ "epoch": 2.0,
637
+ "step": 78,
638
+ "time": 167.03,
639
+ "total_flos": 0.0,
640
+ "train_loss": 1.781490119603964,
641
+ "train_runtime": 2945.5278,
642
+ "train_samples_per_second": 6.79,
643
+ "train_steps_per_second": 0.026
644
+ }
645
+ ],
646
+ "logging_steps": 1.0,
647
+ "max_steps": 78,
648
+ "num_train_epochs": 2,
649
+ "save_steps": 500,
650
+ "total_flos": 0.0,
651
+ "trial_name": null,
652
+ "trial_params": null
653
+ }
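
The training log above is plain JSON, so it can be summarised in a few lines; a sketch assuming the file is read from this repo's root:

```python
import json

with open('trainer_state.json') as f:
    state = json.load(f)

# Per-step entries carry 'loss' and 'slid_loss'; the final summary entry does not.
entries = [e for e in state['log_history'] if 'loss' in e]
print(f"{len(entries)} logged steps, "
      f"first loss {entries[0]['loss']:.4f}, last loss {entries[-1]['loss']:.4f}")
```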
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d56272d98cc470efa4a2443fc49bf438f1874c65c0880d661223c30a0f6da16
3
+ size 5883