bibproj committed
Commit 7df8408 · verified · 1 Parent(s): eace165
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
model-00051-of-00054.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72cd9b5b78c1999ac6f93a57ff8f3036e1a1bdc39ac77e88b03818f79ac1f72e
+ size 4278319949
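The large binaries in this commit are stored as Git LFS pointer files like the one above: a spec version line, a SHA-256 object id, and the payload size in bytes. A minimal, unofficial sketch of parsing one (the authoritative format is the Git LFS specification):

```python
# A minimal, unofficial sketch of parsing an LFS pointer like the one above;
# the real format is defined by the Git LFS spec, not this helper.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/v1")
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:72cd9b5b78c1999ac6f93a57ff8f3036e1a1bdc39ac77e88b03818f79ac1f72e
size 4278319949"""
print(parse_lfs_pointer(pointer)["size"])  # 4278319949
```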
model-00052-of-00054.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d57b4a4d7a4ac4aad734ca97fb30133e5017426bd3cda1ea8c3e05a6ee09e16e
+ size 5335284414
model-00053-of-00054.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a70187d04bd7d171636d82e225f14e3a79562b044efd5c0b28a95fdfdd5036a0
+ size 4278319921
model-00054-of-00054.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c752d604c7d6825ae03022e4df0d9b1fccb351f0d562985be6094b8f6e42442f
+ size 2050396296
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_minimax_m2.py ADDED
@@ -0,0 +1,706 @@
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
+ # the file from the modular. If any change should be done, please apply the change to the
+ # modular_minimax_m2.py file directly. One of our CI checks enforces this.
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+ # coding=utf-8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ from collections.abc import Callable
+ from typing import Optional, Union, Unpack
+
+ import torch
+ from torch import nn
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.generation import GenerationMixin
+ from transformers.integrations import use_kernel_forward_from_hub
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
+ from transformers.modeling_layers import (
+     GenericForQuestionAnswering,
+     GenericForSequenceClassification,
+     GenericForTokenClassification,
+     GradientCheckpointingLayer,
+ )
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
+ from transformers.utils.deprecation import deprecate_kwarg
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
+ from .configuration_minimax_m2 import MiniMaxM2Config
+
+
+ class MiniMaxM2MLP(nn.Module):
+     def __init__(self, config: MiniMaxM2Config):
+         super().__init__()
+         self.ffn_dim = config.intermediate_size
+         self.hidden_dim = config.hidden_size
+
+         self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+         self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
+         self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
+
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, hidden_states):
+         current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
+         current_hidden_states = self.w2(current_hidden_states)
+         return current_hidden_states
+
+
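The three projections above implement a SwiGLU-style gated MLP: `w1` gates, `w3` up-projects, and `w2` projects back down. A standalone sketch of the same computation with toy dimensions, assuming a SiLU hidden activation (the usual value `ACT2FN[config.hidden_act]` would resolve to):

```python
import torch
from torch import nn

hidden, ffn = 8, 16                        # toy sizes; the real config is far larger
w1 = nn.Linear(hidden, ffn, bias=False)    # gate projection
w3 = nn.Linear(hidden, ffn, bias=False)    # up projection
w2 = nn.Linear(ffn, hidden, bias=False)    # down projection

x = torch.randn(2, 4, hidden)              # (batch, seq, hidden)
y = w2(nn.functional.silu(w1(x)) * w3(x))  # act(w1 x) * w3 x, then down-project
print(y.shape)                             # torch.Size([2, 4, 8])
```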
+ class MiniMaxM2Experts(nn.ModuleList):
+     """
+     ModuleList of experts.
+     """
+
+     def __init__(self, config: MiniMaxM2Config):
+         super().__init__()
+         self.top_k = config.num_experts_per_tok
+         self.num_experts = config.num_local_experts
+         for _ in range(self.num_experts):
+             self.append(MiniMaxM2MLP(config))
+
+     def forward(
+         self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
+     ) -> torch.Tensor:
+         """
+         Args:
+             hidden_states: (batch_size * sequence_length, hidden_dim)
+             top_k_index: (batch_size * sequence_length, top_k)
+             top_k_weights: (batch_size * sequence_length, top_k)
+         Returns:
+             (batch_size * sequence_length, hidden_dim)
+         """
+         final_hidden_states = torch.zeros_like(hidden_states)
+         expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
+
+         expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
+         for expert_idx in expert_hit:
+             idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
+             current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
+             current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
+             final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
+         return final_hidden_states
+
+
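The dispatch loop above is easiest to read with concrete shapes. A toy sketch (sizes are illustrative, not the real config) of how the one-hot mask and `torch.where` recover, for one expert, which tokens it serves and which top-k slot holds each routing weight:

```python
import torch

num_experts = 4                                       # toy sizes, not the real config
top_k_index = torch.tensor([[0, 2], [2, 1], [0, 1]])  # 3 tokens, top_k = 2

# one_hot: (tokens, top_k, experts) -> permute to (experts, top_k, tokens)
expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=num_experts).permute(2, 1, 0)

# For expert 2, recover which tokens it serves and which top-k slot holds each weight.
idx, top_x = torch.where(expert_mask[2])
print(top_x.tolist(), idx.tolist())  # [1, 0] [0, 1]: expert 2 serves tokens 1 and 0 via slots 0 and 1
```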
+ class MiniMaxM2SparseMoeBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.top_k = config.num_experts_per_tok
+         self.jitter_noise = config.router_jitter_noise
+         self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
+         self.experts = MiniMaxM2Experts(config)
+         self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
+
+     def route_tokens_to_experts(self, router_logits):
+         routing_weights = torch.nn.functional.sigmoid(router_logits.float())
+         scores_for_choice = routing_weights + self.e_score_correction_bias
+         _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
+         top_k_weights = routing_weights.gather(1, top_k_index)
+         top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
+         return top_k_index, top_k_weights.to(router_logits.dtype)
+
+     def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
+         batch_size, sequence_length, hidden_dim = hidden_states.shape
+         if self.training and self.jitter_noise > 0:
+             hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
+         hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
+         router_logits = self.gate(hidden_states)
+         top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
+         hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
+         hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
+         return hidden_states, router_logits
+
+
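Note that routing uses sigmoid scores rather than a softmax, and the `e_score_correction_bias` buffer (zero-initialized above, loaded from the checkpoint) only biases expert *selection*; the mixing weights come from the unbiased scores, renormalized per token. A toy sketch of `route_tokens_to_experts`:

```python
import torch

torch.manual_seed(0)
num_experts, top_k = 8, 2                           # toy sizes
router_logits = torch.randn(5, num_experts)         # 5 tokens
e_score_correction_bias = torch.zeros(num_experts)  # zeros at init, as in the buffer above

scores = torch.sigmoid(router_logits.float())
# The bias only influences *which* experts win the top-k...
_, top_k_index = torch.topk(scores + e_score_correction_bias, top_k, dim=-1, sorted=False)
# ...while the mixing weights come from the unbiased scores, renormalized per token.
top_k_weights = scores.gather(1, top_k_index)
top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
print(top_k_index[0].tolist(), top_k_weights[0].sum().item())  # two expert ids, weights sum to 1.0
```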
+ @use_kernel_forward_from_hub("RMSNorm")
+ class MiniMaxM2RMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         MiniMaxM2RMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+     def extra_repr(self):
+         return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
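`repeat_kv` is what lets grouped-query attention run through the eager path: each of the `num_key_value_heads` KV heads is broadcast (as a view, then reshaped) so that `n_rep` query heads share it. A toy sketch of the shape manipulation:

```python
import torch

b, kv_heads, n_rep, seq, d = 1, 2, 3, 4, 5  # toy shapes
kv = torch.randn(b, kv_heads, seq, d)

out = kv[:, :, None, :, :].expand(b, kv_heads, n_rep, seq, d).reshape(b, kv_heads * n_rep, seq, d)

# Query heads 0..2 now all attend against a copy of KV head 0, heads 3..5 against KV head 1.
assert torch.equal(out[:, 0], kv[:, 0]) and torch.equal(out[:, 1], kv[:, 0])
print(out.shape)  # torch.Size([1, 6, 4, 5])
```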
+ def eager_attention_forward(
+     module: nn.Module,
+     query: torch.Tensor,
+     key: torch.Tensor,
+     value: torch.Tensor,
+     attention_mask: Optional[torch.Tensor],
+     scaling: float,
+     dropout: float = 0.0,
+     **kwargs: Unpack[TransformersKwargs],
+ ):
+     key_states = repeat_kv(key, module.num_key_value_groups)
+     value_states = repeat_kv(value, module.num_key_value_groups)
+
+     attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+     if attention_mask is not None:
+         causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+         attn_weights = attn_weights + causal_mask
+
+     attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+     attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+     attn_output = torch.matmul(attn_weights, value_states)
+     attn_output = attn_output.transpose(1, 2).contiguous()
+
+     return attn_output, attn_weights
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+
+     # Keep half or full tensor for later concatenation
+     rotary_dim = cos.shape[-1]
+     q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
+     k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
+
+     # Apply rotary embeddings on the first half or full tensor
+     q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
+     k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
+
+     # Concatenate back to full shape
+     q_embed = torch.cat([q_embed, q_pass], dim=-1)
+     k_embed = torch.cat([k_embed, k_pass], dim=-1)
+     return q_embed, k_embed
+
+
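Because `rotary_dim` is read off the last dimension of `cos`, the helper supports partial RoPE: when the rotary dimension is smaller than `head_dim`, only the leading slice of each head is rotated and the tail passes through unchanged. A toy sketch (sizes are illustrative):

```python
import torch

def rotate_half(x):
    x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

head_dim, rotary_dim = 8, 4                       # hypothetical: only half of each head is rotated
q = torch.randn(1, 2, 3, head_dim)                # (batch, heads, seq, head_dim)
cos = torch.randn(1, 3, rotary_dim).unsqueeze(1)  # unsqueeze_dim=1 broadcasts over heads
sin = torch.randn(1, 3, rotary_dim).unsqueeze(1)

q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
assert torch.equal(q_embed[..., rotary_dim:], q_pass)  # the tail passes through untouched
```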
+ class MiniMaxM2Attention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+         self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
+         self.scaling = self.head_dim**-0.5
+         self.attention_dropout = config.attention_dropout
+         self.is_causal = True
+         self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
+         self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+         self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
+         self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
+
+         self.use_qk_norm = config.use_qk_norm
+         if self.use_qk_norm:
+             self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
+             self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor],
+         past_key_values: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[FlashAttentionKwargs],
+     ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
+         input_shape = hidden_states.shape[:-1]
+         hidden_shape = (*input_shape, -1, self.head_dim)
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         if self.use_qk_norm:  # main diff from Llama
+             query_states = self.q_norm(query_states)
+             key_states = self.k_norm(key_states)
+
+         key_states = key_states.view(hidden_shape)
+         query_states = query_states.view(hidden_shape)
+         value_states = value_states.view(hidden_shape)
+
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+
+         cos, sin = position_embeddings
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_values is not None:
+             # sin and cos are specific to RoPE models; position_ids needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         attention_interface: Callable = eager_attention_forward
+         if self.config._attn_implementation != "eager":
+             attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+         attn_output, attn_weights = attention_interface(
+             self,
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             dropout=0.0 if not self.training else self.attention_dropout,
+             scaling=self.scaling,
+             **kwargs,
+         )
+
+         attn_output = attn_output.reshape(*input_shape, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+         return attn_output, attn_weights
+
+
+ class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
+     def __init__(self, config: MiniMaxM2Config, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.self_attn = MiniMaxM2Attention(config, layer_idx)
+
+         self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
+         self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+     @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         position_embeddings: tuple[torch.Tensor, torch.Tensor],
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> torch.FloatTensor:
+         residual = hidden_states
+
+         hidden_states = self.input_layernorm(hidden_states)
+
+         # Self Attention
+         hidden_states, _ = self.self_attn(
+             hidden_states=hidden_states,
+             position_embeddings=position_embeddings,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             cache_position=cache_position,
+             **kwargs,
+         )
+         hidden_states = residual + hidden_states
+
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states, _ = self.block_sparse_moe(hidden_states)
+         hidden_states = residual + hidden_states
+
+         return hidden_states
+
+
+ class MiniMaxM2RotaryEmbedding(nn.Module):
+     inv_freq: torch.Tensor  # fix linting for `register_buffer`
+
+     def __init__(self, config: MiniMaxM2Config, device=None):
+         super().__init__()
+         # BC: "rope_type" was originally "type"
+         if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
+             self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+         else:
+             self.rope_type = "default"
+         self.max_seq_len_cached = config.max_position_embeddings
+         self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     @torch.no_grad()
+     @dynamic_rope_update  # power user: used with advanced RoPE types (e.g. dynamic rope)
+     def forward(self, x, position_ids):
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+         position_ids_expanded = position_ids[:, None, :].float()
+
+         device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):  # Force float32
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos() * self.attention_scaling
+             sin = emb.sin() * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
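For the `"default"` rope type, the init function boils down to inverse frequencies `base**(-2i/dim)`; the forward pass above then takes the outer product with the positions in float32 and duplicates it to match `rotate_half`'s layout. A standalone sketch with illustrative values (the real `rope_theta` and rotary dimension come from the config):

```python
import torch

base, rotary_dim = 10000.0, 8                    # illustrative; the real values come from the config
inv_freq = 1.0 / (base ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
position_ids = torch.arange(6)[None, :].float()  # (batch=1, seq=6)

# Outer product of frequencies and positions, as in the forward pass above.
freqs = (inv_freq[None, :, None] @ position_ids[:, None, :]).transpose(1, 2)  # (1, 6, 4)
emb = torch.cat((freqs, freqs), dim=-1)          # duplicated to match rotate_half's layout
cos, sin = emb.cos(), emb.sin()                  # attention_scaling is 1.0 for the default rope type
print(cos.shape)                                 # torch.Size([1, 6, 8])
```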
+ @auto_docstring
+ class MiniMaxM2PreTrainedModel(PreTrainedModel):
+     config: MiniMaxM2Config
+     base_model_prefix = "model"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["MiniMaxM2DecoderLayer"]
+     _skip_keys_device_placement = ["past_key_values"]
+     _supports_flash_attn = True
+     _supports_sdpa = True
+     _supports_flex_attn = True
+     _can_compile_fullgraph = False  # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
+     _supports_attention_backend = True
+     _can_record_outputs = {
+         "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
+         "hidden_states": MiniMaxM2DecoderLayer,
+         "attentions": MiniMaxM2Attention,
+     }
+
+
+ @auto_docstring
+ class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
+     def __init__(self, config: MiniMaxM2Config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList(
+             [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+         )
+         self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+         self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @check_model_inputs
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> MoeModelOutputWithPast:
+         if (input_ids is None) ^ (inputs_embeds is not None):
+             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+         if use_cache and past_key_values is None:
+             past_key_values = DynamicCache(config=self.config)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embed_tokens(input_ids)
+
+         if cache_position is None:
+             past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+             cache_position = torch.arange(
+                 past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+             )
+         if position_ids is None:
+             position_ids = cache_position.unsqueeze(0)
+
+         mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
+         causal_mask = mask_function(
+             config=self.config,
+             input_embeds=inputs_embeds,
+             attention_mask=attention_mask,
+             cache_position=cache_position,
+             past_key_values=past_key_values,
+             position_ids=position_ids,
+         )
+
+         hidden_states = inputs_embeds
+
+         # create position embeddings to be shared across the decoder layers
+         position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
+             hidden_states = decoder_layer(
+                 hidden_states,
+                 position_embeddings=position_embeddings,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_values=past_key_values,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 **kwargs,
+             )
+
+         hidden_states = self.norm(hidden_states)
+
+         return MoeModelOutputWithPast(  # only diff with Mistral is the output type, we need MoE
+             last_hidden_state=hidden_states,
+             past_key_values=past_key_values,
+         )
+
+
+ def load_balancing_loss_func(
+     gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
+     num_experts: Optional[int] = None,
+     top_k=2,
+     attention_mask: Optional[torch.Tensor] = None,
+ ) -> Union[torch.Tensor, int]:
+     r"""
+     Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
+
+     See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
+     function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
+     experts is too unbalanced.
+
+     Args:
+         gate_logits:
+             Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
+             shape [batch_size X sequence_length, num_experts].
+         num_experts:
+             Number of experts
+         top_k:
+             The number of experts to route per-token, can be also interpreted as the `top-k` routing
+             parameter.
+         attention_mask (`torch.Tensor`, *optional*):
+             The attention_mask used in forward function
+             shape [batch_size X sequence_length] if not None.
+
+     Returns:
+         The auxiliary loss.
+     """
+     if gate_logits is None or not isinstance(gate_logits, tuple):
+         return 0
+
+     if isinstance(gate_logits, tuple):
+         compute_device = gate_logits[0].device
+         concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
+
+     routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
+
+     _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
+
+     expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
+
+     if attention_mask is None:
+         # Compute the percentage of tokens routed to each experts
+         tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.mean(routing_weights, dim=0)
+     else:
+         batch_size, sequence_length = attention_mask.shape
+         num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
+
+         # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
+         expert_attention_mask = (
+             attention_mask[None, :, :, None, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
+             .reshape(-1, top_k, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the percentage of tokens routed to each experts
+         tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
+             expert_attention_mask, dim=0
+         )
+
+         # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
+         router_per_expert_attention_mask = (
+             attention_mask[None, :, :, None]
+             .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
+             .reshape(-1, num_experts)
+             .to(compute_device)
+         )
+
+         # Compute the average probability of routing to these experts
+         router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
+             router_per_expert_attention_mask, dim=0
+         )
+
+     overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
+     return overall_loss * num_experts
+
+
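In the unmasked branch, the loss reduces to `num_experts * sum(fraction_of_slots_per_expert * mean_router_prob_per_expert)`, which a perfectly uniform router minimizes at `top_k`. A toy sketch of that branch:

```python
import torch

torch.manual_seed(0)
num_experts, top_k = 4, 2
# Two layers' router logits for 3 tokens each, concatenated as in the function above.
gate_logits = (torch.randn(3, num_experts), torch.randn(3, num_experts))

routing_weights = torch.softmax(torch.cat(gate_logits, dim=0), dim=-1)
_, selected = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected, num_experts)

tokens_per_expert = expert_mask.float().mean(dim=0)   # fraction of slots routed to each expert
router_prob_per_expert = routing_weights.mean(dim=0)  # mean router probability per expert
loss = (tokens_per_expert * router_prob_per_expert.unsqueeze(0)).sum() * num_experts
print(loss)  # ~= top_k for a perfectly uniform router; grows as routing collapses
```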
+ @auto_docstring
+ class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
+     _tied_weights_keys = ["lm_head.weight"]
+     _tp_plan = {"lm_head": "colwise_rep"}
+     _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = MiniMaxM2Model(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+         self.router_aux_loss_coef = config.router_aux_loss_coef
+         self.num_experts = config.num_local_experts
+         self.num_experts_per_tok = config.num_experts_per_tok
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     @can_return_tuple
+     @auto_docstring
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_router_logits: Optional[bool] = None,
+         cache_position: Optional[torch.LongTensor] = None,
+         logits_to_keep: Union[int, torch.Tensor] = 0,
+         **kwargs: Unpack[TransformersKwargs],
+     ) -> MoeCausalLMOutputWithPast:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+             config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+             (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+         Example:
+
+         ```python
+         >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
+
+         >>> model = MiniMaxM2ForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
+         >>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
+
+         >>> prompt = "Hey, are you conscious? Can you talk to me?"
+         >>> inputs = tokenizer(prompt, return_tensors="pt")
+
+         >>> # Generate
+         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
+         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
+         ```"""
+
+         output_router_logits = (
+             output_router_logits if output_router_logits is not None else self.config.output_router_logits
+         )
+
+         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+         outputs: MoeModelOutputWithPast = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_router_logits=output_router_logits,
+             cache_position=cache_position,
+             **kwargs,
+         )
+
+         hidden_states = outputs.last_hidden_state
+         # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
+         slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
+         logits = self.lm_head(hidden_states[:, slice_indices, :])
+
+         loss = None
+         if labels is not None:
+             loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
+
+         aux_loss = None
+         if output_router_logits:
+             aux_loss = load_balancing_loss_func(
+                 outputs.router_logits,
+                 self.num_experts,
+                 self.num_experts_per_tok,
+                 attention_mask,
+             )
+             if labels is not None:
+                 loss += self.router_aux_loss_coef * aux_loss.to(loss.device)  # make sure to reside in the same device
+
+         return MoeCausalLMOutputWithPast(
+             loss=loss,
+             aux_loss=aux_loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+             router_logits=outputs.router_logits,
+         )
+
+
+ class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
+     pass
+
+
+ __all__ = [
+     "MiniMaxM2ForCausalLM",
+     "MiniMaxM2ForQuestionAnswering",
+     "MiniMaxM2Model",
+     "MiniMaxM2PreTrainedModel",
+     "MiniMaxM2ForSequenceClassification",
+     "MiniMaxM2ForTokenClassification",
+ ]
special_tokens_map.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "additional_special_tokens": [
+     "<code_interpreter>",
+     "<commit_after>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<empty_output>",
+     "<filename>",
+     "<fim_middle>",
+     "<fim_pad>",
+     "<fim_prefix>",
+     "<fim_suffix>",
+     "<function_call>",
+     "<gh_stars>",
+     "]<]speech[>[",
+     "]<]image[>[",
+     "]<]video[>[",
+     "]<]start of speech[>[",
+     "]<]end of speech[>[",
+     "]<]start of image[>[",
+     "]<]end of image[>[",
+     "]<]start of video[>[",
+     "]<]end of video[>[",
+     "]<]vision pad[>[",
+     "]~!b[",
+     "<issue_closed>",
+     "<issue_comment>",
+     "<issue_start>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<reponame>",
+     "[e~[",
+     "]!d~[",
+     "]!p~[",
+     "]~b]",
+     "<jupyter_error>",
+     "<add_file>",
+     "<delete_file>",
+     "<rename_file>",
+     "<edit_file>",
+     "<commit_message>",
+     "<empty_source_file>",
+     "<repo_struct>",
+     "<code_context>",
+     "<file_content>",
+     "<source_files>",
+     "<pr_start>",
+     "<review_comment>",
+     "<filepath>",
+     "<file_sep>"
+   ],
+   "bos_token": {
+     "content": "]~!b[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[e~[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "]!d~[",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7b90ed7f55d905175bc26771d6d7d33b40b46742f073675bc816fedaf482ea1
+ size 15522763
tokenizer_config.json ADDED
@@ -0,0 +1,496 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "200000": {
+       "content": "]!p~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200001": {
+       "content": "<fim_prefix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200002": {
+       "content": "<fim_middle>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200003": {
+       "content": "<fim_suffix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200004": {
+       "content": "<fim_pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200005": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200006": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200007": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200008": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200009": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200010": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200011": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200012": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200013": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200014": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200015": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200016": {
+       "content": "<commit_before>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200017": {
+       "content": "<commit_msg>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<commit_after>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200019": {
+       "content": "]~b]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200020": {
+       "content": "[e~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200021": {
+       "content": "]!d~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200022": {
+       "content": "<function_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200023": {
+       "content": "<code_interpreter>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200024": {
+       "content": "]<]speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200025": {
+       "content": "]<]image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200026": {
+       "content": "]<]video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200027": {
+       "content": "]<]start of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200028": {
+       "content": "]<]end of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200029": {
+       "content": "]<]start of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200030": {
+       "content": "]<]end of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200031": {
+       "content": "]<]start of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200032": {
+       "content": "]<]end of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200033": {
+       "content": "]<]vision pad[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200034": {
+       "content": "]~!b[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200035": {
+       "content": "<jupyter_error>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200036": {
+       "content": "<add_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200037": {
+       "content": "<delete_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200038": {
+       "content": "<rename_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200039": {
+       "content": "<edit_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200040": {
+       "content": "<commit_message>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200041": {
+       "content": "<empty_source_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200042": {
+       "content": "<repo_struct>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200043": {
+       "content": "<code_context>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200044": {
+       "content": "<file_content>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200045": {
+       "content": "<source_files>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200046": {
+       "content": "<pr_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200047": {
+       "content": "<review_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200048": {
+       "content": "<filepath>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200049": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200050": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200051": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200052": {
+       "content": "<minimax:tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "200053": {
+       "content": "</minimax:tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<code_interpreter>",
+     "<commit_after>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<empty_output>",
+     "<filename>",
+     "<fim_middle>",
+     "<fim_pad>",
+     "<fim_prefix>",
+     "<fim_suffix>",
+     "<function_call>",
+     "<gh_stars>",
+     "]<]speech[>[",
+     "]<]image[>[",
+     "]<]video[>[",
+     "]<]start of speech[>[",
+     "]<]end of speech[>[",
+     "]<]start of image[>[",
+     "]<]end of image[>[",
+     "]<]start of video[>[",
+     "]<]end of video[>[",
+     "]<]vision pad[>[",
+     "]~!b[",
+     "<issue_closed>",
+     "<issue_comment>",
+     "<issue_start>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<reponame>",
+     "[e~[",
+     "]!d~[",
+     "]!p~[",
+     "]~b]",
+     "<jupyter_error>",
+     "<add_file>",
+     "<delete_file>",
+     "<rename_file>",
+     "<edit_file>",
+     "<commit_message>",
+     "<empty_source_file>",
+     "<repo_struct>",
+     "<code_context>",
+     "<file_content>",
+     "<source_files>",
+     "<pr_start>",
+     "<review_comment>",
+     "<filepath>",
+     "<file_sep>"
+   ],
+   "bos_token": "]~!b[",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[e~[",
+   "extra_special_tokens": {},
+   "model_max_length": 40960000,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "]!d~["
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff