Fredtt3 committed
Commit 2cb0949 · verified · 1 Parent(s): dd2fa85

New checkpoint trained for 8,000 steps on 65,536,000 tokens
chat_template.jinja ADDED
@@ -0,0 +1,5 @@
+ {% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>
+
+ '+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>
+
+ ' }}
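For reference, a minimal sketch of how this template renders (a sketch, assuming the repository's tokenizer ships with this template; the repo id below is a placeholder):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Fredtt3/llada-checkpoint", trust_remote_code=True)  # placeholder repo id
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False)
# Expected Llama-3-style framing (the template itself appends the trailing assistant header):
#   <bos><|start_header_id|>user<|end_header_id|>\n\nHello!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n
print(prompt)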
config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "activation_type": "swiglu",
+   "alibi": false,
+   "alibi_bias_max": 8.0,
+   "architectures": [
+     "LLaDAModelLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configs_llada.LLaDAConfig",
+     "AutoModelForCausalLM": "model.LLaDAModelLM",
+     "AutoModel": "model.LLaDAModelLM"
+   },
+   "attention_dropout": 0.0,
+   "attention_layer_norm": false,
+   "attention_layer_norm_with_affine": true,
+   "bias_for_layer_norm": null,
+   "block_group_size": 1,
+   "block_type": "llama",
+   "d_model": 768,
+   "embedding_dropout": 0.0,
+   "embedding_size": 126464,
+   "eos_token_id": 126081,
+   "flash_attention": false,
+   "include_bias": false,
+   "include_qkv_bias": false,
+   "init_cutoff_factor": null,
+   "init_device": "cuda:0",
+   "init_fn": "mitchell",
+   "init_std": 0.02,
+   "input_emb_norm": false,
+   "layer_norm_type": "rms",
+   "layer_norm_with_affine": true,
+   "mask_token_id": 126336,
+   "max_sequence_length": 4096,
+   "mlp_hidden_size": 3072,
+   "mlp_ratio": 4,
+   "model_type": "llada",
+   "multi_query_attention": null,
+   "n_heads": 12,
+   "n_kv_heads": 12,
+   "n_layers": 14,
+   "pad_token_id": 126081,
+   "precision": "bf16",
+   "residual_dropout": 0.0,
+   "rms_norm_eps": 1e-05,
+   "rope": true,
+   "rope_full_precision": true,
+   "rope_theta": 500000.0,
+   "scale_logits": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.54.1",
+   "use_cache": false,
+   "vocab_size": 126464,
+   "weight_tying": false
+ }
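Because `auto_map` points at remote code (configs_llada.py / model.py in this repo), loading requires `trust_remote_code=True`. A minimal loading sketch (placeholder repo id); note the per-head dimension implied by the config is d_model / n_heads = 768 / 12 = 64:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("Fredtt3/llada-checkpoint", trust_remote_code=True)  # placeholder repo id
assert config.d_model // config.n_heads == 64  # per-head dimension
assert config.embedding_size == config.vocab_size == 126464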
configs_llada.py ADDED
@@ -0,0 +1,454 @@
+ """
+ LLaDA configuration
+ """
+ from transformers import AutoConfig
+ from transformers.configuration_utils import PretrainedConfig
+
+ from enum import Enum
+ from os import PathLike
+ from dataclasses import dataclass
+ from typing import Optional, Union
+
+
+ __all__ = [
+     "ActivationType",
+     "ActivationCheckpointingStrategy",
+     "BlockType",
+     "LayerNormType",
+     "InitFnType",
+     "ModelConfig",
+ ]
+
+ PathOrStr = Union[str, PathLike]
+
+
+ class StrEnum(str, Enum):
+     """
+     This is equivalent to Python's :class:`enum.StrEnum` since version 3.11.
+     We include this here for compatibility with older versions of Python.
+     """
+
+     def __str__(self) -> str:
+         return self.value
+
+     def __repr__(self) -> str:
+         return f"'{str(self)}'"
+
+
+ class LayerNormType(StrEnum):
+     default = "default"
+     """
+     The default LayerNorm implementation, equivalent to PyTorch's built-in version.
+     """
+
+     low_precision = "low_precision"
+     """
+     A low-precision version of the default LayerNorm.
+     """
+
+     rms = "rms"
+     """
+     An RMSNorm implementation. When using ``torch.compile`` this is
+     probably the fastest implementation.
+     """
+
+     gemma_rms = "gemma_rms"
+     """
+     An RMSNorm implementation from Gemma. When using ``torch.compile`` this is
+     probably the fastest implementation.
+     """
+
+     amd_compatible = "amd_compatible"
+     """
+     LayerNorm implemented manually to work around an issue with ROCm.
+     """
+
+
+ class ActivationType(StrEnum):
+     gelu = "gelu"
+     relu = "relu"
+     silu = "silu"
+     swiglu = "swiglu"
+
+
+ class BlockType(StrEnum):
+     sequential = "sequential"
+     parallel = "parallel"
+
+     llama = "llama"
+     """
+     A block similar to the sequential block with slightly different
+     implementations of operations like attention to imitate the behavior of Llama.
+     """
+
+
+ class InitFnType(StrEnum):
+     mitchell = "mitchell"
+     """
+     The strategy suggested to us by Mitchell Wortsman from UW.
+     This uses a truncated normal distribution with an adaptive standard deviation that depends
+     on the size of the weights as well as the depth of the layer.
+     """
+
+     normal = "normal"
+     """
+     All weights are initialized from the same normal distribution.
+     """
+
+     kaiming_normal = "kaiming_normal"
+     """
+     All weights are initialized with the Kaiming method from a normal distribution.
+     Note this currently won't work with FSDP.
+     """
+
+     fan_in = "fan_in"
+     """
+     "Fan-in variance scaling", i.e. normal with a standard deviation of ``1/sqrt(d_in)`` where ``d_in``
+     is the input dimensionality of the kernel.
+     """
+
+     full_megatron = "full_megatron"
+     """
+     This is what metaseq calls "full megatron init". It is the init used for Llama 2.
+     """
+
+
+ @dataclass
+ class ModelConfig:
+     """
+     LLaDA (model) configuration.
+     """
+
+     # Note that the defaults for these attributes are equivalent to the base GPT2 model.
+
+     d_model: int = 768
+     """
+     The hidden size of the model.
+     """
+
+     n_heads: int = 12
+     """
+     The number of self-attention heads.
+     """
+
+     n_kv_heads: Optional[int] = None
+     """
+     The number of heads to use for keys and values. Defaults to `n_heads`.
+     Set this to ``None`` or ``n_heads`` for normal multi-head attention.
+     Set this to 1 for multi-query attention.
+     Set it to some in-between value for Llama2-style grouped query attention.
+     """
+
+     n_layers: int = 12
+     """
+     The number of layers/blocks.
+     """
+
+     mlp_ratio: int = 4
+     """
+     The ratio of the inner MLP dimensionality to ``d_model``.
+     This is only used when ``mlp_hidden_size`` is not set.
+     """
+
+     mlp_hidden_size: Optional[int] = None
+     """
+     Set the exact hidden size for the MLP. Otherwise the inner MLP hidden size will be set to `mlp_ratio * d_model`.
+     """
+
+     activation_type: ActivationType = ActivationType.swiglu
+     """
+     The activation function to use within the MLP layers.
+     """
+
+     block_type: BlockType = BlockType.sequential
+     """
+     The transformer block implementation.
+     """
+
+     block_group_size: int = 1
+     """
+     The number of blocks to group together into a single parent block.
+     This has no effect on the number of parameters in the model and is only used to wrap groups
+     of blocks together with a single FSDP wrapper during training.
+     """
+
+     alibi: bool = False
+     """
+     If ``True``, use ALiBi embeddings. Mutually exclusive with ``rope``.
+     """
+
+     alibi_bias_max: float = 8.0
+     """
+     Maximum absolute value of ALiBi bias.
+     """
+
+     rope: bool = False
+     """
+     Use rotary positional embeddings (RoPE). Mutually exclusive with ``alibi``.
+     """
+
+     rope_full_precision: bool = True
+     """
+     If ``True``, apply RoPE embeddings at full precision regardless of the input type. Otherwise,
+     apply RoPE at the precision of the input.
+     """
+
+     flash_attention: bool = False
+     """
+     If ``True``, use ``FlashAttention``.
+     """
+
+     attention_dropout: float = 0.1
+     """
+     The dropout probability within the attention modules.
+     """
+
+     multi_query_attention: Optional[bool] = None
+     """
+     Use the Multi-Query formulation of attention used in PaLM. This reduces the number of parameters
+     and is more efficient during inference.
+     """
+
+     attention_layer_norm: bool = False
+     """
+     Apply layer norm to the keys and queries within the attention mechanism.
+     This can help stabilize training.
+     """
+
+     residual_dropout: float = 0.1
+     """
+     The dropout probability for the MLP and attention output within each block.
+     """
+
+     embedding_dropout: float = 0.1
+     """
+     The dropout probability for embeddings.
+     """
+
+     input_emb_norm: bool = False
+     """
+     An input hidden_states norm implementation from Gemma.
+     """
+
+     layer_norm_type: LayerNormType = LayerNormType.default
+     """
+     The layernorm implementation to use.
+     """
+
+     layer_norm_with_affine: bool = True
+     """
+     Whether to include bias and weight parameters for the layer norms.
+     This only affects layer norms that are immediately followed by a linear layer in the forward pass,
+     so everything except QK-norms. To turn off affines for QK norms as well, set :attr:`attention_layer_norm_with_affine`
+     to ``False``.
+     """
+
+     rms_norm_eps: float = 1e-05
+     """
+     The rms layernorm eps param.
+     """
+
+     attention_layer_norm_with_affine: bool = True
+     """
+     Toggle affine transform for the QK norms.
+     """
+
+     max_sequence_length: int = 1024
+     """
+     The maximum input sequence length supported by the model.
+     """
+
+     rope_theta: float = 10000.0
+     """
+     The rope base param.
+     """
+
+     include_qkv_bias: Optional[bool] = False
+     """
+     Whether or not to include bias parameters in qkv linear layers.
+     """
+
+     include_bias: bool = False
+     """
+     Whether or not to include bias parameters in linear layers.
+     In PaLM, they got rid of all bias terms because they found that large
+     models tend to have near 0 bias terms anyway.
+     """
+
+     bias_for_layer_norm: Optional[bool] = None
+     """
+     Whether or not to include bias parameters in layer norm.
+     This is separate from the include_bias parameter, because of a ROCm crash when biases are disabled in
+     layer norm.
+     When this is None (the default), it inherits the setting from include_bias.
+     """
+
+     scale_logits: bool = False
+     """
+     If ``True``, scale the output logits by ``1 / sqrt(d_model)``.
+     """
+
+     vocab_size: int = 50257
+     """
+     Vocabulary size of the model.
+     """
+
+     embedding_size: Optional[int] = 50304
+     """
+     The number of embeddings, i.e. the number of tokens. If set to ``None`` it will default
+     to ``vocab_size``. If ``vocab_size`` is not a multiple of 128, setting this to the
+     next multiple of 128 that's greater than ``vocab_size`` can improve throughput
+     substantially.
+     """
+
+     weight_tying: bool = True
+     """
+     Whether to tie output linear weights to the input embedding.
+     """
+
+     eos_token_id: int = 50256
+     """
+     The ID of the end-of-sentence special token.
+     """
+
+     pad_token_id: int = 50256
+     """
+     The ID of the token to use for padding. Defaults to the ID of the EOS token.
+     """
+
+     mask_token_id: Optional[int] = 50256
+     """
+     The ID of the token to use for the mask token. Defaults to the ID of the EOS token.
+     """
+
+     init_device: Optional[str] = None
+     """
+     The torch device to use when initializing the model parameters, e.g. "cpu", "cuda:0", "meta".
+     """
+
+     init_fn: InitFnType = InitFnType.normal
+     """
+     The weight initialization strategy.
+     """
+
+     init_std: float = 0.02
+     """
+     The standard deviation to use when initializing weights with a "fixed distribution" ``init_fn``, such
+     as "normal".
+     """
+
+     init_cutoff_factor: Optional[float] = None
+     """
+     A positive factor used to scale the cutoff values when initializing weights with a "fixed distribution" ``init_fn``, such
+     as "normal". Setting this to None means values are not cut off.
+     """
+
+     precision: Optional[str] = None
+     """
+     Precision used to train/evaluate with. You shouldn't set this directly.
+     See :data:`TrainConfig.precision` instead.
+     """
+
+     @property
+     def effective_n_kv_heads(self) -> int:
+         if self.n_kv_heads is None:
+             if self.multi_query_attention is True:
+                 return 1
+             else:
+                 return self.n_heads
+         else:
+             if self.multi_query_attention is None:
+                 return self.n_kv_heads
+             if self.multi_query_attention:
+                 n_kv_heads_should_be = 1
+             else:
+                 n_kv_heads_should_be = self.n_heads
+             if self.n_kv_heads == n_kv_heads_should_be:
+                 return n_kv_heads_should_be
+             else:
+                 raise Exception(
+                     "You can't set `multi_query_attention` and `n_kv_heads` at the same time."
+                 )
+
+
+ class ActivationCheckpointingStrategy(StrEnum):
+     whole_layer = "whole_layer"
+     """
+     Checkpoint every transformer layer.
+     """
+
+     one_in_two = "one_in_two"
+     """
+     Checkpoint one in two transformer layers.
+     """
+
+     one_in_three = "one_in_three"
+     """
+     Checkpoint one in three transformer layers.
+     """
+
+     one_in_four = "one_in_four"
+     """
+     Checkpoint one in four transformer layers.
+     """
+
+     two_in_three = "two_in_three"
+     """
+     Checkpoint two out of every three transformer layers.
+     """
+
+     three_in_four = "three_in_four"
+     """
+     Checkpoint three out of every four transformer layers.
+     """
+
+     four_in_five = "four_in_five"
+     """
+     Checkpoint four out of every five transformer layers.
+     """
+
+     nine_in_ten = "nine_in_ten"
+     """
+     Checkpoint nine out of every ten transformer layers.
+     """
+
+     fine_grained = "fine_grained"
+     """
+     Focus checkpointing where it is cheap to recompute and saves the most memory.
+     """
+
+
+ class LLaDAConfig(PretrainedConfig):
+     model_type = "llada"
+     keys_to_ignore_at_inference = ["past_key_values"]  # TODO: confirm
+
+     def __init__(self, use_cache: bool = False, **kwargs):
+         model_config = ModelConfig()
+         all_kwargs = model_config.__dict__
+         all_kwargs.update(kwargs)
+         all_kwargs.update({"use_cache": use_cache})
+         all_kwargs.update(
+             {
+                 "architectures": all_kwargs.get("architectures", ["LLaDAModelLM"])
+             }
+         )
+         super().__init__(**all_kwargs)
+
+     @property
+     def num_attention_heads(self):
+         return self.n_heads
+
+     @property
+     def num_hidden_layers(self):
+         return self.n_layers
+
+     @property
+     def hidden_size(self):
+         return self.d_model
+
+
+ # Register the config class so that it is available for transformer pipelines, auto-loading etc.
+ AutoConfig.register("llada", LLaDAConfig)
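A small sketch of how the fields above interact, using this checkpoint's values (assumes configs_llada.py is importable from the working directory):

from configs_llada import ModelConfig

cfg = ModelConfig(n_heads=12, n_kv_heads=12, mlp_hidden_size=3072)
# n_kv_heads == n_heads -> plain multi-head attention.
assert cfg.effective_n_kv_heads == 12
# An explicit mlp_hidden_size takes precedence over mlp_ratio * d_model.
hidden = cfg.mlp_hidden_size if cfg.mlp_hidden_size is not None else cfg.mlp_ratio * cfg.d_model
assert hidden == 3072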
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "eos_token_id": 126081,
+   "pad_token_id": 126081,
+   "transformers_version": "4.54.1",
+   "use_cache": false
+ }
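`use_cache: false` is consistent with config.json: the model's attention is bidirectional (no causal mask), so a KV cache of past tokens does not apply. A minimal sketch of reading it back (placeholder repo id):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Fredtt3/llada-checkpoint")  # placeholder repo id
assert gen_cfg.eos_token_id == gen_cfg.pad_token_id == 126081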
model.py ADDED
@@ -0,0 +1,1485 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import math
5
+ import sys
6
+ from abc import abstractmethod
7
+ from functools import partial
8
+ from typing import (
9
+ Callable,
10
+ Iterable,
11
+ List,
12
+ NamedTuple,
13
+ Optional,
14
+ Sequence,
15
+ Tuple,
16
+ cast,
17
+ )
18
+ from dataclasses import fields
19
+ from typing import List, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.backends.cuda
23
+ import torch.nn as nn
24
+ import torch.nn.functional as F
25
+ from torch import einsum
26
+ from transformers.modeling_utils import PreTrainedModel
27
+ from transformers.modeling_outputs import CausalLMOutputWithPast
28
+ from transformers.models.auto import AutoModel
29
+ from transformers.cache_utils import Cache
30
+
31
+ from .configs_llada import (
32
+ LLaDAConfig,
33
+ StrEnum,
34
+ InitFnType,
35
+ ActivationType,
36
+ BlockType,
37
+ LayerNormType,
38
+ ModelConfig,
39
+ ActivationCheckpointingStrategy,
40
+ )
41
+
42
+ if sys.version_info.minor > 8:
43
+ from collections.abc import MutableMapping
44
+ elif sys.version_info.minor == 8:
45
+ from typing import MutableMapping
46
+ else:
47
+ raise SystemExit("This script supports Python 3.8 or higher")
48
+
49
+ __all__ = [
50
+ "LayerNormBase",
51
+ "LayerNorm",
52
+ "RMSLayerNorm",
53
+ "GemmaRMSLayerNorm",
54
+ "RotaryEmbedding",
55
+ "Activation",
56
+ "GELU",
57
+ "ReLU",
58
+ "SwiGLU",
59
+ "LLaDABlock",
60
+ "LLaDASequentialBlock",
61
+ "LLaDAModel",
62
+ "LLaDAOutput",
63
+ "LLaDAGenerateOutput",
64
+ ]
65
+
66
+
67
+ log = logging.getLogger(__name__)
68
+
69
+
70
+ class ModuleType(StrEnum):
71
+ in_module = "in"
72
+ out_module = "out"
73
+ emb = "emb"
74
+ final_out = "final_out"
75
+
76
+
77
+ def init_weights(
78
+ config: ModelConfig,
79
+ module: Union[nn.Linear, nn.Embedding],
80
+ d: Optional[int] = None,
81
+ layer_id: Optional[int] = None,
82
+ std_factor: float = 1.0,
83
+ type_of_module: Optional[ModuleType] = None,
84
+ ) -> None:
85
+ """
86
+ Initialize weights of a linear or embedding module.
87
+ :param config: The model config.
88
+ :param module: The linear or embedding submodule to initialize.
89
+ :param d: The effective input dimensionality of the weights. This could be smaller than the actual dimensions
90
+ for fused layers.
91
+ :param layer_id: When set, the standard deviation for the "mitchell" method will be adjusted by
92
+ ``1 / sqrt(2 * (layer_id + 1))``.
93
+ """
94
+ d = d if d is not None else config.d_model
95
+ if config.init_fn == InitFnType.normal:
96
+ std = config.init_std * std_factor
97
+ if config.init_cutoff_factor is not None:
98
+ cutoff_value = config.init_cutoff_factor * std
99
+ nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-cutoff_value, b=cutoff_value)
100
+ else:
101
+ nn.init.normal_(module.weight, mean=0.0, std=std)
102
+ elif config.init_fn == InitFnType.mitchell:
103
+ std = std_factor / math.sqrt(d)
104
+ if layer_id is not None:
105
+ std = std / math.sqrt(2 * (layer_id + 1))
106
+ nn.init.trunc_normal_(module.weight, mean=0.0, std=std, a=-3 * std, b=3 * std)
107
+ elif config.init_fn == InitFnType.kaiming_normal:
108
+ nn.init.kaiming_normal_(module.weight, nonlinearity="relu")
109
+ elif config.init_fn == InitFnType.fan_in:
110
+ std = std_factor / math.sqrt(d)
111
+ nn.init.normal_(module.weight, mean=0.0, std=std)
112
+ elif config.init_fn == InitFnType.full_megatron:
113
+ if type_of_module is None:
114
+ raise RuntimeError(f"When using the {InitFnType.full_megatron} init, every module must have a type.")
115
+
116
+ cutoff_factor = config.init_cutoff_factor
117
+ if cutoff_factor is None:
118
+ cutoff_factor = 3
119
+
120
+ if type_of_module == ModuleType.in_module:
121
+ # for att_proj (same as QKV), ff_proj
122
+ std = config.init_std
123
+ elif type_of_module == ModuleType.out_module:
124
+ # for attn_out, ff_out
125
+ std = config.init_std / math.sqrt(2.0 * config.n_layers)
126
+ elif type_of_module == ModuleType.emb:
127
+ # positional embeddings (wpe)
128
+ # token embeddings (wte)
129
+ std = config.init_std
130
+ elif type_of_module == ModuleType.final_out:
131
+ # final output (ff_out)
132
+ std = config.d_model**-0.5
133
+ else:
134
+ raise RuntimeError(f"Unknown module type '{type_of_module}'")
135
+ nn.init.trunc_normal_(
136
+ module.weight,
137
+ mean=0.0,
138
+ std=std,
139
+ a=-cutoff_factor * std,
140
+ b=cutoff_factor * std,
141
+ )
142
+ else:
143
+ raise NotImplementedError(config.init_fn)
144
+
145
+ if isinstance(module, nn.Linear):
146
+ if module.bias is not None:
147
+ nn.init.zeros_(module.bias)
148
+
149
+ if config.init_fn == InitFnType.normal and getattr(module, "_is_residual", False):
150
+ with torch.no_grad():
151
+ module.weight.div_(math.sqrt(2 * config.n_layers))
152
+
153
+
154
+ def ensure_finite_(x: torch.Tensor, check_neg_inf: bool = True, check_pos_inf: bool = False):
155
+ """
156
+ Modify ``x`` in place to replace ``float("-inf")`` with the minimum value of the dtype when ``check_neg_inf``
157
+ is ``True`` and to replace ``float("inf")`` with the maximum value of the dtype when ``check_pos_inf`` is ``True``.
158
+ """
159
+ if check_neg_inf:
160
+ x.masked_fill_(x == float("-inf"), torch.finfo(x.dtype).min)
161
+ if check_pos_inf:
162
+ x.masked_fill_(x == float("inf"), torch.finfo(x.dtype).max)
163
+
164
+
165
+ def activation_checkpoint_function(cfg: ModelConfig):
166
+ preserve_rng_state = (
167
+ (cfg.attention_dropout == 0.0) and (cfg.embedding_dropout == 0.0) and (cfg.residual_dropout == 0.0)
168
+ )
169
+ from torch.utils.checkpoint import checkpoint
170
+
171
+ return partial(
172
+ checkpoint,
173
+ preserve_rng_state=preserve_rng_state,
174
+ use_reentrant=False,
175
+ )
176
+
177
+
178
+ class BufferCache(dict, MutableMapping[str, torch.Tensor]):
179
+ """
180
+ Cache for attention biases and other things that would normally be stored as buffers.
181
+ We avoid using buffers because we've run into various issues doing so with FSDP.
182
+ In general it appears the way FSDP handles buffers is not well-defined.
183
+ It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid
184
+ since (A) it isn't necessary, and (B) we sometimes have `-inf` in these biases which might get turned into
185
+ NaNs when they're synchronized due to casting or some other issue.
186
+ """
187
+
188
+
189
+ def _non_meta_init_device(config: ModelConfig) -> torch.device:
190
+ if config.init_device is not None and config.init_device != "meta":
191
+ return torch.device(config.init_device)
192
+ else:
193
+ return torch.device("cuda" if torch.cuda.is_available() else "cpu")
194
+
195
+
196
+ class Dropout(nn.Dropout):
197
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
198
+ if self.p == 0.0:
199
+ return input
200
+ else:
201
+ return F.dropout(input, self.p, self.training, self.inplace)
202
+
203
+
204
+ class LayerNormBase(nn.Module):
205
+ def __init__(
206
+ self,
207
+ config: ModelConfig,
208
+ *,
209
+ size: Optional[int] = None,
210
+ elementwise_affine: Optional[bool] = True,
211
+ eps: float = 1e-05,
212
+ ):
213
+ super().__init__()
214
+ self.config = config
215
+ self.eps = eps
216
+ self.normalized_shape = (size or config.d_model,)
217
+ if elementwise_affine or (elementwise_affine is None and self.config.layer_norm_with_affine):
218
+ self.weight = nn.Parameter(torch.ones(self.normalized_shape, device=config.init_device))
219
+ use_bias = self.config.bias_for_layer_norm
220
+ if use_bias is None:
221
+ use_bias = self.config.include_bias
222
+ if use_bias:
223
+ self.bias = nn.Parameter(torch.zeros(self.normalized_shape, device=config.init_device))
224
+ else:
225
+ self.register_parameter("bias", None)
226
+ else:
227
+ self.register_parameter("bias", None)
228
+ self.register_parameter("weight", None)
229
+
230
+ @abstractmethod
231
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
232
+ raise NotImplementedError
233
+
234
+ @classmethod
235
+ def build(cls, config: ModelConfig, size: Optional[int] = None, **kwargs) -> LayerNormBase:
236
+ if config.layer_norm_type == LayerNormType.default:
237
+ return LayerNorm(config, size=size, low_precision=False, **kwargs)
238
+ elif config.layer_norm_type == LayerNormType.low_precision:
239
+ return LayerNorm(config, size=size, low_precision=True, **kwargs)
240
+ elif config.layer_norm_type == LayerNormType.rms:
241
+ return RMSLayerNorm(config, size=size, **kwargs)
242
+ elif config.layer_norm_type == LayerNormType.gemma_rms:
243
+ return GemmaRMSLayerNorm(config, size=size, **kwargs)
244
+ else:
245
+ raise NotImplementedError(f"Unknown LayerNorm type: '{config.layer_norm_type}'")
246
+
247
+ def _cast_if_autocast_enabled(self, tensor: torch.Tensor, dtype: Optional[torch.dtype] = None) -> torch.Tensor:
248
+ # NOTE: `is_autocast_enabled()` only checks for CUDA autocast, so we use the separate function
249
+ # `is_autocast_cpu_enabled()` for CPU autocast.
250
+ # See https://github.com/pytorch/pytorch/issues/110966.
251
+ if tensor.device.type == "cuda" and torch.is_autocast_enabled():
252
+ return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_gpu_dtype())
253
+ elif tensor.device.type == "cpu" and torch.is_autocast_cpu_enabled():
254
+ return tensor.to(dtype=dtype if dtype is not None else torch.get_autocast_cpu_dtype())
255
+ else:
256
+ return tensor
257
+
258
+ def reset_parameters(self):
259
+ if self.weight is not None:
260
+ torch.nn.init.ones_(self.weight) # type: ignore
261
+ if self.bias is not None:
262
+ torch.nn.init.zeros_(self.bias) # type: ignore
263
+
264
+
265
+ class LayerNorm(LayerNormBase):
266
+ """
267
+ The default :class:`LayerNorm` implementation which can optionally run in low precision.
268
+ """
269
+
270
+ def __init__(
271
+ self,
272
+ config: ModelConfig,
273
+ size: Optional[int] = None,
274
+ low_precision: bool = False,
275
+ elementwise_affine: Optional[bool] = None,
276
+ eps: float = 1e-05,
277
+ ):
278
+ super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=eps)
279
+ self.low_precision = low_precision
280
+
281
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
282
+ if self.low_precision:
283
+ module_device = x.device
284
+ downcast_x = self._cast_if_autocast_enabled(x)
285
+ downcast_weight = (
286
+ self._cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
287
+ )
288
+ downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
289
+ with torch.autocast(enabled=False, device_type=module_device.type):
290
+ return F.layer_norm(
291
+ downcast_x, self.normalized_shape, weight=downcast_weight, bias=downcast_bias, eps=self.eps
292
+ )
293
+ else:
294
+ return F.layer_norm(x, self.normalized_shape, weight=self.weight, bias=self.bias, eps=self.eps)
295
+
296
+
297
+ class RMSLayerNorm(LayerNormBase):
298
+ """
299
+ RMS layer norm, a simplified :class:`LayerNorm` implementation
300
+ """
301
+
302
+ def __init__(
303
+ self,
304
+ config: ModelConfig,
305
+ size: Optional[int] = None,
306
+ elementwise_affine: Optional[bool] = None,
307
+ eps: float = 1e-5,
308
+ ):
309
+ super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=config.rms_norm_eps)
310
+
311
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
312
+ with torch.autocast(enabled=False, device_type=x.device.type):
313
+ og_dtype = x.dtype
314
+ x = x.to(torch.float32)
315
+ variance = x.pow(2).mean(-1, keepdim=True)
316
+ x = x * torch.rsqrt(variance + self.eps)
317
+ x = x.to(og_dtype)
318
+
319
+ if self.weight is not None:
320
+ if self.bias is not None:
321
+ return self.weight * x + self.bias
322
+ else:
323
+ return self.weight * x
324
+ else:
325
+ return x
326
+
327
+
328
+ class GemmaRMSLayerNorm(LayerNormBase):
329
+ """
330
+ Gemma RMS layer norm, a simplified :class:`LayerNorm` implementation
331
+ """
332
+
333
+ def __init__(
334
+ self,
335
+ config: ModelConfig,
336
+ size: Optional[int] = None,
337
+ elementwise_affine: Optional[bool] = None,
338
+ eps: float = 1e-5,
339
+ ):
340
+ super().__init__(config, size=size, elementwise_affine=elementwise_affine, eps=config.rms_norm_eps)
341
+
342
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
343
+ with torch.autocast(enabled=False, device_type=x.device.type):
344
+ og_dtype = x.dtype
345
+ x = x.to(torch.float32)
346
+ variance = x.pow(2).mean(-1, keepdim=True)
347
+ x = x * torch.rsqrt(variance + self.eps)
348
+ x = x.to(og_dtype)
349
+
350
+ if self.weight is not None:
351
+ if self.bias is not None:
352
+ return x * (1 + self.weight) + self.bias
353
+ else:
354
+ return x * (1 + self.weight)
355
+ else:
356
+ return x
357
+
358
+
359
+ class RotaryEmbedding(nn.Module):
360
+ """
361
+ [Rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864).
362
+ """
363
+
364
+ def __init__(self, config: ModelConfig, cache: BufferCache):
365
+ super().__init__()
366
+ self.config = config
367
+ self.__cache = cache
368
+ # Warm up cache.
369
+ self.rope_theta = config.rope_theta
370
+ self.get_rotary_embedding(config.max_sequence_length, _non_meta_init_device(config))
371
+
372
+ def get_rotary_embedding(self, seq_len: int, device: torch.device) -> Tuple[torch.Tensor, torch.Tensor]:
373
+ if (
374
+ (pos_sin := self.__cache.get("rope_pos_sin")) is not None
375
+ and (pos_cos := self.__cache.get("rope_pos_cos")) is not None
376
+ and pos_sin.shape[-2] >= seq_len
377
+ and pos_cos.shape[-2] >= seq_len
378
+ ):
379
+ if pos_sin.device != device:
380
+ pos_sin = pos_sin.to(device)
381
+ self.__cache["rope_pos_sin"] = pos_sin
382
+ if pos_cos.device != device:
383
+ pos_cos = pos_cos.to(device)
384
+ self.__cache["rope_pos_cos"] = pos_cos
385
+ return pos_sin[:, :, :seq_len, :], pos_cos[:, :, :seq_len, :]
386
+
387
+ with torch.autocast(device.type, enabled=False):
388
+ dim = self.config.d_model // self.config.n_heads
389
+ inv_freq = 1.0 / (self.rope_theta ** (torch.arange(0, dim, 2, device=device, dtype=torch.float) / dim))
390
+ seq = torch.arange(seq_len, device=device, dtype=torch.float)
391
+ freqs = einsum("i , j -> i j", seq, inv_freq)
392
+ positions = torch.cat((freqs, freqs), dim=-1)
393
+ pos_sin, pos_cos = positions.sin()[None, None, :, :], positions.cos()[None, None, :, :]
394
+ self.__cache["rope_pos_sin"] = pos_sin
395
+ self.__cache["rope_pos_cos"] = pos_cos
396
+ return pos_sin, pos_cos
397
+
398
+ def rotate_half(self, x: torch.Tensor) -> torch.Tensor:
399
+ B, nh, T, hs = x.size()
400
+ x = x.view(B, nh, T, 2, hs // 2)
401
+ x1, x2 = x.unbind(dim=-2)
402
+ return torch.cat((-x2, x1), dim=-1)
403
+
404
+ def apply_rotary_pos_emb(self, pos_sin: torch.Tensor, pos_cos: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
405
+ return ((t * pos_cos) + (self.rotate_half(t) * pos_sin)).to(t.dtype)
406
+
407
+ def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
408
+ if self.config.rope_full_precision:
409
+ q_, k_ = q.float(), k.float()
410
+ else:
411
+ q_, k_ = q, k
412
+
413
+ with torch.autocast(q.device.type, enabled=False):
414
+ query_len, key_len = q_.shape[-2], k_.shape[-2] # could be different if layer_past not None
415
+ pos_sin, pos_cos = self.get_rotary_embedding(key_len, q_.device)
416
+ pos_sin = pos_sin.type_as(q_)
417
+ pos_cos = pos_cos.type_as(q_)
418
+ q_ = self.apply_rotary_pos_emb(
419
+ pos_sin[:, :, key_len - query_len : key_len, :],
420
+ pos_cos[:, :, key_len - query_len : key_len, :],
421
+ q_,
422
+ )
423
+ k_ = self.apply_rotary_pos_emb(pos_sin, pos_cos, k_)
424
+ return q_.type_as(q), k_.type_as(k)
425
+
426
+
427
+ class Activation(nn.Module):
428
+ def __init__(self, config: ModelConfig):
429
+ super().__init__()
430
+ self.config = config
431
+
432
+ @abstractmethod
433
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
434
+ raise NotImplementedError
435
+
436
+ @property
437
+ @abstractmethod
438
+ def output_multiplier(self) -> float:
439
+ raise NotImplementedError
440
+
441
+ @classmethod
442
+ def build(cls, config: ModelConfig) -> Activation:
443
+ if config.activation_type == ActivationType.gelu:
444
+ return cast(Activation, GELU(approximate="none"))
445
+ elif config.activation_type == ActivationType.relu:
446
+ return cast(Activation, ReLU(inplace=False))
447
+ elif config.activation_type == ActivationType.silu:
448
+ return cast(Activation, SiLU(inplace=False))
449
+ elif config.activation_type == ActivationType.swiglu:
450
+ return SwiGLU(config)
451
+ else:
452
+ raise NotImplementedError(f"Unknown activation: '{config.activation_type}'")
453
+
454
+
455
+ class GELU(nn.GELU):
456
+ @property
457
+ def output_multiplier(self) -> float:
458
+ return 1.0
459
+
460
+
461
+ class ReLU(nn.ReLU):
462
+ @property
463
+ def output_multiplier(self) -> float:
464
+ return 1.0
465
+
466
+ class SiLU(nn.SiLU):
467
+ @property
468
+ def output_multiplier(self) -> float:
469
+ return 1.0
470
+
471
+ class SwiGLU(Activation):
472
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
473
+ x, gate = x.chunk(2, dim=-1)
474
+ return F.silu(gate) * x
475
+
476
+ @property
477
+ def output_multiplier(self) -> float:
478
+ return 0.5
479
+
480
+
481
+ def causal_attention_bias(seq_len: int, device: torch.device) -> torch.FloatTensor:
482
+ att_bias = torch.triu(
483
+ torch.ones(seq_len, seq_len, device=device, dtype=torch.float),
484
+ diagonal=1,
485
+ )
486
+ att_bias.masked_fill_(att_bias == 1, torch.finfo(att_bias.dtype).min)
487
+ return att_bias.view(1, 1, seq_len, seq_len) # type: ignore
488
+
489
+
490
+ def get_causal_attention_bias(cache: BufferCache, seq_len: int, device: torch.device) -> torch.Tensor:
491
+ if (causal_bias := cache.get("causal_attention_bias")) is not None and causal_bias.shape[-1] >= seq_len:
492
+ if causal_bias.device != device:
493
+ causal_bias = causal_bias.to(device)
494
+ cache["causal_attention_bias"] = causal_bias
495
+ return causal_bias
496
+ with torch.autocast(device.type, enabled=False):
497
+ causal_bias = causal_attention_bias(seq_len, device)
498
+ cache["causal_attention_bias"] = causal_bias
499
+ return causal_bias
500
+
501
+
502
+ def alibi_attention_bias(seq_len: int, config: ModelConfig, device: torch.device) -> torch.FloatTensor:
503
+ alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, 1, seq_len)
504
+
505
+ # shape: (1, 1, seq_len, seq_len)
506
+ alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.float, device=device).view(1, 1, seq_len, 1)
507
+ alibi_bias.abs_().mul_(-1)
508
+
509
+ # shape: (n_heads,)
510
+ m = torch.arange(1, config.n_heads + 1, dtype=torch.float, device=device)
511
+ m.mul_(config.alibi_bias_max / config.n_heads)
512
+
513
+ # shape: (1, n_heads, seq_len, seq_len)
514
+ return alibi_bias * (1.0 / (2 ** m.view(1, config.n_heads, 1, 1))) # type: ignore
515
+
516
+
517
+ class LLaDABlock(nn.Module):
518
+ """
519
+ A base class for transformer block implementations.
520
+ """
521
+
522
+ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache):
523
+ super().__init__()
524
+ self.layer_id = layer_id
525
+ self.config = config
526
+ self.hidden_size = (
527
+ config.mlp_hidden_size if config.mlp_hidden_size is not None else config.mlp_ratio * config.d_model
528
+ )
529
+ self.__cache = cache
530
+ assert config.d_model % config.n_heads == 0
531
+
532
+ self._activation_checkpoint_fn = None
533
+
534
+ # Dropout.
535
+ self.dropout = Dropout(config.residual_dropout)
536
+
537
+ # Layer norms.
538
+ self.k_norm: Optional[LayerNormBase] = None
539
+ self.q_norm: Optional[LayerNormBase] = None
540
+ if config.attention_layer_norm:
541
+ self.k_norm = LayerNormBase.build(
542
+ config,
543
+ size=(config.d_model // config.n_heads) * config.effective_n_kv_heads,
544
+ elementwise_affine=config.attention_layer_norm_with_affine,
545
+ )
546
+ self.q_norm = LayerNormBase.build(config, elementwise_affine=config.attention_layer_norm_with_affine)
547
+
548
+ # Activation function.
549
+ self.act = Activation.build(config)
550
+ assert (self.act.output_multiplier * self.hidden_size) % 1 == 0
551
+
552
+ # Attention output projection.
553
+ self.attn_out = nn.Linear(
554
+ config.d_model, config.d_model, bias=config.include_bias, device=config.init_device
555
+ )
556
+
557
+ # Feed-forward output projection.
558
+ self.ff_out = nn.Linear(
559
+ int(self.act.output_multiplier * self.hidden_size),
560
+ config.d_model,
561
+ bias=config.include_bias,
562
+ device=config.init_device,
563
+ )
564
+ self.ff_out._is_residual = True # type: ignore
565
+
566
+ # Rotary embeddings.
567
+ if self.config.rope:
568
+ self.rotary_emb = RotaryEmbedding(config, self.__cache)
569
+
570
+ self.flash_attn_func = None
571
+ if config.flash_attention:
572
+ try:
573
+ from flash_attn import flash_attn_func # type: ignore
574
+
575
+ self.flash_attn_func = flash_attn_func
576
+ except ModuleNotFoundError:
577
+ pass
578
+
579
+ def reset_parameters(self):
580
+ if self.k_norm is not None:
581
+ self.k_norm.reset_parameters()
582
+ if self.q_norm is not None:
583
+ self.q_norm.reset_parameters()
584
+ init_weights(
585
+ self.config,
586
+ self.attn_out,
587
+ d=self.config.d_model,
588
+ layer_id=self.layer_id,
589
+ type_of_module=ModuleType.out_module,
590
+ )
591
+ init_weights(
592
+ self.config,
593
+ self.ff_out,
594
+ d=self.ff_out.in_features,
595
+ layer_id=self.layer_id,
596
+ type_of_module=ModuleType.out_module,
597
+ )
598
+
599
+ def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]):
600
+ if strategy == ActivationCheckpointingStrategy.fine_grained:
601
+ self._activation_checkpoint_fn = activation_checkpoint_function(self.config)
602
+ else:
603
+ self._activation_checkpoint_fn = None
604
+
605
+ @classmethod
606
+ def _cast_attn_bias(cls, bias: torch.Tensor, input_dtype: torch.dtype) -> torch.Tensor:
607
+ target_dtype = input_dtype
608
+ # NOTE: `is_autocast_enabled()` only checks for CUDA autocast, so we use the separate function
609
+ # `is_autocast_cpu_enabled()` for CPU autocast.
610
+ # See https://github.com/pytorch/pytorch/issues/110966.
611
+ if bias.device.type == "cuda" and torch.is_autocast_enabled():
612
+ target_dtype = torch.get_autocast_gpu_dtype()
613
+ elif bias.device.type == "cpu" and torch.is_autocast_cpu_enabled():
614
+ target_dtype = torch.get_autocast_cpu_dtype()
615
+ if bias.dtype != target_dtype:
616
+ bias = bias.to(target_dtype)
617
+ ensure_finite_(bias, check_neg_inf=True, check_pos_inf=False)
618
+ return bias
619
+
620
+ def _scaled_dot_product_attention(
621
+ self,
622
+ q: torch.Tensor,
623
+ k: torch.Tensor,
624
+ v: torch.Tensor,
625
+ attn_mask: Optional[torch.Tensor] = None,
626
+ dropout_p: float = 0.0,
627
+ is_causal: bool = False,
628
+ ) -> torch.Tensor:
629
+ """
630
+ Computes scaled dot product attention on query, key and value tensors, using an optional
631
+ attention mask if passed, and applying dropout if a probability greater than 0.0 is specified.
632
+ """
633
+ if self.flash_attn_func is not None and attn_mask is None:
634
+ r = self.flash_attn_func(
635
+ q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), dropout_p=dropout_p, causal=False
636
+ )
637
+ return r.transpose(1, 2)
638
+ else:
639
+ # torch's sdpa doesn't support GQA, so we're doing this
640
+ assert k.size(1) == v.size(1)
641
+ num_kv_heads = k.size(1)
642
+ num_q_heads = q.size(1)
643
+ if num_q_heads != num_kv_heads:
644
+ assert num_q_heads % num_kv_heads == 0
645
+ k = k.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads)
646
+ v = v.repeat_interleave(num_q_heads // num_kv_heads, dim=1, output_size=num_q_heads)
647
+
648
+ # Modify: MDM set causal to False, and with no attn_mask.
649
+ return F.scaled_dot_product_attention(
650
+ q,
651
+ k,
652
+ v,
653
+ attn_mask=None,
654
+ dropout_p=dropout_p,
655
+ is_causal=False,
656
+ )
657
+
658
+ def attention(
659
+ self,
660
+ q: torch.Tensor,
661
+ k: torch.Tensor,
662
+ v: torch.Tensor,
663
+ attention_bias: Optional[torch.Tensor] = None,
664
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
665
+ use_cache: bool = False,
666
+ ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
667
+ B, T, C = q.size() # batch size, sequence length, d_model
668
+ dtype = k.dtype
669
+
670
+ # Optionally apply layer norm to keys and queries.
671
+ if self.q_norm is not None and self.k_norm is not None:
672
+ q = self.q_norm(q).to(dtype=dtype)
673
+ k = self.k_norm(k).to(dtype=dtype)
674
+
675
+ # Move head forward to be next to the batch dim.
676
+ # shape: (B, nh, T, hs)
677
+ q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
678
+ # shape: (B, n_kv_h, T, hs)
679
+ k = k.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2)
680
+ # shape: (B, n_kv_h, T, hs)
681
+ v = v.view(B, T, self.config.effective_n_kv_heads, C // self.config.n_heads).transpose(1, 2)
682
+
683
+ if layer_past is not None:
684
+ past_key, past_value = layer_past
685
+ k = torch.cat((past_key, k), dim=-2)
686
+ v = torch.cat((past_value, v), dim=-2)
687
+
688
+ present = (k, v) if use_cache else None
689
+ query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None
690
+
691
+ if self.config.rope:
692
+ # Apply rotary embeddings.
693
+ q, k = self.rotary_emb(q, k)
694
+
695
+ if attention_bias is not None:
696
+ # Resize and cast attention bias.
697
+ # The current dtype of the attention bias might not match the dtype that the SDP attn function will
698
+ # run in if AMP is enabled, and this can be a problem if some tokens are masked out due to padding
699
+ # as down-casting the attention bias to the autocast precision will result in -infs, which will
700
+ # cause the SDP attn function to produce NaNs.
701
+ attention_bias = self._cast_attn_bias(
702
+ attention_bias[:, :, key_len - query_len : key_len, :key_len], dtype
703
+ )
704
+
705
+ # Get the attention scores.
706
+ # shape: (B, nh, T, hs)
707
+ att = self._scaled_dot_product_attention(
708
+ q,
709
+ k,
710
+ v,
711
+ attn_mask=None,
712
+ dropout_p=0.0 if not self.training else self.config.attention_dropout,
713
+ is_causal=False,
714
+ )
715
+
716
+ # Re-assemble all head outputs side-by-side.
717
+ att = att.transpose(1, 2).contiguous().view(B, T, C)
718
+
719
+ # Apply output projection.
720
+ return self.attn_out(att), present
721
+
722
+ @abstractmethod
723
+ def forward(
724
+ self,
725
+ x: torch.Tensor,
726
+ attention_bias: Optional[torch.FloatTensor] = None,
727
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
728
+ use_cache: bool = False,
729
+ ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
730
+ raise NotImplementedError
731
+
732
+ @classmethod
733
+ def build(cls, layer_id: int, config: ModelConfig, cache: BufferCache) -> LLaDABlock:
734
+ if config.block_type == BlockType.sequential:
735
+ return LLaDASequentialBlock(layer_id, config, cache)
736
+ elif config.block_type == BlockType.llama:
737
+ return LLaDALlamaBlock(layer_id, config, cache)
738
+ else:
739
+ raise NotImplementedError(f"Unknown block type: '{config.block_type}'")
740
+
741
+
742
+ class LLaDASequentialBlock(LLaDABlock):
743
+ """
744
+ This is a typical transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))``
745
+ (plus another skip connection).
746
+ """
747
+
748
+ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache):
749
+ super().__init__(layer_id, config, cache)
750
+ # Layer norms.
751
+ self.attn_norm = LayerNorm.build(config)
752
+ self.ff_norm = LayerNorm.build(config)
753
+ # Attention input projection. Projects x -> (q, k, v)
754
+ head_dim = config.d_model // config.n_heads
755
+ self.fused_dims = (
756
+ config.d_model,
757
+ config.effective_n_kv_heads * head_dim,
758
+ config.effective_n_kv_heads * head_dim,
759
+ )
760
+ self.att_proj = nn.Linear(
761
+ config.d_model, sum(self.fused_dims), bias=config.include_bias | config.include_qkv_bias, device=config.init_device
762
+ )
763
+ # Feed-forward input projection.
764
+ self.ff_proj = nn.Linear(
765
+ config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device
766
+ )
767
+
768
+ def reset_parameters(self):
769
+ super().reset_parameters()
770
+ self.attn_norm.reset_parameters()
771
+ self.ff_norm.reset_parameters()
772
+ # NOTE: the standard deviation for these weights does not depend on the layer.
773
+ init_weights(
774
+ self.config, self.att_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module
775
+ )
776
+ init_weights(
777
+ self.config, self.ff_proj, d=self.config.d_model, layer_id=None, type_of_module=ModuleType.in_module
778
+ )
779
+
780
+ def forward(
781
+ self,
782
+ x: torch.Tensor,
783
+ attention_bias: Optional[torch.Tensor] = None,
784
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
785
+ use_cache: bool = False,
786
+ ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
787
+ # Get query, key, value projections.
788
+ # shape:
789
+ # - for regular attn q, k, v: (batch_size, seq_len, d_model)
790
+ # - for multi-query attn q: (batch_size, seq_len, d_model)
791
+ # k, v: (batch_size, seq_len, d_model // n_heads)
792
+ # - for group query attn q: (batch_size, seq_len, d_model)
793
+ # k, v: (batch_size, seq_len, d_model // n_kv_heads)
794
+ if self._activation_checkpoint_fn is not None:
795
+ q, k, v = self.att_proj(self._activation_checkpoint_fn(self.attn_norm, x)).split(
796
+ self.fused_dims, dim=-1
797
+ )
798
+ else:
799
+ q, k, v = self.att_proj(self.attn_norm(x)).split(self.fused_dims, dim=-1)
800
+
801
+ # Get attention scores.
802
+ if self._activation_checkpoint_fn is not None:
803
+ att, cache = self._activation_checkpoint_fn( # type: ignore
804
+ self.attention, q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache
805
+ )
806
+ else:
807
+ att, cache = self.attention(q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache)
808
+
809
+ # Add attention scores.
810
+ # shape: (B, T, C)
811
+ x = x + self.dropout(att)
812
+
813
+ # Add feed-forward projection.
814
+ # shape: (batch_size, seq_len, d_model)
815
+ og_x = x
816
+ if self._activation_checkpoint_fn is not None:
817
+ x = self._activation_checkpoint_fn(self.ff_norm, x) # type: ignore
818
+ else:
819
+ x = self.ff_norm(x)
820
+ x = self.ff_proj(x)
821
+ if self._activation_checkpoint_fn is not None:
822
+ x = self._activation_checkpoint_fn(self.act, x) # type: ignore
823
+ else:
824
+ x = self.act(x)
825
+ x = self.ff_out(x)
826
+ x = self.dropout(x)
827
+ x = og_x + x
828
+
829
+ return x, cache
830
+
831
+
832
+ class LLaDALlamaBlock(LLaDABlock):
833
+ """
834
+ This is a transformer block where the output is computed as ``MLP(LN(x + Attention(LN(x))))``
835
+ (plus another skip connection). This block is similar to `LLaDASequentialBlock`
836
+ but some operations have slightly different implementations to imitate the
837
+ behavior of Llama.
838
+ """
839
+
840
+ def __init__(self, layer_id: int, config: ModelConfig, cache: BufferCache):
841
+ super().__init__(layer_id, config, cache)
842
+ # Layer norms.
843
+ self.attn_norm = LayerNorm.build(config)
844
+ self.ff_norm = LayerNorm.build(config)
845
+ self.__cache = cache
846
+
847
+ # Attention input projection. Projects x -> (q, k, v)
848
+ head_dim = config.d_model // config.n_heads
849
+ q_proj_out_dim = config.d_model
850
+ k_proj_out_dim = config.effective_n_kv_heads * head_dim
851
+ v_proj_out_dim = config.effective_n_kv_heads * head_dim
852
+ self.q_proj = nn.Linear(
853
+ config.d_model, q_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device
854
+ )
855
+ self.k_proj = nn.Linear(
856
+ config.d_model, k_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device
857
+ )
858
+ self.v_proj = nn.Linear(
859
+ config.d_model, v_proj_out_dim, bias=config.include_bias | config.include_qkv_bias, device=config.init_device
860
+ )
861
+
862
+ # Feed-forward input projection.
863
+ self.ff_proj = nn.Linear(
864
+ config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device
865
+ )
866
+ # new add
867
+ self.up_proj = nn.Linear(
868
+ config.d_model, self.hidden_size, bias=config.include_bias, device=config.init_device
869
+ )
870
+
871
+ def reset_parameters(self):
872
+ super().reset_parameters()
873
+ self.attn_norm.reset_parameters()
874
+ self.ff_norm.reset_parameters()
875
+ # NOTE: the standard deviation for these weights does not depend on the layer.
876
+ init_weights(self.config, self.q_proj, d=self.config.d_model, layer_id=None)
877
+ init_weights(self.config, self.k_proj, d=self.config.d_model, layer_id=None)
878
+ init_weights(self.config, self.v_proj, d=self.config.d_model, layer_id=None)
879
+ init_weights(self.config, self.ff_proj, d=self.config.d_model, layer_id=None)
880
+ init_weights(self.config, self.up_proj, d=self.config.d_model, layer_id=None) # new add
881
+
882
+ def forward(
883
+ self,
884
+ x: torch.Tensor,
885
+ attention_bias: Optional[torch.Tensor] = None,
886
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
887
+ use_cache: bool = False,
888
+ ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
889
+ # Get query, key, value projections.
890
+ # shape:
891
+ # - for regular attn q, k, v: (batch_size, seq_len, d_model)
892
+ # - for multi-query attn q: (batch_size, seq_len, d_model)
893
+ # k, v: (batch_size, seq_len, d_model // n_heads)
894
+ # - for group query attn q: (batch_size, seq_len, d_model)
895
+ # k, v: (batch_size, seq_len, d_model // n_kv_heads)
896
+ x_normed = self.attn_norm(x)
897
+ q = self.q_proj(x_normed)
898
+ k = self.k_proj(x_normed)
899
+ v = self.v_proj(x_normed)
900
+
901
+ # Get attention scores.
902
+ if self._activation_checkpoint_fn is not None:
903
+ att, cache = self._activation_checkpoint_fn( # type: ignore
904
+ self.attention, q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache
905
+ )
906
+ else:
907
+ att, cache = self.attention(q, k, v, attention_bias, layer_past=layer_past, use_cache=use_cache)
908
+
909
+ # Add attention scores.
910
+ # shape: (B, T, C)
911
+ x = x + self.dropout(att)
912
+
913
+ # Add feed-forward projection.
914
+ # shape: (batch_size, seq_len, d_model)
915
+ og_x = x
916
+ if self._activation_checkpoint_fn is not None:
917
+ x = self._activation_checkpoint_fn(self.ff_norm, x) # type: ignore
918
+ else:
919
+ x = self.ff_norm(x)
920
+ x, x_up = self.ff_proj(x), self.up_proj(x) # new add
921
+ if self._activation_checkpoint_fn is not None:
922
+ x = self._activation_checkpoint_fn(self.act, x) # type: ignore
923
+ else:
924
+ x = self.act(x)
925
+ #x = x * x_up # new add
926
+ x = self.ff_out(x)
927
+ x = self.dropout(x)
928
+ x = og_x + x
929
+
930
+ return x, cache
931
+
932
+
933
+ class LLaDAOutput(NamedTuple):
934
+ logits: torch.FloatTensor
935
+ """
936
+ A tensor of shape `(batch_size, seq_len, vocab_size)` representing the log probabilities
937
+ for the next token *before* normalization via (log) softmax.
938
+ """
939
+
940
+ attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]
941
+ """
942
+ Attention keys and values from each block.
943
+ """
944
+
945
+ hidden_states: Optional[Tuple[torch.Tensor]]
946
+ """
947
+ Hidden states from each block.
948
+ """
949
+
950
+
951
+ class LLaDAGenerateOutput(NamedTuple):
952
+ token_ids: torch.LongTensor
953
+ """
954
+ The generated token IDs, a tensor of shape `(batch_size, beam_size, max_steps)`.
955
+ These do *not* include the original input IDs.
956
+ """
957
+
958
+ scores: torch.FloatTensor
959
+ """
960
+ The scores of the generated sequences, a tensor of shape `(batch_size, beam_size)`.
961
+ """
962
+
963
+
964
+ class LLaDABlockGroup(nn.ModuleList):
+     def __init__(self, config: ModelConfig, layer_offset: int, modules: Optional[Iterable[nn.Module]] = None):
+         super().__init__(modules)
+         self.config = config
+         self.layer_offset = layer_offset
+         self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None
+         self._activation_checkpoint_fn = activation_checkpoint_function(self.config)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_bias: Optional[torch.FloatTensor] = None,
+         layers_past: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None,
+         use_cache: bool = False,
+     ) -> Tuple[torch.Tensor, Optional[List[Tuple[torch.Tensor, torch.Tensor]]]]:
+         attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None
+         for block_idx, block in enumerate(self):
+             layer_past = None if layers_past is None else layers_past[block_idx]
+             block_idx += self.layer_offset
+             if (
+                 (self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.whole_layer)
+                 or (
+                     self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_two
+                     and block_idx % 2 == 0
+                 )
+                 or (
+                     self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_three
+                     and block_idx % 3 == 0
+                 )
+                 or (
+                     self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_four
+                     and block_idx % 4 == 0
+                 )
+             ):
+                 # shape: (batch_size, seq_len, d_model)
+                 x, cache = self._activation_checkpoint_fn(  # type: ignore
+                     block, x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache
+                 )
+             else:
+                 # shape: (batch_size, seq_len, d_model)
+                 x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache)
+             if attn_key_values is not None:
+                 assert cache is not None
+                 attn_key_values.append(cache)
+         return x, attn_key_values
+
+     def reset_parameters(self):
+         for block in self:
+             block.reset_parameters()
+
+     def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]):
+         self.activation_checkpointing_strategy = strategy
+         for block in self:
+             block.set_activation_checkpointing(strategy)
+
+
+ class LLaDAModel(nn.Module):
+     def __init__(self, config: ModelConfig, init_params: bool = True):
+         super().__init__()
+         self.config = config
+         self.__cache = BufferCache()
+
+         # Validate config.
+         if self.config.alibi and self.config.flash_attention:
+             raise Exception("ALiBi is currently not supported with FlashAttention")
+
+         if self.config.alibi and self.config.rope:
+             raise Exception("ALiBi and RoPE are mutually exclusive")
+
+         if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size:
+             if self.config.embedding_size < self.config.vocab_size:
+                 raise Exception("embedding size should be at least as big as vocab size")
+             elif self.config.embedding_size % 128 != 0:
+                 import warnings
+
+                 warnings.warn(
+                     "Embedding size is not a multiple of 128! This could hurt throughput performance.", UserWarning
+                 )
+
+         self.activation_checkpointing_strategy: Optional[ActivationCheckpointingStrategy] = None
+         self._activation_checkpoint_fn: Callable = activation_checkpoint_function(self.config)
+
+         if not (
+             0 < self.config.block_group_size <= self.config.n_layers
+             and self.config.n_layers % self.config.block_group_size == 0
+         ):
+             raise Exception("n layers must be divisible by block group size")
+
+         torch.backends.cuda.enable_flash_sdp(True)
+         torch.backends.cuda.enable_mem_efficient_sdp(False)  # this is super slow so make sure torch won't use it
+
+         self.transformer = nn.ModuleDict(
+             dict(
+                 wte=nn.Embedding(
+                     config.embedding_size or config.vocab_size, config.d_model, device=config.init_device
+                 ),
+                 emb_drop=Dropout(config.embedding_dropout),
+                 ln_f=LayerNorm.build(config),
+             )
+         )
+
+         blocks = [LLaDABlock.build(i, config, self.__cache) for i in range(config.n_layers)]
+         if self.config.block_group_size > 1:
+             block_groups = [
+                 LLaDABlockGroup(config, i, blocks[i : i + config.block_group_size])
+                 for i in range(0, config.n_layers, config.block_group_size)
+             ]
+             self.transformer.update({"block_groups": nn.ModuleList(block_groups)})
+         else:
+             self.transformer.update({"blocks": nn.ModuleList(blocks)})
+
+         if not (self.config.alibi or self.config.rope):
+             self.transformer.update(
+                 {"wpe": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)}
+             )
+         if not config.weight_tying:
+             self.transformer.update(
+                 {
+                     "ff_out": nn.Linear(
+                         config.d_model,
+                         config.embedding_size or config.vocab_size,
+                         bias=config.include_bias,
+                         device=config.init_device,
+                     )
+                 }
+             )
+         # When `init_device="meta"` FSDP will call `reset_parameters()` to initialize weights.
+         if init_params and self.config.init_device != "meta":
+             self.reset_parameters()
+         self.__num_fwd_flops: Optional[int] = None
+
+         # Warm up cache.
+         if self.config.alibi:
+             get_causal_attention_bias(self.__cache, config.max_sequence_length, _non_meta_init_device(config))
+             self.get_alibi_attention_bias(config.max_sequence_length, _non_meta_init_device(config))
+
+     def set_activation_checkpointing(self, strategy: Optional[ActivationCheckpointingStrategy]):
+         self.activation_checkpointing_strategy = strategy
+         if self.config.block_group_size != 1:
+             for block_group in self.transformer.block_groups:
+                 block_group.set_activation_checkpointing(strategy)
+         else:
+             for block in self.transformer.blocks:
+                 block.set_activation_checkpointing(strategy)
+
+     @property
+     def device(self) -> torch.device:
+         device: torch.device = self.transformer.wte.weight.device  # type: ignore
+         if device.type == "meta":
+             return _non_meta_init_device(self.config)
+         else:
+             return device
+
+     def reset_parameters(self):
+         log.info("Initializing model parameters...")
+         # Top-level embeddings / linear layers.
+         init_weights(
+             self.config,
+             self.transformer.wte,  # type: ignore
+             std_factor=(0.5 * math.sqrt(self.config.d_model)) if self.config.scale_logits else 1.0,
+             type_of_module=ModuleType.emb,
+         )
+         if hasattr(self.transformer, "wpe"):
+             init_weights(self.config, self.transformer.wpe, type_of_module=ModuleType.emb)  # type: ignore
+
+         # Top-level layer norm.
+         self.transformer.ln_f.reset_parameters()  # type: ignore
+
+         # Output weights.
+         if hasattr(self.transformer, "ff_out"):
+             init_weights(self.config, self.transformer.ff_out, type_of_module=ModuleType.final_out)  # type: ignore
+
+         # Let the blocks handle themselves.
+         if self.config.block_group_size == 1:
+             for block in self.transformer.blocks:
+                 block.reset_parameters()
+         else:
+             for block_group in self.transformer.block_groups:
+                 block_group.reset_parameters()
+
+     def get_alibi_attention_bias(self, seq_len: int, device: torch.device) -> torch.Tensor:
+         if (alibi_bias := self.__cache.get("alibi_attention_bias")) is not None and alibi_bias.shape[-1] >= seq_len:
+             if alibi_bias.device != device:
+                 alibi_bias = alibi_bias.to(device)
+                 self.__cache["alibi_attention_bias"] = alibi_bias
+             return alibi_bias
+         with torch.autocast(device.type, enabled=False):
+             alibi_bias = alibi_attention_bias(seq_len, self.config, device)
+         self.__cache["alibi_attention_bias"] = alibi_bias
+         return alibi_bias
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         input_embeddings: Optional[torch.FloatTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         attention_bias: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None,
+         use_cache: bool = False,
+         last_logits_only: bool = False,
+         output_hidden_states: Optional[bool] = None,
+     ) -> LLaDAOutput:
+         """
+         :param input_ids: A tensor of shape `(batch_size, seq_len)`.
+         :param input_embeddings: A tensor of shape `(batch_size, seq_len, d_model)` with input
+             embeddings. When provided, it is treated as the output of the input embedding layer.
+         :param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates
+             which input IDs are masked. A `1` value in the mask means that
+             the corresponding input ID should *not* be ignored. A `0` means
+             that the corresponding input ID is masked.
+             This has the same meaning as the `attention_mask` in HuggingFace's `transformers`
+             library.
+         :param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`,
+             `(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used
+             to introduce causal or other biases.
+             If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]`
+             indicates that the i-th element in the sequence is allowed to attend to the j-th
+             element in the sequence.
+             If the tensor is a float tensor, it will just be added to the attention
+             scores before the softmax.
+             The default is causal, which corresponds to a lower-diagonal byte matrix of ones.
+         :param past_key_values: Pre-computed keys and values for each attention block.
+             Can be used to speed up sequential decoding. The `input_ids` which have
+             their past given to this model should not be passed as `input_ids` as they have already been computed.
+         :param use_cache: If `True`, return key and value tensors for each block.
+         :param last_logits_only: If `True`, only compute the logits for the last token of each sequence.
+             This can speed up decoding when you only care about the next token.
+         """
+         # Basic MDM model config checks: a masked diffusion model attends bidirectionally,
+         # so ALiBi extrapolation and the KV cache do not apply, and RoPE is required.
+         assert not self.config.alibi, "ALiBi length extrapolation is not supported for MDM."
+         assert self.config.rope, "RoPE must be used in the Llama encoder for MDM."
+         assert past_key_values is None and not use_cache, "The KV cache is not supported for MDM."
+
+         output_hidden_states = output_hidden_states if output_hidden_states is not None else False
+
+         if past_key_values:
+             assert len(past_key_values) == self.config.n_layers
+
+         batch_size, seq_len = input_ids.size() if input_embeddings is None else input_embeddings.size()[:2]
+         if past_key_values is None:
+             past_length = 0
+         else:
+             past_length = past_key_values[0][0].size(-2)
+
+         # Get embeddings of input.
+         # shape: (batch_size, seq_len, d_model)
+         x = self.transformer.wte(input_ids) if input_embeddings is None else input_embeddings  # type: ignore
+
+         if self.config.input_emb_norm:
+             x = x * (self.config.d_model**0.5)
+
+         if not (self.config.alibi or self.config.rope):
+             # Get positional embeddings.
+             # shape: (1, seq_len)
+             pos = torch.arange(past_length, past_length + seq_len, dtype=torch.long, device=x.device).unsqueeze(0)
+             # shape: (1, seq_len, d_model)
+             pos_emb = self.transformer.wpe(pos)  # type: ignore
+             x = pos_emb + x
+
+         # Add input + positional embeddings and apply dropout.
+         # shape: (batch_size, seq_len, d_model)
+         x = self.transformer.emb_drop(x)  # type: ignore
+
+         # Transform the attention mask into what the blocks expect.
+         if attention_mask is not None and 0.0 in attention_mask:
+             # shape: (batch_size, 1, 1, seq_len)
+             attention_mask = attention_mask.to(dtype=torch.float).view(batch_size, -1)[:, None, None, :]
+             attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min
+         else:
+             attention_mask = None
+
+         # Merge attention mask with attention bias.
+         if (
+             attention_bias is not None
+             or attention_mask is not None
+             or self.config.alibi
+             # NOTE (epwalsh): we need to initialize the attn bias in order for attn to work properly
+             # with key+value cache. Otherwise `F.scaled_dot_product_attention()` doesn't seem to compute
+             # scores correctly.
+             or past_key_values is not None
+         ):
+             if attention_bias is None and self.config.alibi:
+                 attention_bias = get_causal_attention_bias(
+                     self.__cache, past_length + seq_len, x.device
+                 ) + self.get_alibi_attention_bias(past_length + seq_len, x.device)
+             elif attention_bias is None:
+                 attention_bias = get_causal_attention_bias(self.__cache, past_length + seq_len, x.device)
+             elif attention_bias.dtype in (torch.int8, torch.bool):
+                 attention_bias = attention_bias.to(dtype=torch.float)
+                 attention_bias.masked_fill_(attention_bias == 0.0, torch.finfo(attention_bias.dtype).min)
+
+             # Transform to the right shape and data type.
+             mask_len = seq_len
+             if attention_mask is not None:
+                 mask_len = attention_mask.shape[-1]
+             elif past_key_values is not None:
+                 mask_len = past_key_values[0][0].shape[-2] + seq_len
+             attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(dtype=torch.float)
+
+             # Add in the masking bias.
+             if attention_mask is not None:
+                 attention_bias = attention_bias + attention_mask
+                 # Might get -infs after adding attention mask, since dtype.min + dtype.min = -inf.
+                 # `F.scaled_dot_product_attention()` doesn't handle -inf like you'd expect, instead
+                 # it can produce NaNs.
+                 ensure_finite_(attention_bias, check_neg_inf=True, check_pos_inf=False)
+
+         attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None
+
+         # decoder layers
+         all_hidden_states = []
+
+         # Apply blocks one-by-one.
+         if self.config.block_group_size == 1:
+             for block_idx, block in enumerate(self.transformer.blocks):
+                 if output_hidden_states:
+                     # add hidden states
+                     all_hidden_states.append(x)
+
+                 layer_past = None if past_key_values is None else past_key_values[block_idx]
+                 if (
+                     (self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.whole_layer)
+                     or (
+                         self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_two
+                         and block_idx % 2 == 0
+                     )
+                     or (
+                         self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_three
+                         and block_idx % 3 == 0
+                     )
+                     or (
+                         self.activation_checkpointing_strategy == ActivationCheckpointingStrategy.one_in_four
+                         and block_idx % 4 == 0
+                     )
+                 ):
+                     # shape: (batch_size, seq_len, d_model)
+                     x, cache = self._activation_checkpoint_fn(
+                         block, x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache
+                     )
+                 else:
+                     # shape: (batch_size, seq_len, d_model)
+                     x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache)
+                 if attn_key_values is not None:
+                     assert cache is not None
+                     attn_key_values.append(cache)
+         else:
+             for group_idx, block_group in enumerate(self.transformer.block_groups):
+                 if output_hidden_states:
+                     # add hidden states
+                     all_hidden_states.append(x)
+
+                 layers_past = (
+                     None
+                     if past_key_values is None
+                     else past_key_values[
+                         group_idx * self.config.block_group_size : (group_idx + 1) * self.config.block_group_size
+                     ]
+                 )
+                 x, cache = block_group(
+                     x, attention_bias=attention_bias, layers_past=layers_past, use_cache=use_cache
+                 )
+                 if attn_key_values is not None:
+                     assert cache is not None
+                     attn_key_values.extend(cache)
+
+         if last_logits_only:
+             # shape: (batch_size, 1, d_model)
+             x = x[:, -1, :].unsqueeze(1)
+
+         # Apply final layer norm.
+         # shape: (batch_size, seq_len or 1, d_model)
+         x = self.transformer.ln_f(x)  # type: ignore
+         if output_hidden_states:
+             # add final hidden state post-final-layernorm, following HuggingFace's convention
+             all_hidden_states.append(x)
+
+         # Get logits.
+         # shape: (batch_size, seq_len or 1, vocab_size)
+         if self.config.weight_tying:
+             logits = F.linear(x, self.transformer.wte.weight, None)  # type: ignore
+         else:
+             logits = self.transformer.ff_out(x)  # type: ignore
+         if self.config.scale_logits:
+             logits.mul_(1 / math.sqrt(self.config.d_model))
+
+         return LLaDAOutput(
+             logits=logits,
+             attn_key_values=attn_key_values,
+             hidden_states=tuple(all_hidden_states) if output_hidden_states else None,
+         )  # type: ignore[arg-type]
+
+
+ def create_model_config_from_pretrained_config(config: LLaDAConfig):
+     """
+     Utility function that copies the fields of a pretrained `LLaDAConfig` into
+     the internal `ModelConfig` dataclass.
+     """
+     kwargs = {}
+     for field in fields(ModelConfig):
+         kwargs[field.name] = getattr(config, field.name)
+
+     model_config = ModelConfig(**kwargs)
+     return model_config
+
+
+ class LLaDAModelLM(PreTrainedModel):
+     """
+     Extremely barebones HF model wrapper.
+     """
+
+     config_class = LLaDAConfig
+     base_model_prefix = "model"
+     _no_split_modules = ["LLaDABlock", "LLaDASequentialBlock", "LLaDALlamaBlock"]
+
+     def __init__(self, config: LLaDAConfig, model: Optional[LLaDAModel] = None, init_params: bool = False):
+         super().__init__(config)
+
+         if not model:
+             model_config = create_model_config_from_pretrained_config(config)
+             # Initialize model (always on CPU to start with so we don't run out of GPU memory).
+             model_config.init_device = "cpu"
+             self.model = LLaDAModel(model_config, init_params=init_params)
+         else:
+             self.model = model
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         attention_bias: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         cache_position: Optional[Cache] = None,  # This is a hack mitigation of an issue in transformers `4.39.x`
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         if use_cache is None:
+             use_cache = self.config.use_cache
+
+         if output_attentions:
+             raise ValueError("output_attentions is not yet supported in LLaDA")
+
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # Decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn).
+         outputs = self.model.forward(
+             input_ids=input_ids,
+             input_embeddings=inputs_embeds,
+             attention_mask=attention_mask,
+             attention_bias=attention_bias,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_hidden_states=output_hidden_states,
+         )
+
+         logits = outputs.logits
+         hidden_states = outputs.hidden_states
+
+         loss = None
+         if labels is not None:
+             import warnings
+
+             warnings.warn("Note that for LLaDA, you cannot calculate the loss here.", UserWarning)
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             logits=logits,
+             past_key_values=outputs.attn_key_values,
+             hidden_states=hidden_states,
+         )
+
+     def can_generate(self) -> bool:
+         return True
+
+     def prepare_inputs_for_generation(
+         self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple]] = None, **kwargs
+     ):
+         if past_key_values:
+             # This is because we want the model to only process the last generated token.
+             input_ids = input_ids[:, -1:]
+         model_inputs = {"input_ids": input_ids, "past_key_values": past_key_values}
+
+         model_inputs.update(kwargs)
+         model_inputs["use_cache"] = kwargs.pop("use_cache", self.config.use_cache)
+         return model_inputs
+
+     # TODO: these are required to make the implementation complete.
+     # def resize_position_embeddings(self, new_num_position_embeddings: int):
+     #     pass
+     #
+     # def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
+     #     pass
+     #
+     # def _reorder_cache(self, past_key_values, beam_idx):
+     #     pass
+
+     def get_input_embeddings(self) -> torch.nn.Module:
+         return self.model.transformer.wte
+
+     def set_input_embeddings(self, value: torch.nn.Module):
+         self.model.transformer.wte = value
+
+     def get_output_embeddings(self):
+         if self.config.weight_tying:
+             return self.model.transformer.wte
+         else:
+             return self.model.transformer.ff_out
+
+     def set_output_embeddings(self, value: torch.nn.Module):
+         if self.config.weight_tying:
+             self.model.transformer.wte = value
+         else:
+             self.model.transformer.ff_out = value
+
+     def tie_weights(self):
+         if self.config.weight_tying:
+             self.model.transformer.ff_out = self.model.transformer.wte
+
+
+ # Register the model so that it is available for transformer pipelines, auto-loading, etc.
+ AutoModel.register(LLaDAConfig, LLaDAModelLM)
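
For reference, a minimal usage sketch (not part of this commit): it loads the checkpoint through the `auto_map` entries registered above and performs a single greedy denoising step. The repo id is a hypothetical placeholder, and real LLaDA sampling iteratively re-masks low-confidence positions over many steps rather than filling the whole span at once.

import torch
from transformers import AutoModel, AutoTokenizer

repo_id = "user/llada-checkpoint"  # hypothetical placeholder, not the actual Hub id
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True).eval()

prompt = tokenizer("The capital of France is", return_tensors="pt")["input_ids"]
mask_id = model.config.mask_token_id              # mask token id comes from the model config
masked_span = torch.full((1, 8), mask_id, dtype=torch.long)
x = torch.cat([prompt, masked_span], dim=1)       # prompt followed by 8 masked slots

with torch.no_grad():
    logits = model(input_ids=x).logits            # (1, seq_len, vocab_size)
pred = logits[0, prompt.shape[1]:].argmax(dim=-1) # greedy one-shot fill of the masked span
print(tokenizer.decode(pred))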
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa0ea13d7da9643482e749d60bb3fcf9051448d8ca759f189264632131a46fdf
+ size 1239520640
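
The weights travel as a Git LFS pointer, so only the content hash and byte size are versioned here. As a rough sanity check (assuming the tensors are serialized in float32 at 4 bytes per parameter, per the config, with negligible safetensors header overhead), the byte size implies roughly 310M parameters:

size_bytes = 1_239_520_640  # from the LFS pointer above
print(f"~{size_bytes / 4 / 1e6:.0f}M parameters")  # -> ~310M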
special_tokens_map.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "additional_special_tokens": [
+     "<role>",
+     "</role>",
+     "<|arithmetic_start|>",
+     "<|arithmetic_end|>",
+     "<|number_start|>",
+     "<|number_end|>"
+   ],
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
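
Once the files are on the Hub, these mappings can be spot-checked from the tokenizer itself (a sketch; the repo id is again a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/llada-checkpoint", trust_remote_code=True)
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <|startoftext|> <|endoftext|> <|endoftext|> (pad shares EOS)
print(tok.additional_special_tokens)                # <role>, </role>, arithmetic/number markers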
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,2182 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "126080": { "content": "<|startoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126081": { "content": "<|endoftext|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126082": { "content": "[CLS]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126083": { "content": "[gMASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126084": { "content": "<|reserved_token_0|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126085": { "content": "<|reserved_token_1|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126086": { "content": "<|reserved_token_2|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126087": { "content": "<|reserved_token_3|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126088": { "content": "<|reserved_token_4|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126089": { "content": "<|reserved_token_5|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126090": { "content": "<|reserved_token_6|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126091": { "content": "<|reserved_token_7|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126092": { "content": "<|reserved_token_8|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126093": { "content": "<|reserved_token_9|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126094": { "content": "<|reserved_token_10|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126095": { "content": "<|reserved_token_11|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126096": { "content": "<|reserved_token_12|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126097": { "content": "<|reserved_token_13|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126098": { "content": "<|reserved_token_14|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126099": { "content": "<|reserved_token_15|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126100": { "content": "<|reserved_token_16|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126101": { "content": "<|reserved_token_17|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126102": { "content": "<|reserved_token_18|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126103": { "content": "<|reserved_token_19|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126104": { "content": "<|reserved_token_20|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126105": { "content": "<|reserved_token_21|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126106": { "content": "<|reserved_token_22|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126107": { "content": "<|reserved_token_23|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126108": { "content": "<|reserved_token_24|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126109": { "content": "<|reserved_token_25|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126110": { "content": "<|reserved_token_26|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126111": { "content": "<|reserved_token_27|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126112": { "content": "<|reserved_token_28|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126113": { "content": "<|reserved_token_29|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126114": { "content": "<|reserved_token_30|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126115": { "content": "<|reserved_token_31|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126116": { "content": "<|reserved_token_32|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126117": { "content": "<|reserved_token_33|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126118": { "content": "<|reserved_token_34|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126119": { "content": "<|reserved_token_35|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126120": { "content": "<|reserved_token_36|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126121": { "content": "<|reserved_token_37|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126122": { "content": "<|reserved_token_38|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126123": { "content": "<|reserved_token_39|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126124": { "content": "<|reserved_token_40|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126125": { "content": "<|reserved_token_41|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126126": { "content": "<|reserved_token_42|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126127": { "content": "<|reserved_token_43|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126128": { "content": "<|reserved_token_44|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126129": { "content": "<|reserved_token_45|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126130": { "content": "<|reserved_token_46|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126131": { "content": "<|reserved_token_47|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126132": { "content": "<|reserved_token_48|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126133": { "content": "<|reserved_token_49|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126134": { "content": "<|reserved_token_50|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126135": { "content": "<|reserved_token_51|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126136": { "content": "<|reserved_token_52|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126137": { "content": "<|reserved_token_53|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126138": { "content": "<|reserved_token_54|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126139": { "content": "<|reserved_token_55|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126140": { "content": "<|reserved_token_56|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126141": { "content": "<|reserved_token_57|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126142": { "content": "<|reserved_token_58|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126143": { "content": "<|reserved_token_59|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126144": { "content": "<|reserved_token_60|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126145": { "content": "<|reserved_token_61|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126146": { "content": "<|reserved_token_62|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126147": { "content": "<|reserved_token_63|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126148": { "content": "<|reserved_token_64|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126149": { "content": "<|reserved_token_65|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126150": { "content": "<|reserved_token_66|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126151": { "content": "<|reserved_token_67|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126152": { "content": "<|reserved_token_68|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126153": { "content": "<|reserved_token_69|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126154": { "content": "<|reserved_token_70|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126155": { "content": "<|reserved_token_71|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126156": { "content": "<|reserved_token_72|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126157": { "content": "<|reserved_token_73|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126158": { "content": "<|reserved_token_74|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126159": { "content": "<|reserved_token_75|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126160": { "content": "<|reserved_token_76|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126161": { "content": "<|reserved_token_77|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126162": { "content": "<|reserved_token_78|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126163": { "content": "<|reserved_token_79|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126164": { "content": "<|reserved_token_80|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126165": { "content": "<|reserved_token_81|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126166": { "content": "<|reserved_token_82|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126167": { "content": "<|reserved_token_83|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126168": { "content": "<|reserved_token_84|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126169": { "content": "<|reserved_token_85|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126170": { "content": "<|reserved_token_86|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126171": { "content": "<|reserved_token_87|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126172": { "content": "<|reserved_token_88|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126173": { "content": "<|reserved_token_89|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126174": { "content": "<|reserved_token_90|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126175": { "content": "<|reserved_token_91|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126176": { "content": "<|reserved_token_92|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126177": { "content": "<|reserved_token_93|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126178": { "content": "<|reserved_token_94|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126179": { "content": "<|reserved_token_95|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126180": { "content": "<|reserved_token_96|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126181": { "content": "<|reserved_token_97|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126182": { "content": "<|reserved_token_98|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126183": { "content": "<|reserved_token_99|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126184": { "content": "<|reserved_token_100|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126185": { "content": "<|reserved_token_101|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126186": { "content": "<|reserved_token_102|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126187": { "content": "<|reserved_token_103|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126188": { "content": "<|reserved_token_104|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126189": { "content": "<|reserved_token_105|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126190": { "content": "<|reserved_token_106|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126191": { "content": "<|reserved_token_107|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126192": { "content": "<|reserved_token_108|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126193": { "content": "<|reserved_token_109|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126194": { "content": "<|reserved_token_110|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126195": { "content": "<|reserved_token_111|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126196": { "content": "<|reserved_token_112|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126197": { "content": "<|reserved_token_113|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126198": { "content": "<|reserved_token_114|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126199": { "content": "<|reserved_token_115|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126200": { "content": "<|reserved_token_116|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126201": { "content": "<|reserved_token_117|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126202": { "content": "<|reserved_token_118|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126203": { "content": "<|reserved_token_119|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126204": { "content": "<|reserved_token_120|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126205": { "content": "<|reserved_token_121|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126206": { "content": "<|reserved_token_122|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126207": { "content": "<|reserved_token_123|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126208": { "content": "<|reserved_token_124|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126209": { "content": "<|reserved_token_125|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126210": { "content": "<|reserved_token_126|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126211": { "content": "<|reserved_token_127|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126212": { "content": "<|reserved_token_128|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126213": { "content": "<|reserved_token_129|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126214": { "content": "<|reserved_token_130|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126215": { "content": "<|reserved_token_131|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126216": { "content": "<|reserved_token_132|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126217": { "content": "<|reserved_token_133|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126218": { "content": "<|reserved_token_134|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126219": { "content": "<|reserved_token_135|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126220": { "content": "<|reserved_token_136|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126221": { "content": "<|reserved_token_137|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126222": { "content": "<|reserved_token_138|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126223": { "content": "<|reserved_token_139|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126224": { "content": "<|reserved_token_140|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126225": { "content": "<|reserved_token_141|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126226": { "content": "<|reserved_token_142|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126227": { "content": "<|reserved_token_143|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126228": { "content": "<|reserved_token_144|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126229": { "content": "<|reserved_token_145|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126230": { "content": "<|reserved_token_146|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126231": { "content": "<|reserved_token_147|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126232": { "content": "<|reserved_token_148|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126233": { "content": "<|reserved_token_149|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126234": { "content": "<|reserved_token_150|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126235": { "content": "<|reserved_token_151|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126236": { "content": "<|reserved_token_152|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126237": { "content": "<|reserved_token_153|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126238": { "content": "<|reserved_token_154|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126239": { "content": "<|reserved_token_155|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126240": { "content": "<|reserved_token_156|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126241": { "content": "<|reserved_token_157|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126242": { "content": "<|reserved_token_158|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126243": { "content": "<|reserved_token_159|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126244": { "content": "<|reserved_token_160|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126245": { "content": "<|reserved_token_161|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126246": { "content": "<|reserved_token_162|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126247": { "content": "<|reserved_token_163|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126248": { "content": "<|reserved_token_164|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126249": { "content": "<|reserved_token_165|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126250": { "content": "<|reserved_token_166|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126251": { "content": "<|reserved_token_167|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126252": { "content": "<|reserved_token_168|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126253": { "content": "<|reserved_token_169|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126254": { "content": "<|reserved_token_170|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126255": { "content": "<|reserved_token_171|>", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true },
+     "126256": {
+       "content": "<|reserved_token_172|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
1418
+ "single_word": false,
1419
+ "special": true
1420
+ },
1421
+ "126257": {
1422
+ "content": "<|reserved_token_173|>",
1423
+ "lstrip": false,
1424
+ "normalized": false,
1425
+ "rstrip": false,
1426
+ "single_word": false,
1427
+ "special": true
1428
+ },
1429
+ "126258": {
1430
+ "content": "<|reserved_token_174|>",
1431
+ "lstrip": false,
1432
+ "normalized": false,
1433
+ "rstrip": false,
1434
+ "single_word": false,
1435
+ "special": true
1436
+ },
1437
+ "126259": {
1438
+ "content": "<|reserved_token_175|>",
1439
+ "lstrip": false,
1440
+ "normalized": false,
1441
+ "rstrip": false,
1442
+ "single_word": false,
1443
+ "special": true
1444
+ },
1445
+ "126260": {
1446
+ "content": "<|reserved_token_176|>",
1447
+ "lstrip": false,
1448
+ "normalized": false,
1449
+ "rstrip": false,
1450
+ "single_word": false,
1451
+ "special": true
1452
+ },
1453
+ "126261": {
1454
+ "content": "<|reserved_token_177|>",
1455
+ "lstrip": false,
1456
+ "normalized": false,
1457
+ "rstrip": false,
1458
+ "single_word": false,
1459
+ "special": true
1460
+ },
1461
+ "126262": {
1462
+ "content": "<|reserved_token_178|>",
1463
+ "lstrip": false,
1464
+ "normalized": false,
1465
+ "rstrip": false,
1466
+ "single_word": false,
1467
+ "special": true
1468
+ },
1469
+ "126263": {
1470
+ "content": "<|reserved_token_179|>",
1471
+ "lstrip": false,
1472
+ "normalized": false,
1473
+ "rstrip": false,
1474
+ "single_word": false,
1475
+ "special": true
1476
+ },
1477
+ "126264": {
1478
+ "content": "<|reserved_token_180|>",
1479
+ "lstrip": false,
1480
+ "normalized": false,
1481
+ "rstrip": false,
1482
+ "single_word": false,
1483
+ "special": true
1484
+ },
1485
+ "126265": {
1486
+ "content": "<|reserved_token_181|>",
1487
+ "lstrip": false,
1488
+ "normalized": false,
1489
+ "rstrip": false,
1490
+ "single_word": false,
1491
+ "special": true
1492
+ },
1493
+ "126266": {
1494
+ "content": "<|reserved_token_182|>",
1495
+ "lstrip": false,
1496
+ "normalized": false,
1497
+ "rstrip": false,
1498
+ "single_word": false,
1499
+ "special": true
1500
+ },
1501
+ "126267": {
1502
+ "content": "<|reserved_token_183|>",
1503
+ "lstrip": false,
1504
+ "normalized": false,
1505
+ "rstrip": false,
1506
+ "single_word": false,
1507
+ "special": true
1508
+ },
1509
+ "126268": {
1510
+ "content": "<|reserved_token_184|>",
1511
+ "lstrip": false,
1512
+ "normalized": false,
1513
+ "rstrip": false,
1514
+ "single_word": false,
1515
+ "special": true
1516
+ },
1517
+ "126269": {
1518
+ "content": "<|reserved_token_185|>",
1519
+ "lstrip": false,
1520
+ "normalized": false,
1521
+ "rstrip": false,
1522
+ "single_word": false,
1523
+ "special": true
1524
+ },
1525
+ "126270": {
1526
+ "content": "<|reserved_token_186|>",
1527
+ "lstrip": false,
1528
+ "normalized": false,
1529
+ "rstrip": false,
1530
+ "single_word": false,
1531
+ "special": true
1532
+ },
1533
+ "126271": {
1534
+ "content": "<|reserved_token_187|>",
1535
+ "lstrip": false,
1536
+ "normalized": false,
1537
+ "rstrip": false,
1538
+ "single_word": false,
1539
+ "special": true
1540
+ },
1541
+ "126272": {
1542
+ "content": "<|reserved_token_188|>",
1543
+ "lstrip": false,
1544
+ "normalized": false,
1545
+ "rstrip": false,
1546
+ "single_word": false,
1547
+ "special": true
1548
+ },
1549
+ "126273": {
1550
+ "content": "<|reserved_token_189|>",
1551
+ "lstrip": false,
1552
+ "normalized": false,
1553
+ "rstrip": false,
1554
+ "single_word": false,
1555
+ "special": true
1556
+ },
1557
+ "126274": {
1558
+ "content": "<|reserved_token_190|>",
1559
+ "lstrip": false,
1560
+ "normalized": false,
1561
+ "rstrip": false,
1562
+ "single_word": false,
1563
+ "special": true
1564
+ },
1565
+ "126275": {
1566
+ "content": "<|reserved_token_191|>",
1567
+ "lstrip": false,
1568
+ "normalized": false,
1569
+ "rstrip": false,
1570
+ "single_word": false,
1571
+ "special": true
1572
+ },
1573
+ "126276": {
1574
+ "content": "<|reserved_token_192|>",
1575
+ "lstrip": false,
1576
+ "normalized": false,
1577
+ "rstrip": false,
1578
+ "single_word": false,
1579
+ "special": true
1580
+ },
1581
+ "126277": {
1582
+ "content": "<|reserved_token_193|>",
1583
+ "lstrip": false,
1584
+ "normalized": false,
1585
+ "rstrip": false,
1586
+ "single_word": false,
1587
+ "special": true
1588
+ },
1589
+ "126278": {
1590
+ "content": "<|reserved_token_194|>",
1591
+ "lstrip": false,
1592
+ "normalized": false,
1593
+ "rstrip": false,
1594
+ "single_word": false,
1595
+ "special": true
1596
+ },
1597
+ "126279": {
1598
+ "content": "<|reserved_token_195|>",
1599
+ "lstrip": false,
1600
+ "normalized": false,
1601
+ "rstrip": false,
1602
+ "single_word": false,
1603
+ "special": true
1604
+ },
1605
+ "126280": {
1606
+ "content": "<|reserved_token_196|>",
1607
+ "lstrip": false,
1608
+ "normalized": false,
1609
+ "rstrip": false,
1610
+ "single_word": false,
1611
+ "special": true
1612
+ },
1613
+ "126281": {
1614
+ "content": "<|reserved_token_197|>",
1615
+ "lstrip": false,
1616
+ "normalized": false,
1617
+ "rstrip": false,
1618
+ "single_word": false,
1619
+ "special": true
1620
+ },
1621
+ "126282": {
1622
+ "content": "<|reserved_token_198|>",
1623
+ "lstrip": false,
1624
+ "normalized": false,
1625
+ "rstrip": false,
1626
+ "single_word": false,
1627
+ "special": true
1628
+ },
1629
+ "126283": {
1630
+ "content": "<|reserved_token_199|>",
1631
+ "lstrip": false,
1632
+ "normalized": false,
1633
+ "rstrip": false,
1634
+ "single_word": false,
1635
+ "special": true
1636
+ },
1637
+ "126284": {
1638
+ "content": "<|reserved_token_200|>",
1639
+ "lstrip": false,
1640
+ "normalized": false,
1641
+ "rstrip": false,
1642
+ "single_word": false,
1643
+ "special": true
1644
+ },
1645
+ "126285": {
1646
+ "content": "<|reserved_token_201|>",
1647
+ "lstrip": false,
1648
+ "normalized": false,
1649
+ "rstrip": false,
1650
+ "single_word": false,
1651
+ "special": true
1652
+ },
1653
+ "126286": {
1654
+ "content": "<|reserved_token_202|>",
1655
+ "lstrip": false,
1656
+ "normalized": false,
1657
+ "rstrip": false,
1658
+ "single_word": false,
1659
+ "special": true
1660
+ },
1661
+ "126287": {
1662
+ "content": "<|reserved_token_203|>",
1663
+ "lstrip": false,
1664
+ "normalized": false,
1665
+ "rstrip": false,
1666
+ "single_word": false,
1667
+ "special": true
1668
+ },
1669
+ "126288": {
1670
+ "content": "<|reserved_token_204|>",
1671
+ "lstrip": false,
1672
+ "normalized": false,
1673
+ "rstrip": false,
1674
+ "single_word": false,
1675
+ "special": true
1676
+ },
1677
+ "126289": {
1678
+ "content": "<|reserved_token_205|>",
1679
+ "lstrip": false,
1680
+ "normalized": false,
1681
+ "rstrip": false,
1682
+ "single_word": false,
1683
+ "special": true
1684
+ },
1685
+ "126290": {
1686
+ "content": "<|reserved_token_206|>",
1687
+ "lstrip": false,
1688
+ "normalized": false,
1689
+ "rstrip": false,
1690
+ "single_word": false,
1691
+ "special": true
1692
+ },
1693
+ "126291": {
1694
+ "content": "<|reserved_token_207|>",
1695
+ "lstrip": false,
1696
+ "normalized": false,
1697
+ "rstrip": false,
1698
+ "single_word": false,
1699
+ "special": true
1700
+ },
1701
+ "126292": {
1702
+ "content": "<|reserved_token_208|>",
1703
+ "lstrip": false,
1704
+ "normalized": false,
1705
+ "rstrip": false,
1706
+ "single_word": false,
1707
+ "special": true
1708
+ },
1709
+ "126293": {
1710
+ "content": "<|reserved_token_209|>",
1711
+ "lstrip": false,
1712
+ "normalized": false,
1713
+ "rstrip": false,
1714
+ "single_word": false,
1715
+ "special": true
1716
+ },
1717
+ "126294": {
1718
+ "content": "<|reserved_token_210|>",
1719
+ "lstrip": false,
1720
+ "normalized": false,
1721
+ "rstrip": false,
1722
+ "single_word": false,
1723
+ "special": true
1724
+ },
1725
+ "126295": {
1726
+ "content": "<|reserved_token_211|>",
1727
+ "lstrip": false,
1728
+ "normalized": false,
1729
+ "rstrip": false,
1730
+ "single_word": false,
1731
+ "special": true
1732
+ },
1733
+ "126296": {
1734
+ "content": "<|reserved_token_212|>",
1735
+ "lstrip": false,
1736
+ "normalized": false,
1737
+ "rstrip": false,
1738
+ "single_word": false,
1739
+ "special": true
1740
+ },
1741
+ "126297": {
1742
+ "content": "<|reserved_token_213|>",
1743
+ "lstrip": false,
1744
+ "normalized": false,
1745
+ "rstrip": false,
1746
+ "single_word": false,
1747
+ "special": true
1748
+ },
1749
+ "126298": {
1750
+ "content": "<|reserved_token_214|>",
1751
+ "lstrip": false,
1752
+ "normalized": false,
1753
+ "rstrip": false,
1754
+ "single_word": false,
1755
+ "special": true
1756
+ },
1757
+ "126299": {
1758
+ "content": "<|reserved_token_215|>",
1759
+ "lstrip": false,
1760
+ "normalized": false,
1761
+ "rstrip": false,
1762
+ "single_word": false,
1763
+ "special": true
1764
+ },
1765
+ "126300": {
1766
+ "content": "<|reserved_token_216|>",
1767
+ "lstrip": false,
1768
+ "normalized": false,
1769
+ "rstrip": false,
1770
+ "single_word": false,
1771
+ "special": true
1772
+ },
1773
+ "126301": {
1774
+ "content": "<|reserved_token_217|>",
1775
+ "lstrip": false,
1776
+ "normalized": false,
1777
+ "rstrip": false,
1778
+ "single_word": false,
1779
+ "special": true
1780
+ },
1781
+ "126302": {
1782
+ "content": "<|reserved_token_218|>",
1783
+ "lstrip": false,
1784
+ "normalized": false,
1785
+ "rstrip": false,
1786
+ "single_word": false,
1787
+ "special": true
1788
+ },
1789
+ "126303": {
1790
+ "content": "<|reserved_token_219|>",
1791
+ "lstrip": false,
1792
+ "normalized": false,
1793
+ "rstrip": false,
1794
+ "single_word": false,
1795
+ "special": true
1796
+ },
1797
+ "126304": {
1798
+ "content": "<|reserved_token_220|>",
1799
+ "lstrip": false,
1800
+ "normalized": false,
1801
+ "rstrip": false,
1802
+ "single_word": false,
1803
+ "special": true
1804
+ },
1805
+ "126305": {
1806
+ "content": "<|reserved_token_221|>",
1807
+ "lstrip": false,
1808
+ "normalized": false,
1809
+ "rstrip": false,
1810
+ "single_word": false,
1811
+ "special": true
1812
+ },
1813
+ "126306": {
1814
+ "content": "<|reserved_token_222|>",
1815
+ "lstrip": false,
1816
+ "normalized": false,
1817
+ "rstrip": false,
1818
+ "single_word": false,
1819
+ "special": true
1820
+ },
1821
+ "126307": {
1822
+ "content": "<|reserved_token_223|>",
1823
+ "lstrip": false,
1824
+ "normalized": false,
1825
+ "rstrip": false,
1826
+ "single_word": false,
1827
+ "special": true
1828
+ },
1829
+ "126308": {
1830
+ "content": "<|reserved_token_224|>",
1831
+ "lstrip": false,
1832
+ "normalized": false,
1833
+ "rstrip": false,
1834
+ "single_word": false,
1835
+ "special": true
1836
+ },
1837
+ "126309": {
1838
+ "content": "<|reserved_token_225|>",
1839
+ "lstrip": false,
1840
+ "normalized": false,
1841
+ "rstrip": false,
1842
+ "single_word": false,
1843
+ "special": true
1844
+ },
1845
+ "126310": {
1846
+ "content": "<|reserved_token_226|>",
1847
+ "lstrip": false,
1848
+ "normalized": false,
1849
+ "rstrip": false,
1850
+ "single_word": false,
1851
+ "special": true
1852
+ },
1853
+ "126311": {
1854
+ "content": "<|reserved_token_227|>",
1855
+ "lstrip": false,
1856
+ "normalized": false,
1857
+ "rstrip": false,
1858
+ "single_word": false,
1859
+ "special": true
1860
+ },
1861
+ "126312": {
1862
+ "content": "<|reserved_token_228|>",
1863
+ "lstrip": false,
1864
+ "normalized": false,
1865
+ "rstrip": false,
1866
+ "single_word": false,
1867
+ "special": true
1868
+ },
1869
+ "126313": {
1870
+ "content": "<|reserved_token_229|>",
1871
+ "lstrip": false,
1872
+ "normalized": false,
1873
+ "rstrip": false,
1874
+ "single_word": false,
1875
+ "special": true
1876
+ },
1877
+ "126314": {
1878
+ "content": "<|reserved_token_230|>",
1879
+ "lstrip": false,
1880
+ "normalized": false,
1881
+ "rstrip": false,
1882
+ "single_word": false,
1883
+ "special": true
1884
+ },
1885
+ "126315": {
1886
+ "content": "<|reserved_token_231|>",
1887
+ "lstrip": false,
1888
+ "normalized": false,
1889
+ "rstrip": false,
1890
+ "single_word": false,
1891
+ "special": true
1892
+ },
1893
+ "126316": {
1894
+ "content": "<|reserved_token_232|>",
1895
+ "lstrip": false,
1896
+ "normalized": false,
1897
+ "rstrip": false,
1898
+ "single_word": false,
1899
+ "special": true
1900
+ },
1901
+ "126317": {
1902
+ "content": "<|reserved_token_233|>",
1903
+ "lstrip": false,
1904
+ "normalized": false,
1905
+ "rstrip": false,
1906
+ "single_word": false,
1907
+ "special": true
1908
+ },
1909
+ "126318": {
1910
+ "content": "<|reserved_token_234|>",
1911
+ "lstrip": false,
1912
+ "normalized": false,
1913
+ "rstrip": false,
1914
+ "single_word": false,
1915
+ "special": true
1916
+ },
1917
+ "126319": {
1918
+ "content": "<|reserved_token_235|>",
1919
+ "lstrip": false,
1920
+ "normalized": false,
1921
+ "rstrip": false,
1922
+ "single_word": false,
1923
+ "special": true
1924
+ },
1925
+ "126320": {
1926
+ "content": "<|reserved_token_236|>",
1927
+ "lstrip": false,
1928
+ "normalized": false,
1929
+ "rstrip": false,
1930
+ "single_word": false,
1931
+ "special": true
1932
+ },
1933
+ "126321": {
1934
+ "content": "<|reserved_token_237|>",
1935
+ "lstrip": false,
1936
+ "normalized": false,
1937
+ "rstrip": false,
1938
+ "single_word": false,
1939
+ "special": true
1940
+ },
1941
+ "126322": {
1942
+ "content": "<|reserved_token_238|>",
1943
+ "lstrip": false,
1944
+ "normalized": false,
1945
+ "rstrip": false,
1946
+ "single_word": false,
1947
+ "special": true
1948
+ },
1949
+ "126323": {
1950
+ "content": "<|reserved_token_239|>",
1951
+ "lstrip": false,
1952
+ "normalized": false,
1953
+ "rstrip": false,
1954
+ "single_word": false,
1955
+ "special": true
1956
+ },
1957
+ "126324": {
1958
+ "content": "<|reserved_token_240|>",
1959
+ "lstrip": false,
1960
+ "normalized": false,
1961
+ "rstrip": false,
1962
+ "single_word": false,
1963
+ "special": true
1964
+ },
1965
+ "126325": {
1966
+ "content": "<|reserved_token_241|>",
1967
+ "lstrip": false,
1968
+ "normalized": false,
1969
+ "rstrip": false,
1970
+ "single_word": false,
1971
+ "special": true
1972
+ },
1973
+ "126326": {
1974
+ "content": "<|reserved_token_242|>",
1975
+ "lstrip": false,
1976
+ "normalized": false,
1977
+ "rstrip": false,
1978
+ "single_word": false,
1979
+ "special": true
1980
+ },
1981
+ "126327": {
1982
+ "content": "<|reserved_token_243|>",
1983
+ "lstrip": false,
1984
+ "normalized": false,
1985
+ "rstrip": false,
1986
+ "single_word": false,
1987
+ "special": true
1988
+ },
1989
+ "126328": {
1990
+ "content": "<|reserved_token_244|>",
1991
+ "lstrip": false,
1992
+ "normalized": false,
1993
+ "rstrip": false,
1994
+ "single_word": false,
1995
+ "special": true
1996
+ },
1997
+ "126329": {
1998
+ "content": "<|reserved_token_245|>",
1999
+ "lstrip": false,
2000
+ "normalized": false,
2001
+ "rstrip": false,
2002
+ "single_word": false,
2003
+ "special": true
2004
+ },
2005
+ "126330": {
2006
+ "content": "<|reserved_token_246|>",
2007
+ "lstrip": false,
2008
+ "normalized": false,
2009
+ "rstrip": false,
2010
+ "single_word": false,
2011
+ "special": true
2012
+ },
2013
+ "126331": {
2014
+ "content": "<|reserved_token_247|>",
2015
+ "lstrip": false,
2016
+ "normalized": false,
2017
+ "rstrip": false,
2018
+ "single_word": false,
2019
+ "special": true
2020
+ },
2021
+ "126332": {
2022
+ "content": "<|reserved_token_248|>",
2023
+ "lstrip": false,
2024
+ "normalized": false,
2025
+ "rstrip": false,
2026
+ "single_word": false,
2027
+ "special": true
2028
+ },
2029
+ "126333": {
2030
+ "content": "<|reserved_token_249|>",
2031
+ "lstrip": false,
2032
+ "normalized": false,
2033
+ "rstrip": false,
2034
+ "single_word": false,
2035
+ "special": true
2036
+ },
2037
+ "126334": {
2038
+ "content": "<|reserved_token_250|>",
2039
+ "lstrip": false,
2040
+ "normalized": false,
2041
+ "rstrip": false,
2042
+ "single_word": false,
2043
+ "special": true
2044
+ },
2045
+ "126335": {
2046
+ "content": "<|reserved_token_251|>",
2047
+ "lstrip": false,
2048
+ "normalized": false,
2049
+ "rstrip": false,
2050
+ "single_word": false,
2051
+ "special": true
2052
+ },
2053
+ "126336": {
2054
+ "content": "<|mdm_mask|>",
2055
+ "lstrip": false,
2056
+ "normalized": false,
2057
+ "rstrip": false,
2058
+ "single_word": false,
2059
+ "special": true
2060
+ },
2061
+ "126337": {
2062
+ "content": "<|reserved_token_253|>",
2063
+ "lstrip": false,
2064
+ "normalized": false,
2065
+ "rstrip": false,
2066
+ "single_word": false,
2067
+ "special": true
2068
+ },
2069
+ "126338": {
2070
+ "content": "<|reserved_token_254|>",
2071
+ "lstrip": false,
2072
+ "normalized": false,
2073
+ "rstrip": false,
2074
+ "single_word": false,
2075
+ "special": true
2076
+ },
2077
+ "126339": {
2078
+ "content": "<|reserved_token_255|>",
2079
+ "lstrip": false,
2080
+ "normalized": false,
2081
+ "rstrip": false,
2082
+ "single_word": false,
2083
+ "special": true
2084
+ },
2085
+ "126340": {
2086
+ "content": "<role>",
2087
+ "lstrip": false,
2088
+ "normalized": false,
2089
+ "rstrip": false,
2090
+ "single_word": false,
2091
+ "special": true
2092
+ },
2093
+ "126341": {
2094
+ "content": "</role>",
2095
+ "lstrip": false,
2096
+ "normalized": false,
2097
+ "rstrip": false,
2098
+ "single_word": false,
2099
+ "special": true
2100
+ },
2101
+ "126342": {
2102
+ "content": "<|arithmetic_start|>",
2103
+ "lstrip": false,
2104
+ "normalized": false,
2105
+ "rstrip": false,
2106
+ "single_word": false,
2107
+ "special": true
2108
+ },
2109
+ "126343": {
2110
+ "content": "<|arithmetic_end|>",
2111
+ "lstrip": false,
2112
+ "normalized": false,
2113
+ "rstrip": false,
2114
+ "single_word": false,
2115
+ "special": true
2116
+ },
2117
+ "126344": {
2118
+ "content": "<|number_start|>",
2119
+ "lstrip": false,
2120
+ "normalized": false,
2121
+ "rstrip": false,
2122
+ "single_word": false,
2123
+ "special": true
2124
+ },
2125
+ "126345": {
2126
+ "content": "<|number_end|>",
2127
+ "lstrip": false,
2128
+ "normalized": false,
2129
+ "rstrip": false,
2130
+ "single_word": false,
2131
+ "special": true
2132
+ },
2133
+ "126346": {
2134
+ "content": "<|start_header_id|>",
2135
+ "lstrip": false,
2136
+ "normalized": false,
2137
+ "rstrip": false,
2138
+ "single_word": false,
2139
+ "special": true
2140
+ },
2141
+ "126347": {
2142
+ "content": "<|end_header_id|>",
2143
+ "lstrip": false,
2144
+ "normalized": false,
2145
+ "rstrip": false,
2146
+ "single_word": false,
2147
+ "special": true
2148
+ },
2149
+ "126348": {
2150
+ "content": "<|eot_id|>",
2151
+ "lstrip": false,
2152
+ "normalized": false,
2153
+ "rstrip": false,
2154
+ "single_word": false,
2155
+ "special": true
2156
+ }
2157
+ },
2158
+ "additional_special_tokens": [
2159
+ "<role>",
2160
+ "</role>",
2161
+ "<|arithmetic_start|>",
2162
+ "<|arithmetic_end|>",
2163
+ "<|number_start|>",
2164
+ "<|number_end|>"
2165
+ ],
2166
+ "bos_token": "<|startoftext|>",
2167
+ "clean_up_tokenization_spaces": false,
2168
+ "cls_token": "[CLS]",
2169
+ "eos_token": "<|endoftext|>",
2170
+ "extra_special_tokens": {},
2171
+ "fast_tokenizer": true,
2172
+ "gmask_token": "[gMASK]",
2173
+ "merges_file": null,
2174
+ "model_input_names": [
2175
+ "input_ids",
2176
+ "attention_mask"
2177
+ ],
2178
+ "model_max_length": 1000000000000000019884624838656,
2179
+ "pad_token": "<|endoftext|>",
2180
+ "tokenizer_class": "PreTrainedTokenizerFast",
2181
+ "trust_remote_code": true
2182
+ }
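
The tokenizer_config.json added above wires the diffusion mask token (<|mdm_mask|>, id 126336) and the Llama-3-style chat markers (<|start_header_id|>, <|end_header_id|>, <|eot_id|>) into the fast tokenizer, alongside the role/arithmetic markers listed under additional_special_tokens. A minimal sketch (not part of this commit) of how those declarations could be sanity-checked after loading the checkpoint; the repo id below is a hypothetical placeholder, not the actual model path:

# Verification sketch under stated assumptions: the checkpoint is on the Hub
# and "Fredtt3/LLaDA-small" is a hypothetical repo id -- substitute the real one.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "Fredtt3/LLaDA-small",   # hypothetical path, not confirmed by the commit
    trust_remote_code=True,  # matches "trust_remote_code": true above
)

# <|mdm_mask|> (id 126336) should agree with mask_token_id in config.json.
assert tokenizer.convert_tokens_to_ids("<|mdm_mask|>") == 126336

# Each chat-template marker is registered as a special token, so it must
# encode to a single id rather than being split by the BPE merges.
for tok in ("<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"):
    ids = tokenizer.encode(tok, add_special_tokens=False)
    assert ids == [tokenizer.convert_tokens_to_ids(tok)]

# The additional_special_tokens block keeps the role/arithmetic markers intact.
print(tokenizer.additional_special_tokens)
# expected, per the JSON above: ['<role>', '</role>', '<|arithmetic_start|>',
#   '<|arithmetic_end|>', '<|number_start|>', '<|number_end|>']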