LoganResearch commited on
Commit
720d5ab
·
verified ·
1 Parent(s): e27c8d7

Upload folder using huggingface_hub

Browse files
base_model/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
base_model/README.md ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ new_version: tiiuae/Falcon3-Mamba-7B-Instruct
3
+ datasets:
4
+ - tiiuae/falcon-refinedweb
5
+ - HuggingFaceFW/fineweb-edu
6
+ language:
7
+ - en
8
+ license: other
9
+ license_name: falcon-mamba-7b-license
10
+ license_link: https://falconllm.tii.ae/falcon-mamba-7b-terms-and-conditions.html
11
+ base_model: tiiuae/falcon-mamba-7b
12
+ pipeline_tag: text-generation
13
+ inference: true
14
+ ---
15
+
16
+ <img src="https://huggingface.co/datasets/tiiuae/documentation-images/resolve/main/falcon_mamba/thumbnail.png" alt="drawing" width="800"/>
17
+
18
+ **Model card for FalconMamba Instruct model**
19
+
20
+ # Table of Contents
21
+
22
+ 0. [TL;DR](#TL;DR)
23
+ 1. [Model Details](#model-details)
24
+ 2. [Usage](#usage)
25
+ 3. [Training Details](#training-details)
26
+ 4. [Evaluation](#evaluation)
27
+
28
+
29
+ # TL;DR
30
+
31
+ # Model Details
32
+
33
+ ## Model Description
34
+
35
+ - **Developed by:** [https://www.tii.ae](https://www.tii.ae)
36
+ - **Model type:** Causal decoder-only
37
+ - **Architecture:** Mamba
38
+ - **Language(s) (NLP):** Mainly English
39
+ - **License:** TII Falcon-Mamba License 2.0
40
+
41
+ <br>
42
+
43
+ Check out [the blogpost](https://huggingface.co/blog/falconmamba) for more details!
44
+
45
+ # Usage
46
+
47
+ Find below some example scripts on how to use the model in `transformers` (Make sure to have the latest transformers, or the one built from source):
48
+
49
+ ## Using the Pytorch model
50
+
51
+ ### Running the model on a CPU
52
+
53
+ <details>
54
+ <summary> Click to expand </summary>
55
+
56
+ ```python
57
+ from transformers import AutoTokenizer, AutoModelForCausalLM
58
+
59
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
60
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
61
+
62
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
63
+ messages = [
64
+ {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
65
+ ]
66
+
67
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
68
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids
69
+
70
+ outputs = model.generate(input_ids, max_new_tokens=30)
71
+ print(tokenizer.decode(outputs[0]))
72
+ ```
73
+
74
+ </details>
75
+
76
+ ### Running the model on a GPU
77
+
78
+ <details>
79
+ <summary> Click to expand </summary>
80
+
81
+ ```python
82
+ # pip install accelerate
83
+ from transformers import AutoTokenizer, AutoModelForCausalLM
84
+
85
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
86
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct", device_map="auto")
87
+
88
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
89
+ messages = [
90
+ {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
91
+ ]
92
+
93
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
94
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
95
+
96
+ outputs = model.generate(input_ids, max_new_tokens=30)
97
+ print(tokenizer.decode(outputs[0]))
98
+ ```
99
+
100
+ </details>
101
+
102
+ ### Running the model on a GPU using `torch.compile`
103
+
104
+ <details>
105
+ <summary> Click to expand </summary>
106
+
107
+ ```python
108
+ import torch
109
+ from transformers import AutoTokenizer, AutoModelForCausalLM
110
+
111
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
112
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct", torch_dtype=torch.bfloat16).to(0)
113
+
114
+ model = torch.compile(model)
115
+
116
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
117
+ messages = [
118
+ {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
119
+ ]
120
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
121
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
122
+
123
+ outputs = model.generate(input_ids, max_new_tokens=30)
124
+ print(tokenizer.decode(outputs[0]))
125
+ ```
126
+
127
+ </details>
128
+
129
+
130
+ ### Running the model on a GPU using different precisions
131
+
132
+ #### FP16
133
+
134
+ <details>
135
+ <summary> Click to expand </summary>
136
+
137
+ ```python
138
+ # pip install accelerate
139
+ import torch
140
+ from transformers import AutoTokenizer, AutoModelForCausalLM
141
+
142
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
143
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct", device_map="auto", torch_dtype=torch.float16)
144
+
145
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
146
+ messages = [
147
+ {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
148
+ ]
149
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
150
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
151
+
152
+ outputs = model.generate(input_ids, max_new_tokens=30)
153
+ print(tokenizer.decode(outputs[0]))
154
+ ```
155
+
156
+ </details>
157
+
158
+ #### 4-bit
159
+
160
+ <details>
161
+ <summary> Click to expand </summary>
162
+
163
+ ```python
164
+ # pip install bitsandbytes accelerate
165
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
166
+
167
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
168
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True))
169
+
170
+ # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
171
+ messages = [
172
+ {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
173
+ ]
174
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
175
+ input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda")
176
+
177
+ outputs = model.generate(input_ids, max_new_tokens=30)
178
+ print(tokenizer.decode(outputs[0]))
179
+ ```
180
+
181
+ </details>
182
+
183
+ <br>
184
+
185
+ # Training Details
186
+
187
+ ## Training Data
188
+
189
+ Falcon-Mamba has been trained with ~ 5,500 GT mainly coming from [Refined-Web](https://huggingface.co/datasets/tiiuae/falcon-refinedweb), a large volume web-only dataset filtered and deduplicated.
190
+ Similar to the other [Falcon](https://huggingface.co/tiiuae/falcon-11B) suite models, Falcon-Mamba has been trained leveraging a multi-stage training strategy to increase the context-length from 2,048 to 8,192.
191
+ Moreover, inspired by the concept of Curriculum Learning, we carefully selected data mixtures throughout the training stages, considering both data diversity and complexity.
192
+ Note that at inference the context-length is not relevant as the Mamba architecture has no limit on long range dependency.
193
+ At the last training stage, a small portion of high-quality curated data was used to further enhance performance.
194
+
195
+ Overall, the data sources included RefinedWeb-English, high quality technical data, code data and math data extracted from public sources.
196
+ In particular, we used samples coming from [Fineweb-edu](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu) during our last training stage.
197
+
198
+ The data was tokenized with the Falcon-[7B](https://huggingface.co/tiiuae/falcon-7B)/[11B](https://huggingface.co/tiiuae/falcon-11B) tokenizer.
199
+
200
+ After pre-training, the model has been further fine-tuned on instruction data.
201
+
202
+ ## Training Procedure
203
+ Falcon-Mamba-7B was trained on 256 H100 80GB GPUs for the majority of the training, using a 3D parallelism strategy (TP=1, PP=1, DP=256) combined with ZeRO.
204
+
205
+ ### Training Hyperparameters
206
+
207
+ | **Hyperparameter** | **Value** | **Comment** |
208
+ |--------------------|------------|-------------------------------------------|
209
+ | Precision | `bfloat16` | |
210
+ | Optimizer | AdamW | |
211
+ | Max learning rate | 6.4e-4 | Following a WSD (warmup-stable-decay) learning rate schedule |
212
+ | Weight decay | 1e-1 | |
213
+ | Batch size | 2048 | |
214
+
215
+
216
+ The model was trained with the AdamW optimizer, a WSD (warmup-stable-decay) learning rate schedule, and a batch size rampup from \\(b_{\mathrm{min}}=128\\) to \\(b_{\mathrm{max}}=2048\\) during the first 50 GT of training.
217
+ In the stable phase we used maximal learning rate \\(\eta_{\mathrm{max}}=6.4 \times 10^{-4}\\), and decayed it to the minimal value \\(\eta_{\mathrm{min}}=\frac{\eta_{\mathrm{max}}}{256}\\) with exponential schedule over 500 GT.
218
+ Also, we applied *BatchScaling* during the rampup — rescaling learning rate \\(\eta\\) so that the Adam noise temperature \\(T_{\mathrm{noise}}\equiv\frac{\eta}{\sqrt{b}}\\) is kept constant.
219
+
220
+ ### Speeds, Sizes, Times
221
+
222
+ The model training took roughly two months.
223
+
224
+ <br>
225
+
226
+ # Evaluation
227
+
228
+ ## Benchmarks
229
+
230
+ We evaluate our model on all benchmarks of the new leaderboard's version using the `lm-evaluation-harness` package, and then normalize the evaluation results with HuggingFace score normalization.
231
+
232
+
233
+ | `model name` |`IFEval`| `BBH` |`MATH LvL5`| `GPQA`| `MUSR`|`MMLU-PRO`|`Average`|
234
+ |:--------------------------|:------:|:-----:|:---------:|:-----:|:-----:|:--------:|:-------:|
235
+ | ***Pure SSM models*** | | | | | | | |
236
+ | `FalconMamba-7B` | 33.36 | 19.88 | 3.63 |8.05 |10.86 | 14.47 |**15.04**|
237
+ | `TRI-ML/mamba-7b-rw`<sup>*</sup>| 22.46 | 6.71 | 0.45 | 1.12 | 5.51 | 1.69 | 6.25 |
238
+ |***Hybrid SSM-attention models*** | | | | | | |
239
+ |`recurrentgemma-9b` | 30.76 | 14.80 | 4.83 | 4.70 | 6.60 | 17.88 | 13.20 |
240
+ | `Zyphra/Zamba-7B-v1`<sup>*</sup> | 24.06 | 21.12 | 3.32 | 3.03 | 7.74 | 16.02 | 12.55 |
241
+ |***Transformer models*** | | | | | | | |
242
+ | `Falcon2-11B` | 32.61 | 21.94 | 2.34 | 2.80 | 7.53 | 15.44 | 13.78 |
243
+ | `Meta-Llama-3-8B` | 14.55 | 24.50 | 3.25 | 7.38 | 6.24 | 24.55 | 13.41 |
244
+ | `Meta-Llama-3.1-8B` | 12.70 | 25.29 | 4.61 | 6.15 | 8.98 | 24.95 | 13.78 |
245
+ | `Mistral-7B-v0.1` | 23.86 | 22.02 | 2.49 | 5.59 | 10.68 | 22.36 | 14.50 |
246
+ | `Mistral-Nemo-Base-2407 (12B)` | 16.83 | 29.37 | 4.98 | 5.82 | 6.52 | 27.46 | 15.08 |
247
+ | `gemma-7B` | 26.59 | 21.12 | 6.42 | 4.92 | 10.98 | 21.64 |**15.28**|
248
+
249
+
250
+ Also, we evaluate our model on the benchmarks of the first leaderboard using `lighteval`.
251
+
252
+
253
+ | `model name` |`ARC`|`HellaSwag` |`MMLU` |`Winogrande`|`TruthfulQA`|`GSM8K`|`Average` |
254
+ |:-----------------------------|:------:|:---------:|:-----:|:----------:|:----------:|:-----:|:----------------:|
255
+ | ***Pure SSM models*** | | | | | | | |
256
+ | `FalconMamba-7B`<sup>*</sup> | 62.03 | 80.82 | 62.11 | 73.64 | 53.42 | 52.54 | **64.09** |
257
+ | `TRI-ML/mamba-7b-rw`<sup>*</sup> | 51.25 | 80.85 | 33.41 | 71.11 | 32.08 | 4.70 | 45.52 |
258
+ |***Hybrid SSM-attention models***| | | | | | | |
259
+ | `recurrentgemma-9b`<sup>**</sup> |52.00 | 80.40 | 60.50 | 73.60 | 38.60 | 42.60 | 57.95 |
260
+ | `Zyphra/Zamba-7B-v1`<sup>*</sup> | 56.14 | 82.23 | 58.11 | 79.87 | 52.88 | 30.78 | 60.00 |
261
+ |***Transformer models*** | | | | | | | |
262
+ | `Falcon2-11B` | 59.73 | 82.91 | 58.37 | 78.30 | 52.56 | 53.83 | **64.28** |
263
+ | `Meta-Llama-3-8B` | 60.24 | 82.23 | 66.70 | 78.45 | 42.93 | 45.19 | 62.62 |
264
+ | `Meta-Llama-3.1-8B` | 58.53 | 82.13 | 66.43 | 74.35 | 44.29 | 47.92 | 62.28 |
265
+ | `Mistral-7B-v0.1` | 59.98 | 83.31 | 64.16 | 78.37 | 42.15 | 37.83 | 60.97 |
266
+ | `gemma-7B` | 61.09 | 82.20 | 64.56 | 79.01 | 44.79 | 50.87 | 63.75 |
267
+
268
+ Mostly, we took evaluation results from both leaderboards. For the models marked by *star* we evaluated the tasks internally, while for the models marked by two *stars* the results were taken from the corresponding paper or model card.
269
+
270
+ ## Throughput
271
+
272
+ This model can achieve comparable throughput and performance compared to other transformer based models that use optimized kernels such as Flash Attention 2. Make sure to install the optimized Mamba kernels with the following commands:
273
+
274
+ ```bash
275
+ pip install "causal-conv1d>=1.4.0" mamba-ssm
276
+ ```
277
+
278
+ Refer to our [FalconMamba blogpost](https://huggingface.co/blog/falconmamba) for more details about performance evaluation.
279
+
280
+
281
+ <br>
282
+
283
+ # Technical Specifications
284
+
285
+ ## Model Architecture and Objective
286
+
287
+ Falcon-Mamba-7B is a causal decoder-only model trained on a causal language modeling task (i.e., predict the next token).
288
+
289
+ The model is based on the Mamba architecture ([Gu et al., 2023](https://arxiv.org/abs/2312.00752)).
290
+
291
+ | **Hyperparameter** | **Value** | **Comment** |
292
+ |--------------------|-----------|----------------------------------------|
293
+ | Layers | 64 | Number of layers |
294
+ | `d_model` | 4096 | Hidden dimension |
295
+ | `d_state` | 16 | The SSM state dimension |
296
+ | Vocabulary | 65024 | Vocabulary Size |
297
+ | Sequence length | 8192 | During the last training stages |
298
+
299
+ ## Compute Infrastructure
300
+
301
+ ### Hardware
302
+
303
+ Falcon-Mamba-7B was trained on AWS SageMaker, using on average 256 H100 80GB GPUs in 32 p5 instances.
304
+
305
+ ### Software
306
+
307
+ Falcon-Mamba-7B was trained on an internal distributed training codebase, Gigatron. It uses a 3D parallelism approach combined with ZeRO and high-performance Triton kernels.
308
+
309
+ <br>
310
+
311
+ # Citation
312
+
313
+ You can use the following bibtex citation:
314
+ ```
315
+ @misc{zuo2024falconmambacompetitiveattentionfree,
316
+ title={Falcon Mamba: The First Competitive Attention-free 7B Language Model},
317
+ author={Jingwei Zuo and Maksim Velikanov and Dhia Eddine Rhaiem and Ilyas Chahed and Younes Belkada and Guillaume Kunsch and Hakim Hacid},
318
+ year={2024},
319
+ eprint={2410.05355},
320
+ archivePrefix={arXiv},
321
+ primaryClass={cs.CL},
322
+ url={https://arxiv.org/abs/2410.05355},
323
+ }
324
+ ```
base_model/config.json ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "tiiuae/falcon-mamba-7b-chat",
3
+ "architectures": [
4
+ "FalconMambaForCausalLM"
5
+ ],
6
+ "bos_token_id": 8,
7
+ "conv_kernel": 4,
8
+ "eos_token_id": 11,
9
+ "expand": 16,
10
+ "hidden_act": "silu",
11
+ "hidden_size": 4096,
12
+ "initializer_range": 0.1,
13
+ "intermediate_size": 8192,
14
+ "layer_norm_epsilon": 1e-05,
15
+ "model_type": "falcon_mamba",
16
+ "num_hidden_layers": 64,
17
+ "pad_token_id": 0,
18
+ "rescale_prenorm_residual": false,
19
+ "residual_in_fp32": true,
20
+ "state_size": 16,
21
+ "tie_word_embeddings": false,
22
+ "time_step_floor": 0.0001,
23
+ "time_step_init_scheme": "random",
24
+ "time_step_max": 0.1,
25
+ "time_step_min": 0.001,
26
+ "time_step_rank": 256,
27
+ "time_step_scale": 1.0,
28
+ "torch_dtype": "bfloat16",
29
+ "transformers_version": "4.44.0.dev0",
30
+ "use_bias": false,
31
+ "use_cache": true,
32
+ "use_conv_bias": true,
33
+ "use_mambapy": false,
34
+ "vocab_size": 65024
35
+ }
base_model/generation_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 8,
4
+ "eos_token_id": [
5
+ 11,
6
+ 10
7
+ ],
8
+ "pad_token_id": 0,
9
+ "transformers_version": "4.44.0.dev0"
10
+ }
base_model/model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f44b8d6bf70728703ee89acfdc4ba5c0b86dbcf59d00ae07692dc943a682caf0
3
+ size 4956184024
base_model/model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc5fe83e4f7eee28ed1717aaae90cf67931c4551a927b48026742782064a86da
3
+ size 4987536920
base_model/model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8044847d7225503eaa76ca48df04685c56c8aff0a4350761f309708477434efd
3
+ size 4601680888
base_model/model.safetensors.index.json ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 14545330176
4
+ },
5
+ "weight_map": {
6
+ "backbone.embeddings.weight": "model-00001-of-00003.safetensors",
7
+ "backbone.layers.0.mixer.A_log": "model-00001-of-00003.safetensors",
8
+ "backbone.layers.0.mixer.D": "model-00001-of-00003.safetensors",
9
+ "backbone.layers.0.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
10
+ "backbone.layers.0.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
11
+ "backbone.layers.0.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
12
+ "backbone.layers.0.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
13
+ "backbone.layers.0.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
14
+ "backbone.layers.0.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
15
+ "backbone.layers.0.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
16
+ "backbone.layers.0.norm.weight": "model-00001-of-00003.safetensors",
17
+ "backbone.layers.1.mixer.A_log": "model-00001-of-00003.safetensors",
18
+ "backbone.layers.1.mixer.D": "model-00001-of-00003.safetensors",
19
+ "backbone.layers.1.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
20
+ "backbone.layers.1.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
21
+ "backbone.layers.1.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
22
+ "backbone.layers.1.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
23
+ "backbone.layers.1.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
24
+ "backbone.layers.1.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
25
+ "backbone.layers.1.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
26
+ "backbone.layers.1.norm.weight": "model-00001-of-00003.safetensors",
27
+ "backbone.layers.10.mixer.A_log": "model-00001-of-00003.safetensors",
28
+ "backbone.layers.10.mixer.D": "model-00001-of-00003.safetensors",
29
+ "backbone.layers.10.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
30
+ "backbone.layers.10.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
31
+ "backbone.layers.10.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
32
+ "backbone.layers.10.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
33
+ "backbone.layers.10.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
34
+ "backbone.layers.10.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
35
+ "backbone.layers.10.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
36
+ "backbone.layers.10.norm.weight": "model-00001-of-00003.safetensors",
37
+ "backbone.layers.11.mixer.A_log": "model-00001-of-00003.safetensors",
38
+ "backbone.layers.11.mixer.D": "model-00001-of-00003.safetensors",
39
+ "backbone.layers.11.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
40
+ "backbone.layers.11.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
41
+ "backbone.layers.11.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
42
+ "backbone.layers.11.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
43
+ "backbone.layers.11.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
44
+ "backbone.layers.11.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
45
+ "backbone.layers.11.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
46
+ "backbone.layers.11.norm.weight": "model-00001-of-00003.safetensors",
47
+ "backbone.layers.12.mixer.A_log": "model-00001-of-00003.safetensors",
48
+ "backbone.layers.12.mixer.D": "model-00001-of-00003.safetensors",
49
+ "backbone.layers.12.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
50
+ "backbone.layers.12.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
51
+ "backbone.layers.12.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
52
+ "backbone.layers.12.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
53
+ "backbone.layers.12.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
54
+ "backbone.layers.12.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
55
+ "backbone.layers.12.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
56
+ "backbone.layers.12.norm.weight": "model-00001-of-00003.safetensors",
57
+ "backbone.layers.13.mixer.A_log": "model-00001-of-00003.safetensors",
58
+ "backbone.layers.13.mixer.D": "model-00001-of-00003.safetensors",
59
+ "backbone.layers.13.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
60
+ "backbone.layers.13.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
61
+ "backbone.layers.13.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
62
+ "backbone.layers.13.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
63
+ "backbone.layers.13.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
64
+ "backbone.layers.13.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
65
+ "backbone.layers.13.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
66
+ "backbone.layers.13.norm.weight": "model-00001-of-00003.safetensors",
67
+ "backbone.layers.14.mixer.A_log": "model-00001-of-00003.safetensors",
68
+ "backbone.layers.14.mixer.D": "model-00001-of-00003.safetensors",
69
+ "backbone.layers.14.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
70
+ "backbone.layers.14.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
71
+ "backbone.layers.14.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
72
+ "backbone.layers.14.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
73
+ "backbone.layers.14.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
74
+ "backbone.layers.14.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
75
+ "backbone.layers.14.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
76
+ "backbone.layers.14.norm.weight": "model-00001-of-00003.safetensors",
77
+ "backbone.layers.15.mixer.A_log": "model-00001-of-00003.safetensors",
78
+ "backbone.layers.15.mixer.D": "model-00001-of-00003.safetensors",
79
+ "backbone.layers.15.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
80
+ "backbone.layers.15.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
81
+ "backbone.layers.15.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
82
+ "backbone.layers.15.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
83
+ "backbone.layers.15.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
84
+ "backbone.layers.15.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
85
+ "backbone.layers.15.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
86
+ "backbone.layers.15.norm.weight": "model-00001-of-00003.safetensors",
87
+ "backbone.layers.16.mixer.A_log": "model-00001-of-00003.safetensors",
88
+ "backbone.layers.16.mixer.D": "model-00001-of-00003.safetensors",
89
+ "backbone.layers.16.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
90
+ "backbone.layers.16.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
91
+ "backbone.layers.16.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
92
+ "backbone.layers.16.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
93
+ "backbone.layers.16.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
94
+ "backbone.layers.16.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
95
+ "backbone.layers.16.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
96
+ "backbone.layers.16.norm.weight": "model-00001-of-00003.safetensors",
97
+ "backbone.layers.17.mixer.A_log": "model-00001-of-00003.safetensors",
98
+ "backbone.layers.17.mixer.D": "model-00001-of-00003.safetensors",
99
+ "backbone.layers.17.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
100
+ "backbone.layers.17.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
101
+ "backbone.layers.17.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
102
+ "backbone.layers.17.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
103
+ "backbone.layers.17.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
104
+ "backbone.layers.17.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
105
+ "backbone.layers.17.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
106
+ "backbone.layers.17.norm.weight": "model-00001-of-00003.safetensors",
107
+ "backbone.layers.18.mixer.A_log": "model-00001-of-00003.safetensors",
108
+ "backbone.layers.18.mixer.D": "model-00001-of-00003.safetensors",
109
+ "backbone.layers.18.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
110
+ "backbone.layers.18.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
111
+ "backbone.layers.18.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
112
+ "backbone.layers.18.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
113
+ "backbone.layers.18.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
114
+ "backbone.layers.18.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
115
+ "backbone.layers.18.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
116
+ "backbone.layers.18.norm.weight": "model-00001-of-00003.safetensors",
117
+ "backbone.layers.19.mixer.A_log": "model-00001-of-00003.safetensors",
118
+ "backbone.layers.19.mixer.D": "model-00001-of-00003.safetensors",
119
+ "backbone.layers.19.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
120
+ "backbone.layers.19.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
121
+ "backbone.layers.19.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
122
+ "backbone.layers.19.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
123
+ "backbone.layers.19.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
124
+ "backbone.layers.19.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
125
+ "backbone.layers.19.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
126
+ "backbone.layers.19.norm.weight": "model-00001-of-00003.safetensors",
127
+ "backbone.layers.2.mixer.A_log": "model-00001-of-00003.safetensors",
128
+ "backbone.layers.2.mixer.D": "model-00001-of-00003.safetensors",
129
+ "backbone.layers.2.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
130
+ "backbone.layers.2.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
131
+ "backbone.layers.2.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
132
+ "backbone.layers.2.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
133
+ "backbone.layers.2.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
134
+ "backbone.layers.2.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
135
+ "backbone.layers.2.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
136
+ "backbone.layers.2.norm.weight": "model-00001-of-00003.safetensors",
137
+ "backbone.layers.20.mixer.A_log": "model-00001-of-00003.safetensors",
138
+ "backbone.layers.20.mixer.D": "model-00001-of-00003.safetensors",
139
+ "backbone.layers.20.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
140
+ "backbone.layers.20.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
141
+ "backbone.layers.20.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
142
+ "backbone.layers.20.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
143
+ "backbone.layers.20.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
144
+ "backbone.layers.20.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
145
+ "backbone.layers.20.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
146
+ "backbone.layers.20.norm.weight": "model-00001-of-00003.safetensors",
147
+ "backbone.layers.21.mixer.A_log": "model-00001-of-00003.safetensors",
148
+ "backbone.layers.21.mixer.D": "model-00001-of-00003.safetensors",
149
+ "backbone.layers.21.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
150
+ "backbone.layers.21.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
151
+ "backbone.layers.21.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
152
+ "backbone.layers.21.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
153
+ "backbone.layers.21.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
154
+ "backbone.layers.21.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
155
+ "backbone.layers.21.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
156
+ "backbone.layers.21.norm.weight": "model-00001-of-00003.safetensors",
157
+ "backbone.layers.22.mixer.A_log": "model-00002-of-00003.safetensors",
158
+ "backbone.layers.22.mixer.D": "model-00002-of-00003.safetensors",
159
+ "backbone.layers.22.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
160
+ "backbone.layers.22.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
161
+ "backbone.layers.22.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
162
+ "backbone.layers.22.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
163
+ "backbone.layers.22.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
164
+ "backbone.layers.22.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
165
+ "backbone.layers.22.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
166
+ "backbone.layers.22.norm.weight": "model-00002-of-00003.safetensors",
167
+ "backbone.layers.23.mixer.A_log": "model-00002-of-00003.safetensors",
168
+ "backbone.layers.23.mixer.D": "model-00002-of-00003.safetensors",
169
+ "backbone.layers.23.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
170
+ "backbone.layers.23.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
171
+ "backbone.layers.23.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
172
+ "backbone.layers.23.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
173
+ "backbone.layers.23.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
174
+ "backbone.layers.23.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
175
+ "backbone.layers.23.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
176
+ "backbone.layers.23.norm.weight": "model-00002-of-00003.safetensors",
177
+ "backbone.layers.24.mixer.A_log": "model-00002-of-00003.safetensors",
178
+ "backbone.layers.24.mixer.D": "model-00002-of-00003.safetensors",
179
+ "backbone.layers.24.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
180
+ "backbone.layers.24.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
181
+ "backbone.layers.24.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
182
+ "backbone.layers.24.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
183
+ "backbone.layers.24.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
184
+ "backbone.layers.24.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
185
+ "backbone.layers.24.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
186
+ "backbone.layers.24.norm.weight": "model-00002-of-00003.safetensors",
187
+ "backbone.layers.25.mixer.A_log": "model-00002-of-00003.safetensors",
188
+ "backbone.layers.25.mixer.D": "model-00002-of-00003.safetensors",
189
+ "backbone.layers.25.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
190
+ "backbone.layers.25.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
191
+ "backbone.layers.25.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
192
+ "backbone.layers.25.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
193
+ "backbone.layers.25.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
194
+ "backbone.layers.25.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
195
+ "backbone.layers.25.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
196
+ "backbone.layers.25.norm.weight": "model-00002-of-00003.safetensors",
197
+ "backbone.layers.26.mixer.A_log": "model-00002-of-00003.safetensors",
198
+ "backbone.layers.26.mixer.D": "model-00002-of-00003.safetensors",
199
+ "backbone.layers.26.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
200
+ "backbone.layers.26.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
201
+ "backbone.layers.26.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
202
+ "backbone.layers.26.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
203
+ "backbone.layers.26.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
204
+ "backbone.layers.26.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
205
+ "backbone.layers.26.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
206
+ "backbone.layers.26.norm.weight": "model-00002-of-00003.safetensors",
207
+ "backbone.layers.27.mixer.A_log": "model-00002-of-00003.safetensors",
208
+ "backbone.layers.27.mixer.D": "model-00002-of-00003.safetensors",
209
+ "backbone.layers.27.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
210
+ "backbone.layers.27.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
211
+ "backbone.layers.27.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
212
+ "backbone.layers.27.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
213
+ "backbone.layers.27.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
214
+ "backbone.layers.27.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
215
+ "backbone.layers.27.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
216
+ "backbone.layers.27.norm.weight": "model-00002-of-00003.safetensors",
217
+ "backbone.layers.28.mixer.A_log": "model-00002-of-00003.safetensors",
218
+ "backbone.layers.28.mixer.D": "model-00002-of-00003.safetensors",
219
+ "backbone.layers.28.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
220
+ "backbone.layers.28.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
221
+ "backbone.layers.28.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
222
+ "backbone.layers.28.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
223
+ "backbone.layers.28.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
224
+ "backbone.layers.28.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
225
+ "backbone.layers.28.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
226
+ "backbone.layers.28.norm.weight": "model-00002-of-00003.safetensors",
227
+ "backbone.layers.29.mixer.A_log": "model-00002-of-00003.safetensors",
228
+ "backbone.layers.29.mixer.D": "model-00002-of-00003.safetensors",
229
+ "backbone.layers.29.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
230
+ "backbone.layers.29.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
231
+ "backbone.layers.29.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
232
+ "backbone.layers.29.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
233
+ "backbone.layers.29.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
234
+ "backbone.layers.29.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
235
+ "backbone.layers.29.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
236
+ "backbone.layers.29.norm.weight": "model-00002-of-00003.safetensors",
237
+ "backbone.layers.3.mixer.A_log": "model-00001-of-00003.safetensors",
238
+ "backbone.layers.3.mixer.D": "model-00001-of-00003.safetensors",
239
+ "backbone.layers.3.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
240
+ "backbone.layers.3.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
241
+ "backbone.layers.3.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
242
+ "backbone.layers.3.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
243
+ "backbone.layers.3.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
244
+ "backbone.layers.3.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
245
+ "backbone.layers.3.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
246
+ "backbone.layers.3.norm.weight": "model-00001-of-00003.safetensors",
247
+ "backbone.layers.30.mixer.A_log": "model-00002-of-00003.safetensors",
248
+ "backbone.layers.30.mixer.D": "model-00002-of-00003.safetensors",
249
+ "backbone.layers.30.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
250
+ "backbone.layers.30.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
251
+ "backbone.layers.30.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
252
+ "backbone.layers.30.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
253
+ "backbone.layers.30.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
254
+ "backbone.layers.30.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
255
+ "backbone.layers.30.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
256
+ "backbone.layers.30.norm.weight": "model-00002-of-00003.safetensors",
257
+ "backbone.layers.31.mixer.A_log": "model-00002-of-00003.safetensors",
258
+ "backbone.layers.31.mixer.D": "model-00002-of-00003.safetensors",
259
+ "backbone.layers.31.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
260
+ "backbone.layers.31.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
261
+ "backbone.layers.31.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
262
+ "backbone.layers.31.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
263
+ "backbone.layers.31.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
264
+ "backbone.layers.31.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
265
+ "backbone.layers.31.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
266
+ "backbone.layers.31.norm.weight": "model-00002-of-00003.safetensors",
267
+ "backbone.layers.32.mixer.A_log": "model-00002-of-00003.safetensors",
268
+ "backbone.layers.32.mixer.D": "model-00002-of-00003.safetensors",
269
+ "backbone.layers.32.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
270
+ "backbone.layers.32.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
271
+ "backbone.layers.32.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
272
+ "backbone.layers.32.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
273
+ "backbone.layers.32.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
274
+ "backbone.layers.32.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
275
+ "backbone.layers.32.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
276
+ "backbone.layers.32.norm.weight": "model-00002-of-00003.safetensors",
277
+ "backbone.layers.33.mixer.A_log": "model-00002-of-00003.safetensors",
278
+ "backbone.layers.33.mixer.D": "model-00002-of-00003.safetensors",
279
+ "backbone.layers.33.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
280
+ "backbone.layers.33.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
281
+ "backbone.layers.33.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
282
+ "backbone.layers.33.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
283
+ "backbone.layers.33.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
284
+ "backbone.layers.33.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
285
+ "backbone.layers.33.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
286
+ "backbone.layers.33.norm.weight": "model-00002-of-00003.safetensors",
287
+ "backbone.layers.34.mixer.A_log": "model-00002-of-00003.safetensors",
288
+ "backbone.layers.34.mixer.D": "model-00002-of-00003.safetensors",
289
+ "backbone.layers.34.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
290
+ "backbone.layers.34.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
291
+ "backbone.layers.34.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
292
+ "backbone.layers.34.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
293
+ "backbone.layers.34.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
294
+ "backbone.layers.34.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
295
+ "backbone.layers.34.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
296
+ "backbone.layers.34.norm.weight": "model-00002-of-00003.safetensors",
297
+ "backbone.layers.35.mixer.A_log": "model-00002-of-00003.safetensors",
298
+ "backbone.layers.35.mixer.D": "model-00002-of-00003.safetensors",
299
+ "backbone.layers.35.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
300
+ "backbone.layers.35.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
301
+ "backbone.layers.35.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
302
+ "backbone.layers.35.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
303
+ "backbone.layers.35.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
304
+ "backbone.layers.35.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
305
+ "backbone.layers.35.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
306
+ "backbone.layers.35.norm.weight": "model-00002-of-00003.safetensors",
307
+ "backbone.layers.36.mixer.A_log": "model-00002-of-00003.safetensors",
308
+ "backbone.layers.36.mixer.D": "model-00002-of-00003.safetensors",
309
+ "backbone.layers.36.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
310
+ "backbone.layers.36.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
311
+ "backbone.layers.36.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
312
+ "backbone.layers.36.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
313
+ "backbone.layers.36.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
314
+ "backbone.layers.36.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
315
+ "backbone.layers.36.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
316
+ "backbone.layers.36.norm.weight": "model-00002-of-00003.safetensors",
317
+ "backbone.layers.37.mixer.A_log": "model-00002-of-00003.safetensors",
318
+ "backbone.layers.37.mixer.D": "model-00002-of-00003.safetensors",
319
+ "backbone.layers.37.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
320
+ "backbone.layers.37.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
321
+ "backbone.layers.37.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
322
+ "backbone.layers.37.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
323
+ "backbone.layers.37.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
324
+ "backbone.layers.37.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
325
+ "backbone.layers.37.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
326
+ "backbone.layers.37.norm.weight": "model-00002-of-00003.safetensors",
327
+ "backbone.layers.38.mixer.A_log": "model-00002-of-00003.safetensors",
328
+ "backbone.layers.38.mixer.D": "model-00002-of-00003.safetensors",
329
+ "backbone.layers.38.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
330
+ "backbone.layers.38.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
331
+ "backbone.layers.38.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
332
+ "backbone.layers.38.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
333
+ "backbone.layers.38.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
334
+ "backbone.layers.38.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
335
+ "backbone.layers.38.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
336
+ "backbone.layers.38.norm.weight": "model-00002-of-00003.safetensors",
337
+ "backbone.layers.39.mixer.A_log": "model-00002-of-00003.safetensors",
338
+ "backbone.layers.39.mixer.D": "model-00002-of-00003.safetensors",
339
+ "backbone.layers.39.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
340
+ "backbone.layers.39.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
341
+ "backbone.layers.39.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
342
+ "backbone.layers.39.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
343
+ "backbone.layers.39.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
344
+ "backbone.layers.39.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
345
+ "backbone.layers.39.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
346
+ "backbone.layers.39.norm.weight": "model-00002-of-00003.safetensors",
347
+ "backbone.layers.4.mixer.A_log": "model-00001-of-00003.safetensors",
348
+ "backbone.layers.4.mixer.D": "model-00001-of-00003.safetensors",
349
+ "backbone.layers.4.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
350
+ "backbone.layers.4.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
351
+ "backbone.layers.4.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
352
+ "backbone.layers.4.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
353
+ "backbone.layers.4.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
354
+ "backbone.layers.4.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
355
+ "backbone.layers.4.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
356
+ "backbone.layers.4.norm.weight": "model-00001-of-00003.safetensors",
357
+ "backbone.layers.40.mixer.A_log": "model-00002-of-00003.safetensors",
358
+ "backbone.layers.40.mixer.D": "model-00002-of-00003.safetensors",
359
+ "backbone.layers.40.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
360
+ "backbone.layers.40.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
361
+ "backbone.layers.40.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
362
+ "backbone.layers.40.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
363
+ "backbone.layers.40.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
364
+ "backbone.layers.40.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
365
+ "backbone.layers.40.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
366
+ "backbone.layers.40.norm.weight": "model-00002-of-00003.safetensors",
367
+ "backbone.layers.41.mixer.A_log": "model-00002-of-00003.safetensors",
368
+ "backbone.layers.41.mixer.D": "model-00002-of-00003.safetensors",
369
+ "backbone.layers.41.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
370
+ "backbone.layers.41.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
371
+ "backbone.layers.41.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
372
+ "backbone.layers.41.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
373
+ "backbone.layers.41.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
374
+ "backbone.layers.41.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
375
+ "backbone.layers.41.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
376
+ "backbone.layers.41.norm.weight": "model-00002-of-00003.safetensors",
377
+ "backbone.layers.42.mixer.A_log": "model-00002-of-00003.safetensors",
378
+ "backbone.layers.42.mixer.D": "model-00002-of-00003.safetensors",
379
+ "backbone.layers.42.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
380
+ "backbone.layers.42.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
381
+ "backbone.layers.42.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
382
+ "backbone.layers.42.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
383
+ "backbone.layers.42.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
384
+ "backbone.layers.42.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
385
+ "backbone.layers.42.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
386
+ "backbone.layers.42.norm.weight": "model-00002-of-00003.safetensors",
387
+ "backbone.layers.43.mixer.A_log": "model-00002-of-00003.safetensors",
388
+ "backbone.layers.43.mixer.D": "model-00002-of-00003.safetensors",
389
+ "backbone.layers.43.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
390
+ "backbone.layers.43.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
391
+ "backbone.layers.43.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
392
+ "backbone.layers.43.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
393
+ "backbone.layers.43.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
394
+ "backbone.layers.43.mixer.out_proj.weight": "model-00002-of-00003.safetensors",
395
+ "backbone.layers.43.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
396
+ "backbone.layers.43.norm.weight": "model-00002-of-00003.safetensors",
397
+ "backbone.layers.44.mixer.A_log": "model-00002-of-00003.safetensors",
398
+ "backbone.layers.44.mixer.D": "model-00002-of-00003.safetensors",
399
+ "backbone.layers.44.mixer.conv1d.bias": "model-00002-of-00003.safetensors",
400
+ "backbone.layers.44.mixer.conv1d.weight": "model-00002-of-00003.safetensors",
401
+ "backbone.layers.44.mixer.dt_proj.bias": "model-00002-of-00003.safetensors",
402
+ "backbone.layers.44.mixer.dt_proj.weight": "model-00002-of-00003.safetensors",
403
+ "backbone.layers.44.mixer.in_proj.weight": "model-00002-of-00003.safetensors",
404
+ "backbone.layers.44.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
405
+ "backbone.layers.44.mixer.x_proj.weight": "model-00002-of-00003.safetensors",
406
+ "backbone.layers.44.norm.weight": "model-00002-of-00003.safetensors",
407
+ "backbone.layers.45.mixer.A_log": "model-00003-of-00003.safetensors",
408
+ "backbone.layers.45.mixer.D": "model-00003-of-00003.safetensors",
409
+ "backbone.layers.45.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
410
+ "backbone.layers.45.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
411
+ "backbone.layers.45.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
412
+ "backbone.layers.45.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
413
+ "backbone.layers.45.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
414
+ "backbone.layers.45.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
415
+ "backbone.layers.45.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
416
+ "backbone.layers.45.norm.weight": "model-00003-of-00003.safetensors",
417
+ "backbone.layers.46.mixer.A_log": "model-00003-of-00003.safetensors",
418
+ "backbone.layers.46.mixer.D": "model-00003-of-00003.safetensors",
419
+ "backbone.layers.46.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
420
+ "backbone.layers.46.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
421
+ "backbone.layers.46.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
422
+ "backbone.layers.46.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
423
+ "backbone.layers.46.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
424
+ "backbone.layers.46.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
425
+ "backbone.layers.46.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
426
+ "backbone.layers.46.norm.weight": "model-00003-of-00003.safetensors",
427
+ "backbone.layers.47.mixer.A_log": "model-00003-of-00003.safetensors",
428
+ "backbone.layers.47.mixer.D": "model-00003-of-00003.safetensors",
429
+ "backbone.layers.47.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
430
+ "backbone.layers.47.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
431
+ "backbone.layers.47.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
432
+ "backbone.layers.47.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
433
+ "backbone.layers.47.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
434
+ "backbone.layers.47.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
435
+ "backbone.layers.47.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
436
+ "backbone.layers.47.norm.weight": "model-00003-of-00003.safetensors",
437
+ "backbone.layers.48.mixer.A_log": "model-00003-of-00003.safetensors",
438
+ "backbone.layers.48.mixer.D": "model-00003-of-00003.safetensors",
439
+ "backbone.layers.48.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
440
+ "backbone.layers.48.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
441
+ "backbone.layers.48.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
442
+ "backbone.layers.48.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
443
+ "backbone.layers.48.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
444
+ "backbone.layers.48.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
445
+ "backbone.layers.48.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
446
+ "backbone.layers.48.norm.weight": "model-00003-of-00003.safetensors",
447
+ "backbone.layers.49.mixer.A_log": "model-00003-of-00003.safetensors",
448
+ "backbone.layers.49.mixer.D": "model-00003-of-00003.safetensors",
449
+ "backbone.layers.49.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
450
+ "backbone.layers.49.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
451
+ "backbone.layers.49.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
452
+ "backbone.layers.49.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
453
+ "backbone.layers.49.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
454
+ "backbone.layers.49.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
455
+ "backbone.layers.49.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
456
+ "backbone.layers.49.norm.weight": "model-00003-of-00003.safetensors",
457
+ "backbone.layers.5.mixer.A_log": "model-00001-of-00003.safetensors",
458
+ "backbone.layers.5.mixer.D": "model-00001-of-00003.safetensors",
459
+ "backbone.layers.5.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
460
+ "backbone.layers.5.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
461
+ "backbone.layers.5.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
462
+ "backbone.layers.5.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
463
+ "backbone.layers.5.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
464
+ "backbone.layers.5.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
465
+ "backbone.layers.5.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
466
+ "backbone.layers.5.norm.weight": "model-00001-of-00003.safetensors",
467
+ "backbone.layers.50.mixer.A_log": "model-00003-of-00003.safetensors",
468
+ "backbone.layers.50.mixer.D": "model-00003-of-00003.safetensors",
469
+ "backbone.layers.50.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
470
+ "backbone.layers.50.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
471
+ "backbone.layers.50.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
472
+ "backbone.layers.50.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
473
+ "backbone.layers.50.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
474
+ "backbone.layers.50.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
475
+ "backbone.layers.50.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
476
+ "backbone.layers.50.norm.weight": "model-00003-of-00003.safetensors",
477
+ "backbone.layers.51.mixer.A_log": "model-00003-of-00003.safetensors",
478
+ "backbone.layers.51.mixer.D": "model-00003-of-00003.safetensors",
479
+ "backbone.layers.51.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
480
+ "backbone.layers.51.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
481
+ "backbone.layers.51.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
482
+ "backbone.layers.51.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
483
+ "backbone.layers.51.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
484
+ "backbone.layers.51.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
485
+ "backbone.layers.51.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
486
+ "backbone.layers.51.norm.weight": "model-00003-of-00003.safetensors",
487
+ "backbone.layers.52.mixer.A_log": "model-00003-of-00003.safetensors",
488
+ "backbone.layers.52.mixer.D": "model-00003-of-00003.safetensors",
489
+ "backbone.layers.52.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
490
+ "backbone.layers.52.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
491
+ "backbone.layers.52.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
492
+ "backbone.layers.52.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
493
+ "backbone.layers.52.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
494
+ "backbone.layers.52.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
495
+ "backbone.layers.52.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
496
+ "backbone.layers.52.norm.weight": "model-00003-of-00003.safetensors",
497
+ "backbone.layers.53.mixer.A_log": "model-00003-of-00003.safetensors",
498
+ "backbone.layers.53.mixer.D": "model-00003-of-00003.safetensors",
499
+ "backbone.layers.53.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
500
+ "backbone.layers.53.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
501
+ "backbone.layers.53.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
502
+ "backbone.layers.53.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
503
+ "backbone.layers.53.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
504
+ "backbone.layers.53.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
505
+ "backbone.layers.53.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
506
+ "backbone.layers.53.norm.weight": "model-00003-of-00003.safetensors",
507
+ "backbone.layers.54.mixer.A_log": "model-00003-of-00003.safetensors",
508
+ "backbone.layers.54.mixer.D": "model-00003-of-00003.safetensors",
509
+ "backbone.layers.54.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
510
+ "backbone.layers.54.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
511
+ "backbone.layers.54.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
512
+ "backbone.layers.54.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
513
+ "backbone.layers.54.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
514
+ "backbone.layers.54.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
515
+ "backbone.layers.54.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
516
+ "backbone.layers.54.norm.weight": "model-00003-of-00003.safetensors",
517
+ "backbone.layers.55.mixer.A_log": "model-00003-of-00003.safetensors",
518
+ "backbone.layers.55.mixer.D": "model-00003-of-00003.safetensors",
519
+ "backbone.layers.55.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
520
+ "backbone.layers.55.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
521
+ "backbone.layers.55.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
522
+ "backbone.layers.55.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
523
+ "backbone.layers.55.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
524
+ "backbone.layers.55.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
525
+ "backbone.layers.55.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
526
+ "backbone.layers.55.norm.weight": "model-00003-of-00003.safetensors",
527
+ "backbone.layers.56.mixer.A_log": "model-00003-of-00003.safetensors",
528
+ "backbone.layers.56.mixer.D": "model-00003-of-00003.safetensors",
529
+ "backbone.layers.56.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
530
+ "backbone.layers.56.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
531
+ "backbone.layers.56.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
532
+ "backbone.layers.56.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
533
+ "backbone.layers.56.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
534
+ "backbone.layers.56.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
535
+ "backbone.layers.56.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
536
+ "backbone.layers.56.norm.weight": "model-00003-of-00003.safetensors",
537
+ "backbone.layers.57.mixer.A_log": "model-00003-of-00003.safetensors",
538
+ "backbone.layers.57.mixer.D": "model-00003-of-00003.safetensors",
539
+ "backbone.layers.57.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
540
+ "backbone.layers.57.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
541
+ "backbone.layers.57.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
542
+ "backbone.layers.57.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
543
+ "backbone.layers.57.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
544
+ "backbone.layers.57.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
545
+ "backbone.layers.57.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
546
+ "backbone.layers.57.norm.weight": "model-00003-of-00003.safetensors",
547
+ "backbone.layers.58.mixer.A_log": "model-00003-of-00003.safetensors",
548
+ "backbone.layers.58.mixer.D": "model-00003-of-00003.safetensors",
549
+ "backbone.layers.58.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
550
+ "backbone.layers.58.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
551
+ "backbone.layers.58.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
552
+ "backbone.layers.58.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
553
+ "backbone.layers.58.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
554
+ "backbone.layers.58.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
555
+ "backbone.layers.58.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
556
+ "backbone.layers.58.norm.weight": "model-00003-of-00003.safetensors",
557
+ "backbone.layers.59.mixer.A_log": "model-00003-of-00003.safetensors",
558
+ "backbone.layers.59.mixer.D": "model-00003-of-00003.safetensors",
559
+ "backbone.layers.59.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
560
+ "backbone.layers.59.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
561
+ "backbone.layers.59.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
562
+ "backbone.layers.59.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
563
+ "backbone.layers.59.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
564
+ "backbone.layers.59.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
565
+ "backbone.layers.59.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
566
+ "backbone.layers.59.norm.weight": "model-00003-of-00003.safetensors",
567
+ "backbone.layers.6.mixer.A_log": "model-00001-of-00003.safetensors",
568
+ "backbone.layers.6.mixer.D": "model-00001-of-00003.safetensors",
569
+ "backbone.layers.6.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
570
+ "backbone.layers.6.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
571
+ "backbone.layers.6.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
572
+ "backbone.layers.6.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
573
+ "backbone.layers.6.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
574
+ "backbone.layers.6.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
575
+ "backbone.layers.6.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
576
+ "backbone.layers.6.norm.weight": "model-00001-of-00003.safetensors",
577
+ "backbone.layers.60.mixer.A_log": "model-00003-of-00003.safetensors",
578
+ "backbone.layers.60.mixer.D": "model-00003-of-00003.safetensors",
579
+ "backbone.layers.60.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
580
+ "backbone.layers.60.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
581
+ "backbone.layers.60.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
582
+ "backbone.layers.60.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
583
+ "backbone.layers.60.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
584
+ "backbone.layers.60.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
585
+ "backbone.layers.60.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
586
+ "backbone.layers.60.norm.weight": "model-00003-of-00003.safetensors",
587
+ "backbone.layers.61.mixer.A_log": "model-00003-of-00003.safetensors",
588
+ "backbone.layers.61.mixer.D": "model-00003-of-00003.safetensors",
589
+ "backbone.layers.61.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
590
+ "backbone.layers.61.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
591
+ "backbone.layers.61.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
592
+ "backbone.layers.61.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
593
+ "backbone.layers.61.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
594
+ "backbone.layers.61.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
595
+ "backbone.layers.61.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
596
+ "backbone.layers.61.norm.weight": "model-00003-of-00003.safetensors",
597
+ "backbone.layers.62.mixer.A_log": "model-00003-of-00003.safetensors",
598
+ "backbone.layers.62.mixer.D": "model-00003-of-00003.safetensors",
599
+ "backbone.layers.62.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
600
+ "backbone.layers.62.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
601
+ "backbone.layers.62.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
602
+ "backbone.layers.62.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
603
+ "backbone.layers.62.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
604
+ "backbone.layers.62.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
605
+ "backbone.layers.62.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
606
+ "backbone.layers.62.norm.weight": "model-00003-of-00003.safetensors",
607
+ "backbone.layers.63.mixer.A_log": "model-00003-of-00003.safetensors",
608
+ "backbone.layers.63.mixer.D": "model-00003-of-00003.safetensors",
609
+ "backbone.layers.63.mixer.conv1d.bias": "model-00003-of-00003.safetensors",
610
+ "backbone.layers.63.mixer.conv1d.weight": "model-00003-of-00003.safetensors",
611
+ "backbone.layers.63.mixer.dt_proj.bias": "model-00003-of-00003.safetensors",
612
+ "backbone.layers.63.mixer.dt_proj.weight": "model-00003-of-00003.safetensors",
613
+ "backbone.layers.63.mixer.in_proj.weight": "model-00003-of-00003.safetensors",
614
+ "backbone.layers.63.mixer.out_proj.weight": "model-00003-of-00003.safetensors",
615
+ "backbone.layers.63.mixer.x_proj.weight": "model-00003-of-00003.safetensors",
616
+ "backbone.layers.63.norm.weight": "model-00003-of-00003.safetensors",
617
+ "backbone.layers.7.mixer.A_log": "model-00001-of-00003.safetensors",
618
+ "backbone.layers.7.mixer.D": "model-00001-of-00003.safetensors",
619
+ "backbone.layers.7.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
620
+ "backbone.layers.7.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
621
+ "backbone.layers.7.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
622
+ "backbone.layers.7.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
623
+ "backbone.layers.7.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
624
+ "backbone.layers.7.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
625
+ "backbone.layers.7.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
626
+ "backbone.layers.7.norm.weight": "model-00001-of-00003.safetensors",
627
+ "backbone.layers.8.mixer.A_log": "model-00001-of-00003.safetensors",
628
+ "backbone.layers.8.mixer.D": "model-00001-of-00003.safetensors",
629
+ "backbone.layers.8.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
630
+ "backbone.layers.8.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
631
+ "backbone.layers.8.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
632
+ "backbone.layers.8.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
633
+ "backbone.layers.8.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
634
+ "backbone.layers.8.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
635
+ "backbone.layers.8.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
636
+ "backbone.layers.8.norm.weight": "model-00001-of-00003.safetensors",
637
+ "backbone.layers.9.mixer.A_log": "model-00001-of-00003.safetensors",
638
+ "backbone.layers.9.mixer.D": "model-00001-of-00003.safetensors",
639
+ "backbone.layers.9.mixer.conv1d.bias": "model-00001-of-00003.safetensors",
640
+ "backbone.layers.9.mixer.conv1d.weight": "model-00001-of-00003.safetensors",
641
+ "backbone.layers.9.mixer.dt_proj.bias": "model-00001-of-00003.safetensors",
642
+ "backbone.layers.9.mixer.dt_proj.weight": "model-00001-of-00003.safetensors",
643
+ "backbone.layers.9.mixer.in_proj.weight": "model-00001-of-00003.safetensors",
644
+ "backbone.layers.9.mixer.out_proj.weight": "model-00001-of-00003.safetensors",
645
+ "backbone.layers.9.mixer.x_proj.weight": "model-00001-of-00003.safetensors",
646
+ "backbone.layers.9.norm.weight": "model-00001-of-00003.safetensors",
647
+ "backbone.norm_f.weight": "model-00003-of-00003.safetensors",
648
+ "lm_head.weight": "model-00003-of-00003.safetensors"
649
+ }
650
+ }
base_model/special_tokens_map.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ ">>TITLE<<",
4
+ ">>ABSTRACT<<",
5
+ ">>INTRODUCTION<<",
6
+ ">>SUMMARY<<",
7
+ ">>COMMENT<<",
8
+ ">>ANSWER<<",
9
+ ">>QUESTION<<",
10
+ "assistant",
11
+ "<|begin_of_text|>",
12
+ "<|im_start|>",
13
+ "<|im_end|>"
14
+ ],
15
+ "bos_token": {
16
+ "content": "<|begin_of_text|>",
17
+ "lstrip": false,
18
+ "normalized": false,
19
+ "rstrip": false,
20
+ "single_word": false
21
+ },
22
+ "eos_token": {
23
+ "content": "<|end_of_text|>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false
28
+ },
29
+ "pad_token": {
30
+ "content": "<|end_of_text|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false
35
+ }
36
+ }
base_model/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
base_model/tokenizer_config.json ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "0": {
5
+ "content": ">>TITLE<<",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "1": {
13
+ "content": ">>ABSTRACT<<",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "2": {
21
+ "content": ">>INTRODUCTION<<",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "3": {
29
+ "content": ">>SUMMARY<<",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "4": {
37
+ "content": ">>COMMENT<<",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "5": {
45
+ "content": ">>ANSWER<<",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "6": {
53
+ "content": ">>QUESTION<<",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "7": {
61
+ "content": "assistant",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "8": {
69
+ "content": "<|begin_of_text|>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "9": {
77
+ "content": "<|im_start|>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "10": {
85
+ "content": "<|im_end|>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "11": {
93
+ "content": "<|end_of_text|>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ }
100
+ },
101
+ "additional_special_tokens": [
102
+ ">>TITLE<<",
103
+ ">>ABSTRACT<<",
104
+ ">>INTRODUCTION<<",
105
+ ">>SUMMARY<<",
106
+ ">>COMMENT<<",
107
+ ">>ANSWER<<",
108
+ ">>QUESTION<<",
109
+ "assistant",
110
+ "<|begin_of_text|>",
111
+ "<|im_start|>",
112
+ "<|im_end|>"
113
+ ],
114
+ "bos_token": "<|begin_of_text|>",
115
+ "chat_template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
116
+ "clean_up_tokenization_spaces": true,
117
+ "eos_token": "<|end_of_text|>",
118
+ "max_length": null,
119
+ "model_input_names": [
120
+ "input_ids",
121
+ "attention_mask"
122
+ ],
123
+ "model_max_length": 1000000000000000019884624838656,
124
+ "pad_to_multiple_of": null,
125
+ "pad_token": ">>TITLE<<",
126
+ "pad_token_type_id": 0,
127
+ "padding_side": "left",
128
+ "tokenizer_class": "PreTrainedTokenizerFast"
129
+ }